diff --git a/README.md b/README.md index f7a7ac514..0e8bdca4d 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ on how you can run your own `geth` instance. By far the most common scenario is people wanting to simply interact with the Ethereum network: create accounts; transfer funds; deploy and interact with contracts. For this particular use-case the user doesn't care about years-old historical data, so we can -fast-sync quickly to the current state of the network. To do so: +sync quickly to the current state of the network. To do so: ```shell $ geth console @@ -68,7 +68,7 @@ This command will: causing it to download more data in exchange for avoiding processing the entire history of the Ethereum network, which is very CPU intensive. * Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), - (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/) + (via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md) (note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). This tool is optional and if you leave it out you can always attach to an already running @@ -159,7 +159,7 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ ethereum/client-go ``` -This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the +This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the above command does. It will also create a persistent volume in your home directory for saving your blockchain as well as map the default ports. There is also an `alpine` tag available for a slim version of the image. 
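Once such a node is running with `--http` enabled, it can also be reached programmatically instead of through the bundled console. A minimal Go sketch using the `ethclient` package; the `--http` flag and the default endpoint `http://localhost:8545` are assumptions about the local setup, not part of this change:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Assumes geth was started with --http; adjust the endpoint as needed.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("failed to connect to geth: %v", err)
	}
	defer client.Close()

	// Fetch the current head number to confirm the node is serving data.
	head, err := client.BlockNumber(context.Background())
	if err != nil {
		log.Fatalf("failed to fetch block number: %v", err)
	}
	fmt.Println("current head block:", head)
}
```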
diff --git a/SECURITY.md b/SECURITY.md index 635c0869f..88b3f8fe1 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -29,92 +29,147 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A` ``` -----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 +Version: SKS 1.1.6 +Comment: Hostname: pgp.mit.edu -mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY -neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9 -L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi -m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b -fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd -EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7 -M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv -QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H -h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c -2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ -EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB -tDlFdGhlcmV1bSBGb3VuZGF0aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0 -aGVyZXVtLm9yZz6JAj4EEwECACgCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA -BQJaCWH6BQkFo2BYAAoJEOiNMzT6X2oK+DEP/3H6dxkm0hvHZKoHLVuuxcu3EHYo -k5sd3MMWPrZSN8qzZnY7ayEDMxnarWOizc+2jfOxfJlzX/g8lR1/fsHdWPFPhPoV -Qk8ygrHn1H8U8+rpw/U03BqmqHpYCDzJ+CIis9UWROniqXw1nuqu/FtWOsdWxNKh -jUo6k/0EsaXsxRPzgJv7fEUcVcQ7as/C3x9sy3muc2gvgA4/BKoGPb1/U0GuA8lV -fDIDshAggmnSUAg+TuYSAAdoFQ1sKwFMPigcLJF2eyKuK3iUyixJrec/c4LSf3wA -cGghbeuqI8INP0Y2zvXDQN2cByxsFAuoZG+m0cyKGaDH2MVUvOKKYqn/03qvrf15 -AWAsW0l0yQwOTCo3FbsNzemClm5Bj/xH0E4XuwXwChcMCMOWJrFoxyvCEI+keoQc -c08/a8/MtS7vBAABXwOziSmm6CNqmzpWrh/fDrjlJlba9U3MxzvqU3IFlTdMratv -6V+SgX+L25lCzW4NxxUavoB8fAlvo8lxpHKo24FP+RcLQ8XqkU3RiUsgRjQRFOqQ -TaJcsp8mimmiYyf24mNu6b48pi+a5c/eQR9w59emeEUZqsJU+nqv8BWIIp7o4Agh -NYnKjkhPlY5e1fLVfAHIADZFynWwRPkPMJSrBiP5EtcOFxQGHGjRxU/KjXkvE0hV -xYb1PB8pWMTu/beeiQI+BBMBAgAoBQJYJd7YAhsDBQkB4TOABgsJCAcDAgYVCAIJ -CgsEFgIDAQIeAQIXgAAKCRDojTM0+l9qCplDD/9IZ2i+m1cnqQKtiyHbyFGx32oL -fzqPylX2bOG5DPsSTorSUdJMGVfT04oVxXc4S/2DVnNvi7RAbSiLapCWSplgtBOj -j1xlblOoXxT3m7s1XHGCX5tENxI9fVSSPVKJn+fQaWpPB2MhBA+1lUI6GJ+11T7K -J8LrP/fiw1/nOb7rW61HW44Gtyox23sA/d1+DsFVaF8hxJlNj5coPKr8xWzQ8pQl -juzdjHDukjevuw4rRmRq9vozvj9keEU9XJ5dldyEVXFmdDk7KT0p0Rla9nxYhzf/ -r/Bv8Bzy0HCWRb2D31BjXXGG05oVnYmNGxGFxYja4MwgrMmne3ilEVjfUJsapsqi -w41BAyQgIdfREulYN7ahsF5PrjVAqBd9IGtE8ULelF2SQxEBQBngEkP0ahP6tRAL -i7/CBjPKOyKijtqVny7qrGOnU2ygcA88/WDibexDhrjz0Gx8WmErU7rIWZiZ5u4Y -vJYVRo0+6rBCXRPeSJfiP5h1p17Anr2l42boAYslfcrzquB8MHtrNcyn650OLtHG -nbxgIdniKrpuzGN6Opw+O2id2JhD1/1p4SOemwAmthplr1MIyOHNP3q93rEj2J7h -5zPS/AJuKkMDFUpslPNLQjCOwPXtdzL7/kUZGBSyez1T3TaW1uY6l9XaJJRaSn+v -1zPgfp4GJ3lPs4AlAbQ0RXRoZXJldW0gRm91bmRhdGlvbiBCdWcgQm91bnR5IDxi -b3VudHlAZXRoZXJldW0ub3JnPokCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC -AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagoENg/+LnSaVeMxiGVtcjWl -b7Xd73yrEy4uxiESS1AalW9mMf7oZzfI05f7QIQlaLAkNac74vZDJbPKjtb7tpMO -RFhRZMCveq6CPKU6pd1SI8IUVUKwpEe6AJP3lHdVP57dquieFE2HlYKm6uHbCGWU -0cjyTA+uu2KbgCHGmofsPY/xOcZLGEHTHqa5w60JJAQm+BSDKnw8wTyrxGvA3EK/ -ePSvOZMYa+iw6vYuZeBIMbdiXR/A2keBi3GuvqB8tDMj7P22TrH5mVDm3zNqGYD6 -amDPeiWp4cztY3aZyLcgYotqXPpDceZzDn+HopBPzAb/llCdE7bVswKRhphVMw4b -bhL0R/TQY7Sf6TK2LKSBrjv0DWOSijikE71SJcBnJvHU7EpKrQQ0lMGclm3ynyji -Nf0YTPXQt4I+fwTmOew2GFeK3UytNWbWI7oXX7Nm4bj9bhf3IJ0kmZb/Gs73+xII -e7Rz52Mby436tWyQIQiF9ITYNGvNf53TwBBZMn0pKPiTyr3Ur7FHEotkEOFNh1// -4zQY10XxuBdLrYGyZ4V8xHJM+oKre8Eg2R9qHXVbjvErHE+7CvgnV7YUip0criPr -BlKRvuoJaSliH2JFhSjWVrkPmFGrWN0BAx10yIqMnEplfKeHf4P9Elek3oInS8WP 
-G1zJG6s/t5+hQK0X37+TB+6rd3GJAj4EEwECACgFAlgl4TsCGwMFCQHhM4AGCwkI -BwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOiNMzT6X2oKzf8P/iIKd77WHTbp4pMN -8h52HyZJtDJmjA1DPZrbGl1TesW/Z9uTd12txlgqZnbG2GfN9+LSP6EOPzR6v2xC -OVhR+RdWhZDJJuQCVS7lJIqQrZgmeTZG0TyQPZdLjVFBOrrhVwYX+HXbu429IzHr -URf5InyR1QgqOXyElDYS6e28HFqvaoA0DWTWDDqOLPVl+U5fuceIE2XXdv3AGLeP -Yf8J5MPobjPiZtBqI6S6iENY2Yn35qLX+axeC/iYSCHVtFuCCIdb/QYR1ZZV8Ps/ -aI9DwC7LU+YfPw7iqCIoqxSeA3o1PORkdSigEg3jtfRv5UqVo9a0oBb9jdoADsat -F/gW0E7mto3XGOiaR0eB9SSdsM3x7Bz4A0HIGNaxpZo1RWqlO91leP4c13Px7ISv -5OGXfLg+M8qb+qxbGd1HpitGi9s1y1aVfEj1kOtZ0tN8eu+Upg5WKwPNBDX3ar7J -9NCULgVSL+E79FG+zXw62gxiQrLfKzm4wU/9L5wVkwQnm29hLJ0tokrSBZFnc/1l -7OC+GM63tYicKkY4rqmoWUeYx7IwFH9mtDtvR1RxO85RbQhZizwpZpdpRkH0DqZu -ZJRmRa5r7rPqmfa7d+VIFhz2Xs8pJMLVqxTsLKcLglmjw7aOrYG0SWeH7YraXWGD -N3SlvSBiVwcK7QUKzLLvpadLwxfsuQINBFgl3tgBEACbgq6HTN5gEBi0lkD/MafI -nmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4hYontkMaKRlCg2Rvgjvk3Zve0 -PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT19BdeAQRFvcfd+8w8 -f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj26bf+2+1 -DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6 -D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66i -PsR99MQ7FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A -4tGkHl08KZ2N9o6GrfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8gr -eW8xB4zuf9Mkuou+RHNmo8PebHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0 -VRxdPImKun+4LOXbfOxArOSkY6i35+gsgkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9 -IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/bM1ACUtipMiIVeUs2uFiRjpz -A1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJaCWIIBQkFo2BYAAoJ -EOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg3IHMGxDM -b/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8 -KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0I -Q1UKKXvzZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0 -K9lneidcqtBDvlggJTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0T -NOOE8fXlvu8iuIAMBSDL9ep6sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd -5MTi0MDRNTij431kn8T/D0LCgmoUmYYMBgbwFhXr67axPZlKjrqR0z3F/Elv0ZPP -cVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1qScl9HiMxjt/H6aPastH63/7w -cN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4/Lih6Z1TlwcFVap+ -cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1pM6AOQPpZ -85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4 -=r6KK ------END PGP PUBLIC KEY BLOCK----- +mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1 +82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM +qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q +lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac +48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y +PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho +yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F +nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c +2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB +8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG +b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa +FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b +mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28 +rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB +Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV +Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD 
+CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt +09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX +QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt +Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp +XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo +AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2 +D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl +s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI +ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85 +kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j +dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu +O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01 +ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky +fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc +T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S +V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL +CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf +Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm +5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp +7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm +otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS +DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF +aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr +A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz +/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF +rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt +BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt +lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0 +GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj +GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB +6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E +qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA +zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA +nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP +5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN +WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2 +8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+ +N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8 +c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT +wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl +D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem +J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf +Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw +NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL +QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V +Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye +uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7 +TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc 
+Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC +AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ +diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC +sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF +E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA +B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU +C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7 +BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1 +TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ +SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg +CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8 +HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B +AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR +0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19 +VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O +wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0 +OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR +WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1 +EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG +jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M +Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC +MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/ +AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh +ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX +FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm +/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+ +Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6 +DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT +1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655 +9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU ++2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1 +qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4 +OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN +BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h +YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1 +9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj +26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6 +D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7 +FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G +rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe +bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs +gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/ +bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa +CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg +3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8 +KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz +ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg +JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6 
+sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM +BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q +Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4 +/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p +M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl +BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q +1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs +6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l +LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3 +yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd +cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle +5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX +GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1 +rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR +XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa +QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc +mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB +8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy +I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro +yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh +ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz +i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP ++m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB +402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur +epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx +PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano= +=arte +-----END PGP PUBLIC KEY BLOCK------ ``` diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 4e63e3eff..8a0cbe335 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -496,7 +496,7 @@ func TestEstimateGas(t *testing.T) { GasPrice: big.NewInt(0), Value: nil, Data: common.Hex2Bytes("b9b046f9"), - }, 0, errors.New("invalid opcode: opcode 0xfe not defined"), nil}, + }, 0, errors.New("invalid opcode: INVALID"), nil}, {"Valid", ethereum.CallMsg{ From: addr, diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go index 0e98709b1..ff69a78c6 100644 --- a/accounts/abi/bind/bind.go +++ b/accounts/abi/bind/bind.go @@ -88,6 +88,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] transactIdentifiers = make(map[string]bool) eventIdentifiers = make(map[string]bool) ) + + for _, input := range evmABI.Constructor.Inputs { + if hasStruct(input.Type) { + bindStructType[lang](input.Type, structs) + } + } + for _, original := range evmABI.Methods { // Normalize the method for capital cases and non-anonymous inputs/outputs normalized := original diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 5a436607c..992497993 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -1911,6 +1911,50 @@ var bindTests = []struct { nil, nil, }, + { + name: `ConstructorWithStructParam`, + contract: ` + pragma solidity >=0.8.0 <0.9.0; + + contract ConstructorWithStructParam { + struct StructType { + uint256 field; + } + + 
constructor(StructType memory st) {} + } + `, + bytecode: []string{`0x608060405234801561001057600080fd5b506040516101c43803806101c48339818101604052810190610032919061014a565b50610177565b6000604051905090565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6100958261004c565b810181811067ffffffffffffffff821117156100b4576100b361005d565b5b80604052505050565b60006100c7610038565b90506100d3828261008c565b919050565b6000819050919050565b6100eb816100d8565b81146100f657600080fd5b50565b600081519050610108816100e2565b92915050565b60006020828403121561012457610123610047565b5b61012e60206100bd565b9050600061013e848285016100f9565b60008301525092915050565b6000602082840312156101605761015f610042565b5b600061016e8482850161010e565b91505092915050565b603f806101856000396000f3fe6080604052600080fdfea2646970667358221220cdffa667affecefac5561f65f4a4ba914204a8d4eb859d8cd426fb306e5c12a364736f6c634300080a0033`}, + abi: []string{`[{"inputs":[{"components":[{"internalType":"uint256","name":"field","type":"uint256"}],"internalType":"struct ConstructorWithStructParam.StructType","name":"st","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"}]`}, + imports: ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" + `, + tester: ` + var ( + key, _ = crypto.GenerateKey() + user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) + ) + defer sim.Close() + + _, tx, _, err := DeployConstructorWithStructParam(user, sim, ConstructorWithStructParamStructType{Field: big.NewInt(42)}) + if err != nil { + t.Fatalf("DeployConstructorWithStructParam() got err %v; want nil err", err) + } + sim.Commit() + + if _, err = bind.WaitDeployed(nil, sim, tx); err != nil { + t.Logf("Deployment tx: %+v", tx) + t.Errorf("bind.WaitDeployed(nil, %T, ) got err %v; want nil err", sim, err) + } + `, + }, } // Tests that packages generated by the binder can be successfully compiled and @@ -1934,22 +1978,23 @@ func TestGolangBindings(t *testing.T) { } // Generate the test suite for all the contracts for i, tt := range bindTests { - var types []string - if tt.types != nil { - types = tt.types - } else { - types = []string{tt.name} - } - // Generate the binding and create a Go source file in the workspace - bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases) - if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) - } - if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) - } - // Generate the test file with the injected test code - code := fmt.Sprintf(` + t.Run(tt.name, func(t *testing.T) { + var types []string + if tt.types != nil { + types = tt.types + } else { + types = []string{tt.name} + } + // Generate the binding and create a Go source file in the workspace + bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases) + if err != nil { + t.Fatalf("test %d: failed to generate binding: %v", i, err) + } + if err = ioutil.WriteFile(filepath.Join(pkg, 
strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil { + t.Fatalf("test %d: failed to write binding: %v", i, err) + } + // Generate the test file with the injected test code + code := fmt.Sprintf(` package bindtest import ( @@ -1961,9 +2006,10 @@ func TestGolangBindings(t *testing.T) { %s } `, tt.imports, tt.name, tt.tester) - if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil { - t.Fatalf("test %d: failed to write tests: %v", i, err) - } + if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil { + t.Fatalf("test %d: failed to write tests: %v", i, err) + } + }) } // Convert the package to go modules and use the current source for go-ethereum moder := exec.Command(gocmd, "mod", "init", "bindtest") diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index ec0698493..43cd6c645 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -290,7 +290,7 @@ func tuplePointsTo(index int, output []byte) (start int, err error) { offset := big.NewInt(0).SetBytes(output[index : index+32]) outputLen := big.NewInt(int64(len(output))) - if offset.Cmp(big.NewInt(int64(len(output)))) > 0 { + if offset.Cmp(outputLen) > 0 { return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen) } if offset.BitLen() > 63 { diff --git a/build/checksums.txt b/build/checksums.txt index 48a13b53e..5df27bbf6 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,19 +1,19 @@ # This file contains sha256 checksums of optional build dependencies. -2255eb3e4e824dd7d5fcdc2e7f84534371c186312e546fb1086a34c17752f431 go1.17.2.src.tar.gz -7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94 go1.17.2.darwin-amd64.tar.gz -ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904 go1.17.2.darwin-arm64.tar.gz -8cea5b8d1f8e8cbb58069bfed58954c71c5b1aca2f3c857765dae83bf724d0d7 go1.17.2.freebsd-386.tar.gz -c96e57218fb03e74d683ad63b1684d44c89d5e5b994f36102b33dce21b58499a go1.17.2.freebsd-amd64.tar.gz -8617f2e40d51076983502894181ae639d1d8101bfbc4d7463a2b442f239f5596 go1.17.2.linux-386.tar.gz -f242a9db6a0ad1846de7b6d94d507915d14062660616a61ef7c808a76e4f1676 go1.17.2.linux-amd64.tar.gz -a5a43c9cdabdb9f371d56951b14290eba8ce2f9b0db48fb5fc657943984fd4fc go1.17.2.linux-arm64.tar.gz -04d16105008230a9763005be05606f7eb1c683a3dbf0fbfed4034b23889cb7f2 go1.17.2.linux-armv6l.tar.gz -12e2dc7e0ffeebe77083f267ef6705fec1621cdf2ed6489b3af04a13597ed68d go1.17.2.linux-ppc64le.tar.gz -c4b2349a8d11350ca038b8c57f3cc58dc0b31284bcbed4f7fca39aeed28b4a51 go1.17.2.linux-s390x.tar.gz -8a85257a351996fdf045fe95ed5fdd6917dd48636d562dd11dedf193005a53e0 go1.17.2.windows-386.zip -fa6da0b829a66f5fab7e4e312fd6aa1b2d8f045c7ecee83b3d00f6fe5306759a go1.17.2.windows-amd64.zip -00575c85dc7a129ba892685a456b27a3f3670f71c8bfde1c5ad151f771d55df7 go1.17.2.windows-arm64.zip +3defb9a09bed042403195e872dcbc8c6fae1485963332279668ec52e80a95a2d go1.17.5.src.tar.gz +2db6a5d25815b56072465a2cacc8ed426c18f1d5fc26c1fc8c4f5a7188658264 go1.17.5.darwin-amd64.tar.gz +111f71166de0cb8089bb3e8f9f5b02d76e1bf1309256824d4062a47b0e5f98e0 go1.17.5.darwin-arm64.tar.gz +443c1cd9768df02085014f1eb034ebc7dbe032ffc8a9bb9f2e6617d037eee23c go1.17.5.freebsd-386.tar.gz +17180bdc4126acffd0ebf86d66ef5cbc3488b6734e93374fb00eb09494e006d3 go1.17.5.freebsd-amd64.tar.gz +4f4914303bc18f24fd137a97e595735308f5ce81323c7224c12466fd763fc59f go1.17.5.linux-386.tar.gz 
+bd78114b0d441b029c8fe0341f4910370925a4d270a6a590668840675b0c653e go1.17.5.linux-amd64.tar.gz +6f95ce3da40d9ce1355e48f31f4eb6508382415ca4d7413b1e7a3314e6430e7e go1.17.5.linux-arm64.tar.gz +aa1fb6c53b4fe72f159333362a10aca37ae938bde8adc9c6eaf2a8e87d1e47de go1.17.5.linux-armv6l.tar.gz +3d4be616e568f0a02cb7f7769bcaafda4b0969ed0f9bb4277619930b96847e70 go1.17.5.linux-ppc64le.tar.gz +8087d4fe991e82804e6485c26568c2e0ee0bfde00ceb9015dc86cb6bf84ef40b go1.17.5.linux-s390x.tar.gz +6d7b9948ee14a906b14f5cbebdfab63cd6828b0b618160847ecd3cc3470a26fe go1.17.5.windows-386.zip +671faf99cd5d81cd7e40936c0a94363c64d654faa0148d2af4bbc262555620b9 go1.17.5.windows-amd64.zip +45e88676b68e9cf364be469b5a27965397f4e339aa622c2f52c10433c56e5030 go1.17.5.windows-arm64.zip d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz diff --git a/build/ci.go b/build/ci.go index 1e2547fbb..8b302511a 100644 --- a/build/ci.go +++ b/build/ci.go @@ -147,7 +147,7 @@ var ( // This is the version of go that will be downloaded by // // go run ci.go install -dlgo - dlgoVersion = "1.17.2" + dlgoVersion = "1.17.5" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 61d2811f6..3aaf898db 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -898,7 +898,7 @@ func testExternalUI(api *core.SignerAPI) { addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899") data := `{"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"Person":[{"name":"name","type":"string"},{"name":"test","type":"uint8"},{"name":"wallet","type":"address"}],"Mail":[{"name":"from","type":"Person"},{"name":"to","type":"Person"},{"name":"contents","type":"string"}]},"primaryType":"Mail","domain":{"name":"Ether Mail","version":"1","chainId":"1","verifyingContract":"0xCCCcccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"},"message":{"from":{"name":"Cow","test":"3","wallet":"0xcD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},"to":{"name":"Bob","wallet":"0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB","test":"2"},"contents":"Hello, Bob!"}}` //_, err := api.SignData(ctx, accounts.MimetypeTypedData, *addr, hexutil.Encode([]byte(data))) - var typedData core.TypedData + var typedData apitypes.TypedData json.Unmarshal([]byte(data), &typedData) _, err := api.SignTypedData(ctx, *addr, typedData) expectApprove("sign 712 typed data", err) @@ -1025,7 +1025,7 @@ func GenDoc(ctx *cli.Context) { "of the work in canonicalizing and making sense of the data, and it's up to the UI to present" + "the user with the contents of the `message`" sighash, msg := accounts.TextAndHash([]byte("hello world")) - messages := []*core.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}} + messages := []*apitypes.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}} add("SignDataRequest", desc, &core.SignDataRequest{ Address: common.NewMixedcaseAddress(a), diff --git a/cmd/devp2p/internal/v4test/discv4tests.go b/cmd/devp2p/internal/v4test/discv4tests.go index 04ad67637..5f340ed94 100644 --- a/cmd/devp2p/internal/v4test/discv4tests.go +++ b/cmd/devp2p/internal/v4test/discv4tests.go @@ -229,7 +229,7 @@ func PingPastExpiration(t *utesting.T) { reply, _, _ := te.read(te.l1) if reply != nil { - t.Fatal("Expected no 
reply, got", reply) + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) } } @@ -247,7 +247,7 @@ func WrongPacketType(t *utesting.T) { reply, _, _ := te.read(te.l1) if reply != nil { - t.Fatal("Expected no reply, got", reply) + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) } } @@ -282,9 +282,16 @@ func FindnodeWithoutEndpointProof(t *utesting.T) { rand.Read(req.Target[:]) te.send(te.l1, &req) - reply, _, _ := te.read(te.l1) - if reply != nil { - t.Fatal("Expected no response, got", reply) + for { + reply, _, _ := te.read(te.l1) + if reply == nil { + // No response, all good + break + } + if reply.Kind() == v4wire.PingPacket { + continue // A ping is ok, just ignore it + } + t.Fatalf("Expected no reply, got %v %v", reply.Name(), reply) } } @@ -304,7 +311,7 @@ func BasicFindnode(t *utesting.T) { t.Fatal("read find nodes", err) } if reply.Kind() != v4wire.NeighborsPacket { - t.Fatal("Expected neighbors, got", reply.Name()) + t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply) } } @@ -341,7 +348,7 @@ func UnsolicitedNeighbors(t *utesting.T) { t.Fatal("read find nodes", err) } if reply.Kind() != v4wire.NeighborsPacket { - t.Fatal("Expected neighbors, got", reply.Name()) + t.Fatalf("Expected neighbors, got %v %v", reply.Name(), reply) } nodes := reply.(*v4wire.Neighbors).Nodes if contains(nodes, encFakeKey) { diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index edb439425..11d71e4ce 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" @@ -112,7 +113,7 @@ func Transition(ctx *cli.Context) error { log.Warn(fmt.Sprintf("--%s has been deprecated in favour of --%s", TraceDisableReturnDataFlag.Name, TraceEnableReturnDataFlag.Name)) } // Configure the EVM logger - logConfig := &vm.LogConfig{ + logConfig := &logger.Config{ DisableStack: ctx.Bool(TraceDisableStackFlag.Name), EnableMemory: !ctx.Bool(TraceDisableMemoryFlag.Name) || ctx.Bool(TraceEnableMemoryFlag.Name), EnableReturnData: !ctx.Bool(TraceDisableReturnDataFlag.Name) || ctx.Bool(TraceEnableReturnDataFlag.Name), @@ -134,7 +135,7 @@ func Transition(ctx *cli.Context) error { return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) } prevFile = traceFile - return vm.NewJSONLogger(logConfig, traceFile), nil + return logger.NewJSONLogger(logConfig, traceFile), nil } } else { getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) { diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 447bb2c2e..889de43e0 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm/runtime" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "gopkg.in/urfave/cli.v1" @@ -107,7 +108,7 @@ func runCmd(ctx *cli.Context) error { glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name))) log.Root().SetHandler(glogger) - logconfig := &vm.LogConfig{ + logconfig 
:= &logger.Config{ EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), @@ -117,7 +118,7 @@ func runCmd(ctx *cli.Context) error { var ( tracer vm.EVMLogger - debugLogger *vm.StructLogger + debugLogger *logger.StructLogger statedb *state.StateDB chainConfig *params.ChainConfig sender = common.BytesToAddress([]byte("sender")) @@ -125,12 +126,12 @@ func runCmd(ctx *cli.Context) error { genesisConfig *core.Genesis ) if ctx.GlobalBool(MachineFlag.Name) { - tracer = vm.NewJSONLogger(logconfig, os.Stdout) + tracer = logger.NewJSONLogger(logconfig, os.Stdout) } else if ctx.GlobalBool(DebugFlag.Name) { - debugLogger = vm.NewStructLogger(logconfig) + debugLogger = logger.NewStructLogger(logconfig) tracer = debugLogger } else { - debugLogger = vm.NewStructLogger(logconfig) + debugLogger = logger.NewStructLogger(logconfig) } if ctx.GlobalString(GenesisFlag.Name) != "" { gen := readGenesis(ctx.GlobalString(GenesisFlag.Name)) @@ -288,10 +289,10 @@ func runCmd(ctx *cli.Context) error { if ctx.GlobalBool(DebugFlag.Name) { if debugLogger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") - vm.WriteTrace(os.Stderr, debugLogger.StructLogs()) + logger.WriteTrace(os.Stderr, debugLogger.StructLogs()) } fmt.Fprintln(os.Stderr, "#### LOGS ####") - vm.WriteLogs(os.Stderr, statedb.Logs()) + logger.WriteLogs(os.Stderr, statedb.Logs()) } if bench || ctx.GlobalBool(StatDumpFlag.Name) { diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 5e9bf696b..90596d9b3 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" @@ -58,7 +59,7 @@ func stateTestCmd(ctx *cli.Context) error { log.Root().SetHandler(glogger) // Configure the EVM logger - config := &vm.LogConfig{ + config := &logger.Config{ EnableMemory: !ctx.GlobalBool(DisableMemoryFlag.Name), DisableStack: ctx.GlobalBool(DisableStackFlag.Name), DisableStorage: ctx.GlobalBool(DisableStorageFlag.Name), @@ -66,18 +67,18 @@ func stateTestCmd(ctx *cli.Context) error { } var ( tracer vm.EVMLogger - debugger *vm.StructLogger + debugger *logger.StructLogger ) switch { case ctx.GlobalBool(MachineFlag.Name): - tracer = vm.NewJSONLogger(config, os.Stderr) + tracer = logger.NewJSONLogger(config, os.Stderr) case ctx.GlobalBool(DebugFlag.Name): - debugger = vm.NewStructLogger(config) + debugger = logger.NewStructLogger(config) tracer = debugger default: - debugger = vm.NewStructLogger(config) + debugger = logger.NewStructLogger(config) } // Load the test content from the input file src, err := ioutil.ReadFile(ctx.Args().First()) @@ -118,7 +119,7 @@ func stateTestCmd(ctx *cli.Context) error { if ctx.GlobalBool(DebugFlag.Name) { if debugger != nil { fmt.Fprintln(os.Stderr, "#### TRACE ####") - vm.WriteTrace(os.Stderr, debugger.StructLogs()) + logger.WriteTrace(os.Stderr, debugger.StructLogs()) } } } diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 08b9a1154..7a642edd0 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/scwallet" "github.com/ethereum/go-ethereum/accounts/usbwallet" "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/ethconfig" 
"github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" @@ -159,17 +158,10 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) { cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name)) } - backend, eth := utils.RegisterEthService(stack, &cfg.Eth) - - // Configure catalyst. - if ctx.GlobalBool(utils.CatalystFlag.Name) { - if eth == nil { - utils.Fatalf("Catalyst does not work in light client mode.") - } - if err := catalyst.Register(stack, eth); err != nil { - utils.Fatalf("%v", err) - } + if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) { + cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name)) } + backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name)) // Configure GraphQL if requested if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) { diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 8a767241e..7a0135b9a 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -77,13 +77,13 @@ func localConsole(ctx *cli.Context) error { // Create and start the node based on the CLI flags prepare(ctx) stack, backend := makeFullNode(ctx) - startNode(ctx, stack, backend) + startNode(ctx, stack, backend, true) defer stack.Close() - // Attach to the newly started node and start the JavaScript console + // Attach to the newly started node and create the JavaScript console. client, err := stack.Attach() if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) + return fmt.Errorf("Failed to attach to the inproc geth: %v", err) } config := console.Config{ DataDir: utils.MakeDataDir(ctx), @@ -91,29 +91,34 @@ func localConsole(ctx *cli.Context) error { Client: client, Preload: utils.MakeConsolePreloads(ctx), } - console, err := console.New(config) if err != nil { - utils.Fatalf("Failed to start the JavaScript console: %v", err) + return fmt.Errorf("Failed to start the JavaScript console: %v", err) } defer console.Stop(false) - // If only a short execution was requested, evaluate and return + // If only a short execution was requested, evaluate and return. if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" { console.Evaluate(script) return nil } - // Otherwise print the welcome screen and enter interactive mode + + // Track node shutdown and stop the console when it goes down. + // This happens when SIGTERM is sent to the process. + go func() { + stack.Wait() + console.StopInteractive() + }() + + // Print the welcome screen and enter interactive mode. console.Welcome() console.Interactive() - return nil } // remoteConsole will connect to a remote geth instance, attaching a JavaScript // console to it. 
func remoteConsole(ctx *cli.Context) error { - // Attach to a remotely running geth instance and start the JavaScript console endpoint := ctx.Args().First() if endpoint == "" { path := node.DefaultDataDir() @@ -150,7 +155,6 @@ func remoteConsole(ctx *cli.Context) error { Client: client, Preload: utils.MakeConsolePreloads(ctx), } - console, err := console.New(config) if err != nil { utils.Fatalf("Failed to start the JavaScript console: %v", err) @@ -165,7 +169,6 @@ func remoteConsole(ctx *cli.Context) error { // Otherwise print the welcome screen and enter interactive mode console.Welcome() console.Interactive() - return nil } @@ -189,13 +192,13 @@ func dialRPC(endpoint string) (*rpc.Client, error) { func ephemeralConsole(ctx *cli.Context) error { // Create and start the node based on the CLI flags stack, backend := makeFullNode(ctx) - startNode(ctx, stack, backend) + startNode(ctx, stack, backend, false) defer stack.Close() // Attach to the newly started node and start the JavaScript console client, err := stack.Attach() if err != nil { - utils.Fatalf("Failed to attach to the inproc geth: %v", err) + return fmt.Errorf("Failed to attach to the inproc geth: %v", err) } config := console.Config{ DataDir: utils.MakeDataDir(ctx), @@ -206,22 +209,24 @@ func ephemeralConsole(ctx *cli.Context) error { console, err := console.New(config) if err != nil { - utils.Fatalf("Failed to start the JavaScript console: %v", err) + return fmt.Errorf("Failed to start the JavaScript console: %v", err) } defer console.Stop(false) - // Evaluate each of the specified JavaScript files - for _, file := range ctx.Args() { - if err = console.Execute(file); err != nil { - utils.Fatalf("Failed to execute %s: %v", file, err) - } - } - + // Interrupt the JS interpreter when node is stopped. go func() { stack.Wait() console.Stop(false) }() - console.Stop(true) + // Evaluate each of the specified JavaScript files. + for _, file := range ctx.Args() { + if err = console.Execute(file); err != nil { + return fmt.Errorf("Failed to execute %s: %v", file, err) + } + } + + // The main script is now done, but keep running timers/callbacks. 
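+	// Stop(true) waits for those pending timers/callbacks to run before
+	// tearing down the interpreter.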
+ console.Stop(true) return nil } diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 85333cbbb..ac6bbab85 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -75,6 +75,7 @@ var ( utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.OverrideArrowGlacierFlag, + utils.OverrideTerminalTotalDifficulty, utils.EthashCacheDirFlag, utils.EthashCachesInMemoryFlag, utils.EthashCachesOnDiskFlag, @@ -276,6 +277,9 @@ func prepare(ctx *cli.Context) { case ctx.GlobalIsSet(utils.RopstenFlag.Name): log.Info("Starting Geth on Ropsten testnet...") + case ctx.GlobalIsSet(utils.SepoliaFlag.Name): + log.Info("Starting Geth on Sepolia testnet...") + case ctx.GlobalIsSet(utils.RinkebyFlag.Name): log.Info("Starting Geth on Rinkeby testnet...") @@ -291,7 +295,11 @@ func prepare(ctx *cli.Context) { // If we're a full node on mainnet without --cache specified, bump default cache allowance if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) { // Make sure we're not on any supported preconfigured testnet either - if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { + if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && + !ctx.GlobalIsSet(utils.SepoliaFlag.Name) && + !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && + !ctx.GlobalIsSet(utils.GoerliFlag.Name) && + !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { // Nope, we're really on mainnet. Bump that cache up! log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096) ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096)) @@ -333,7 +341,7 @@ func geth(ctx *cli.Context) error { defer stack.Close() stack.RegisterAPIs(pluginGetAPIs(stack, wrapperBackend)) - startNode(ctx, stack, backend) + startNode(ctx, stack, backend, false) stack.Wait() return nil } @@ -341,11 +349,11 @@ func geth(ctx *cli.Context) error { // startNode boots up the system node and all registered protocols, after which // it unlocks any requested accounts, and starts the RPC/IPC interfaces and the // miner. 
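// The isConsole flag relaxes interrupt handling: SIGINT is left to the
// JavaScript console, while SIGTERM still shuts the node down.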
-func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { +func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isConsole bool) { debug.Memsize.Add("node", stack) // Start up the node itself - utils.StartNode(ctx, stack) + utils.StartNode(ctx, stack, isConsole) // Unlock any account specifically requested unlockAccounts(ctx, stack) diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index ddd8d822b..f8e40b187 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -68,7 +68,7 @@ func Fatalf(format string, args ...interface{}) { os.Exit(1) } -func StartNode(ctx *cli.Context, stack *node.Node) { +func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) { if err := stack.Start(); err != nil { Fatalf("Error starting protocol stack: %v", err) } @@ -87,17 +87,33 @@ func StartNode(ctx *cli.Context, stack *node.Node) { go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024) } - <-sigc - log.Info("Got interrupt, shutting down...") - go stack.Close() - for i := 10; i > 0; i-- { - <-sigc - if i > 1 { - log.Warn("Already shutting down, interrupt more to panic.", "times", i-1) + shutdown := func() { + log.Info("Got interrupt, shutting down...") + go stack.Close() + for i := 10; i > 0; i-- { + <-sigc + if i > 1 { + log.Warn("Already shutting down, interrupt more to panic.", "times", i-1) + } } + debug.Exit() // ensure trace and CPU profile data is flushed. + debug.LoudPanic("boom") + } + + if isConsole { + // In JS console mode, SIGINT is ignored because it's handled by the console. + // However, SIGTERM still shuts down the node. + for { + sig := <-sigc + if sig == syscall.SIGTERM { + shutdown() + return + } + } + } else { + <-sigc + shutdown() } - debug.Exit() // ensure trace and CPU profile data is flushed. - debug.LoudPanic("boom") }() } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 25453148c..ffff2c92c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -45,6 +45,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/gasprice" @@ -214,7 +215,7 @@ var ( defaultSyncMode = ethconfig.Defaults.SyncMode SyncModeFlag = TextMarshalerFlag{ Name: "syncmode", - Usage: `Blockchain sync mode ("fast", "full", "snap" or "light")`, + Usage: `Blockchain sync mode ("snap", "full" or "light")`, Value: &defaultSyncMode, } GCModeFlag = cli.StringFlag{ @@ -248,6 +249,10 @@ var ( Name: "override.arrowglacier", Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting", } + OverrideTerminalTotalDifficulty = cli.Uint64Flag{ + Name: "override.terminaltotaldifficulty", + Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting", + } // Light server and client settings LightServeFlag = cli.IntFlag{ Name: "light.serve", @@ -1196,7 +1201,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { cfg.NetRestrict = list } - if ctx.GlobalBool(DeveloperFlag.Name) || ctx.GlobalBool(CatalystFlag.Name) { + if ctx.GlobalBool(DeveloperFlag.Name) { // --dev mode can't use p2p networking. cfg.MaxPeers = 0 cfg.ListenAddr = "" @@ -1705,13 +1710,18 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) { // RegisterEthService adds an Ethereum client to the stack. 
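// If isCatalyst is set, the catalyst service (the eth1/eth2 merge API) is
// registered on the node as well.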
// The second return value is the full node instance, which may be nil if the // node is running as a light client. -func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) { +func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) { if cfg.SyncMode == downloader.LightSync { backend, err := les.New(stack, cfg) if err != nil { Fatalf("Failed to register the Ethereum service: %v", err) } stack.RegisterAPIs(tracers.APIs(backend.ApiBackend)) + if isCatalyst { + if err := catalyst.RegisterLight(stack, backend); err != nil { + Fatalf("Failed to register the catalyst service: %v", err) + } + } return backend.ApiBackend, nil } backend, err := eth.New(stack, cfg) @@ -1724,6 +1734,11 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend Fatalf("Failed to create the LES server: %v", err) } } + if isCatalyst { + if err := catalyst.Register(stack, backend); err != nil { + Fatalf("Failed to register the catalyst service: %v", err) + } + } stack.RegisterAPIs(tracers.APIs(backend.APIBackend)) return backend.APIBackend, backend } diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go new file mode 100644 index 000000000..9467fea67 --- /dev/null +++ b/consensus/beacon/consensus.go @@ -0,0 +1,376 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package beacon + +import ( + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" +) + +// Proof-of-stake protocol constants. +var ( + beaconDifficulty = common.Big0 // The default block difficulty in the beacon consensus + beaconNonce = types.EncodeNonce(0) // The default block nonce in the beacon consensus +) + +// Various error messages to mark blocks invalid. These should be private to +// prevent engine specific errors from being referenced in the remainder of the +// codebase, inherently breaking if the engine is swapped out. Please put common +// error types into the consensus package. +var ( + errTooManyUncles = errors.New("too many uncles") + errInvalidMixDigest = errors.New("invalid mix digest") + errInvalidNonce = errors.New("invalid nonce") + errInvalidUncleHash = errors.New("invalid uncle hash") +) + +// Beacon is a consensus engine that combines the eth1 consensus and proof-of-stake +// algorithm. There is a special flag inside to decide whether to use legacy consensus +// rules or new rules. The transition rule is described in the eth1/2 merge spec. 
+// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md
+//
+// The beacon here is a half-functional consensus engine: it implements only the
+// subset of methods needed for the necessary consensus checks. The legacy
+// consensus engine can be any engine that implements the consensus interface
+// (except the beacon itself).
+type Beacon struct {
+	ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique
+}
+
+// New creates a consensus engine with the given embedded eth1 engine.
+func New(ethone consensus.Engine) *Beacon {
+	if _, ok := ethone.(*Beacon); ok {
+		panic("nested consensus engine")
+	}
+	return &Beacon{ethone: ethone}
+}
+
+// Author implements consensus.Engine, returning the verified author of the block.
+func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
+	if !beacon.IsPoSHeader(header) {
+		return beacon.ethone.Author(header)
+	}
+	return header.Coinbase, nil
+}
+
+// VerifyHeader checks whether a header conforms to the consensus rules of the
+// stock Ethereum consensus engine.
+func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
+	reached, _ := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
+	if !reached {
+		return beacon.ethone.VerifyHeader(chain, header, seal)
+	}
+	// Short circuit if the parent is not known
+	parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
+	if parent == nil {
+		return consensus.ErrUnknownAncestor
+	}
+	// Sanity checks passed, do a proper verification
+	return beacon.verifyHeader(chain, header, parent)
+}
+
+// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
+// concurrently. The method returns a quit channel to abort the operations and
+// a results channel to retrieve the async verifications.
+// VerifyHeaders expects the headers to be ordered and continuous.
+func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+	if !beacon.IsPoSHeader(headers[len(headers)-1]) {
+		return beacon.ethone.VerifyHeaders(chain, headers, seals)
+	}
+	var (
+		preHeaders  []*types.Header
+		postHeaders []*types.Header
+		preSeals    []bool
+	)
+	for index, header := range headers {
+		if beacon.IsPoSHeader(header) {
+			preHeaders = headers[:index]
+			postHeaders = headers[index:]
+			preSeals = seals[:index]
+			break
+		}
+	}
+	// All the headers have passed the transition point, use new rules.
+	if len(preHeaders) == 0 {
+		return beacon.verifyHeaders(chain, headers, nil)
+	}
+	// The transition point exists in the middle, separate the headers
+	// into two batches and apply different verification rules for them.
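+	// Pre-transition headers go to the legacy engine, post-transition headers to
+	// the beacon rules; the goroutine below re-splices the two result streams so
+	// the caller still receives one ordered error per header.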
+	var (
+		abort   = make(chan struct{})
+		results = make(chan error, len(headers))
+	)
+	go func() {
+		var (
+			old, new, out      = 0, len(preHeaders), 0
+			errors             = make([]error, len(headers))
+			done               = make([]bool, len(headers))
+			oldDone, oldResult = beacon.ethone.VerifyHeaders(chain, preHeaders, preSeals)
+			newDone, newResult = beacon.verifyHeaders(chain, postHeaders, preHeaders[len(preHeaders)-1])
+		)
+		for {
+			for ; done[out]; out++ {
+				results <- errors[out]
+				if out == len(headers)-1 {
+					return
+				}
+			}
+			select {
+			case err := <-oldResult:
+				errors[old], done[old] = err, true
+				old++
+			case err := <-newResult:
+				errors[new], done[new] = err, true
+				new++
+			case <-abort:
+				close(oldDone)
+				close(newDone)
+				return
+			}
+		}
+	}()
+	return abort, results
+}
+
+// VerifyUncles verifies that the given block's uncles conform to the consensus
+// rules of the Ethereum consensus engine.
+func (beacon *Beacon) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
+	if !beacon.IsPoSHeader(block.Header()) {
+		return beacon.ethone.VerifyUncles(chain, block)
+	}
+	// Verify that there is no uncle block. It's explicitly disabled in the beacon
+	if len(block.Uncles()) > 0 {
+		return errTooManyUncles
+	}
+	return nil
+}
+
+// verifyHeader checks whether a header conforms to the consensus rules of the
+// stock Ethereum consensus engine. The differences between the beacon and classic
+// engines are:
+// (a) the following fields are expected to be constants:
+//     - difficulty is expected to be 0
+//     - nonce is expected to be 0
+//     - unclehash is expected to be Hash(emptyHeader)
+// (b) the timestamp is not verified anymore
+// (c) the extradata is limited to 32 bytes
+func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header) error {
+	// Ensure that the header's extra-data section is of a reasonable size
+	if len(header.Extra) > 32 {
+		return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
+	}
+	// Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected values.
+	if header.MixDigest != (common.Hash{}) {
+		return errInvalidMixDigest
+	}
+	if header.Nonce != beaconNonce {
+		return errInvalidNonce
+	}
+	if header.UncleHash != types.EmptyUncleHash {
+		return errInvalidUncleHash
+	}
+	// Verify the block's difficulty to ensure it's the default constant
+	if beaconDifficulty.Cmp(header.Difficulty) != 0 {
+		return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, beaconDifficulty)
+	}
+	// Verify that the gas limit is <= 2^63-1
+	if header.GasLimit > params.MaxGasLimit {
+		return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
+	}
+	// Verify that the gasUsed is <= gasLimit
+	if header.GasUsed > header.GasLimit {
+		return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
+	}
+	// Verify that the block number is parent's +1
+	if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(common.Big1) != 0 {
+		return consensus.ErrInvalidNumber
+	}
+	// Verify the header's EIP-1559 attributes.
+	return misc.VerifyEip1559Header(chain.Config(), parent, header)
+}
+
+// verifyHeaders is similar to verifyHeader, but verifies a batch of headers
+// concurrently. The method returns a quit channel to abort the operations and
+// a results channel to retrieve the async verifications. An additional parent
+// header will be passed if the relevant header is not in the database yet.
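+// Results are delivered in input order, one error per header.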
+func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, ancestor *types.Header) (chan<- struct{}, <-chan error) {
+	var (
+		abort   = make(chan struct{})
+		results = make(chan error, len(headers))
+	)
+	go func() {
+		for i, header := range headers {
+			var parent *types.Header
+			if i == 0 {
+				if ancestor != nil {
+					parent = ancestor
+				} else {
+					parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
+				}
+			} else if headers[i-1].Hash() == headers[i].ParentHash {
+				parent = headers[i-1]
+			}
+			if parent == nil {
+				select {
+				case <-abort:
+					return
+				case results <- consensus.ErrUnknownAncestor:
+				}
+				continue
+			}
+			err := beacon.verifyHeader(chain, header, parent)
+			select {
+			case <-abort:
+				return
+			case results <- err:
+			}
+		}
+	}()
+	return abort, results
+}
+
+// Prepare implements consensus.Engine, initializing the difficulty field of a
+// header to conform to the beacon protocol. The changes are done inline.
+func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
+	// Transition isn't triggered yet, use the legacy rules for preparation.
+	reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1)
+	if err != nil {
+		return err
+	}
+	if !reached {
+		return beacon.ethone.Prepare(chain, header)
+	}
+	header.Difficulty = beaconDifficulty
+	return nil
+}
+
+// Finalize implements consensus.Engine, setting the final state on the header
+func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
+	// Finalize differs from Prepare in that it can be used in both block
+	// generation and verification, so the consensus rules are determined by the header type.
+	if !beacon.IsPoSHeader(header) {
+		beacon.ethone.Finalize(chain, header, state, txs, uncles)
+		return
+	}
+	// The block reward is no longer handled here. It's done by the
+	// external consensus engine.
+	header.Root = state.IntermediateRoot(true)
+}
+
+// FinalizeAndAssemble implements consensus.Engine, setting the final state and
+// assembling the block.
+func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
+	// FinalizeAndAssemble differs from Prepare in that it can be used in both
+	// block generation and verification, so the consensus rules are determined by the header type.
+	if !beacon.IsPoSHeader(header) {
+		return beacon.ethone.FinalizeAndAssemble(chain, header, state, txs, uncles, receipts)
+	}
+	// Finalize and assemble the block
+	beacon.Finalize(chain, header, state, txs, uncles)
+	return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil
+}
+
+// Seal generates a new sealing request for the given input block and pushes
+// the result into the given channel.
+//
+// Note, the method returns immediately and will send the result async. More
+// than one result may also be returned depending on the consensus algorithm.
+func (beacon *Beacon) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
+	if !beacon.IsPoSHeader(block.Header()) {
+		return beacon.ethone.Seal(chain, block, results, stop)
+	}
+	// The seal verification is done by the external consensus engine,
+	// return directly without pushing any block back.
In other words, the
+	// beacon won't send any result via the `results` channel, which could
+	// block the receiver logic forever.
+	return nil
+}
+
+// SealHash returns the hash of a block prior to it being sealed.
+func (beacon *Beacon) SealHash(header *types.Header) common.Hash {
+	return beacon.ethone.SealHash(header)
+}
+
+// CalcDifficulty is the difficulty adjustment algorithm. It returns
+// the difficulty that a new block should have when created at time
+// given the parent block's time and difficulty.
+func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
+	// Transition isn't triggered yet, use the legacy rules for calculation
+	if reached, _ := IsTTDReached(chain, parent.Hash(), parent.Number.Uint64()); !reached {
+		return beacon.ethone.CalcDifficulty(chain, time, parent)
+	}
+	return beaconDifficulty
+}
+
+// APIs implements consensus.Engine, returning the user facing RPC APIs.
+func (beacon *Beacon) APIs(chain consensus.ChainHeaderReader) []rpc.API {
+	return beacon.ethone.APIs(chain)
+}
+
+// Close shuts down the consensus engine.
+func (beacon *Beacon) Close() error {
+	return beacon.ethone.Close()
+}
+
+// IsPoSHeader reports whether the header belongs to the PoS stage, judging by
+// some special fields. This function is not suitable for APIs like Prepare or
+// CalcDifficulty because the header difficulty is not set yet.
+func (beacon *Beacon) IsPoSHeader(header *types.Header) bool {
+	if header.Difficulty == nil {
+		panic("IsPoSHeader called with invalid difficulty")
+	}
+	return header.Difficulty.Cmp(beaconDifficulty) == 0
+}
+
+// InnerEngine returns the embedded eth1 consensus engine.
+func (beacon *Beacon) InnerEngine() consensus.Engine {
+	return beacon.ethone
+}
+
+// SetThreads updates the mining threads. Delegate the call
+// to the eth1 engine if it's threaded.
+func (beacon *Beacon) SetThreads(threads int) {
+	type threaded interface {
+		SetThreads(threads int)
+	}
+	if th, ok := beacon.ethone.(threaded); ok {
+		th.SetThreads(threads)
+	}
+}
+
+// IsTTDReached checks if the TerminalTotalDifficulty has been surpassed on the `parentHash` block.
+// It depends on the parentHash already being stored in the database.
+// If the parentHash is not stored in the database an UnknownAncestor error is returned.
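Aside: the function defined just below boils down to one big-integer comparison against the parent's stored total difficulty. With assumed illustrative values (not from the diff):

```go
// With TerminalTotalDifficulty = 100 and a parent whose accumulated total
// difficulty is 98, the transition is not yet reached; any parent with
// TD >= 100 flips every descendant over to the beacon rules.
ttd := big.NewInt(100)
parentTd := big.NewInt(98)
reached := parentTd.Cmp(ttd) >= 0 // false: still verified by the legacy engine
```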
+func IsTTDReached(chain consensus.ChainHeaderReader, parentHash common.Hash, number uint64) (bool, error) { + if chain.Config().TerminalTotalDifficulty == nil { + return false, nil + } + td := chain.GetTd(parentHash, number) + if td == nil { + return false, consensus.ErrUnknownAncestor + } + return td.Cmp(chain.Config().TerminalTotalDifficulty) >= 0, nil +} diff --git a/consensus/clique/api.go b/consensus/clique/api.go index 03f2daffa..cb270d321 100644 --- a/consensus/clique/api.go +++ b/consensus/clique/api.go @@ -196,7 +196,11 @@ func (sb *blockNumberOrHashOrRLP) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &input); err != nil { return err } - sb.RLP = hexutil.MustDecode(input) + blob, err := hexutil.Decode(input) + if err != nil { + return err + } + sb.RLP = blob return nil } diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 38597e152..685186817 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -295,9 +295,8 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H } } // Verify that the gas limit is <= 2^63-1 - cap := uint64(0x7fffffffffffffff) - if header.GasLimit > cap { - return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap) + if header.GasLimit > params.MaxGasLimit { + return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit) } // If all checks passed, validate any special fields for hard forks if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil { diff --git a/consensus/consensus.go b/consensus/consensus.go index 2a5aac945..af8ce98ff 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -44,6 +44,9 @@ type ChainHeaderReader interface { // GetHeaderByHash retrieves a block header from the database by its hash. GetHeaderByHash(hash common.Hash) *types.Header + + // GetTd retrieves the total difficulty from the database by hash and number. + GetTd(hash common.Hash, number uint64) *big.Int } // ChainReader defines a small collection of methods needed to access the local diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 7fa427f68..7dec436a2 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -281,9 +281,8 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected) } // Verify that the gas limit is <= 2^63-1 - cap := uint64(0x7fffffffffffffff) - if header.GasLimit > cap { - return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap) + if header.GasLimit > params.MaxGasLimit { + return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit) } // Verify that the gasUsed is <= gasLimit if header.GasUsed > header.GasLimit { diff --git a/consensus/merger.go b/consensus/merger.go new file mode 100644 index 000000000..ffbcbf2b8 --- /dev/null +++ b/consensus/merger.go @@ -0,0 +1,110 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package consensus
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+// transitionStatus describes the status of the eth1/2 transition. This switch
+// between modes is a one-way action which is triggered by the corresponding
+// consensus-layer message.
+type transitionStatus struct {
+	LeftPoW    bool // The flag is set when the first NewHead message is received
+	EnteredPoS bool // The flag is set when the first FinalisedBlock message is received
+}
+
+// Merger is an internal helper structure used to track the eth1/2 transition status.
+// It's a common structure that can be used in both the full node and the light client.
+type Merger struct {
+	db     ethdb.KeyValueStore
+	status transitionStatus
+	mu     sync.RWMutex
+}
+
+// NewMerger creates a new Merger which stores its transition status in the provided db.
+func NewMerger(db ethdb.KeyValueStore) *Merger {
+	var status transitionStatus
+	blob := rawdb.ReadTransitionStatus(db)
+	if len(blob) != 0 {
+		if err := rlp.DecodeBytes(blob, &status); err != nil {
+			log.Crit("Failed to decode the transition status", "err", err)
+		}
+	}
+	return &Merger{
+		db:     db,
+		status: status,
+	}
+}
+
+// ReachTTD is called whenever the first NewHead message is received
+// from the consensus layer.
+func (m *Merger) ReachTTD() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if m.status.LeftPoW {
+		return
+	}
+	m.status = transitionStatus{LeftPoW: true}
+	blob, err := rlp.EncodeToBytes(m.status)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
+	}
+	rawdb.WriteTransitionStatus(m.db, blob)
+	log.Info("Left PoW stage")
+}
+
+// FinalizePoS is called whenever the first FinalisedBlock message is received
+// from the consensus layer.
+func (m *Merger) FinalizePoS() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if m.status.EnteredPoS {
+		return
+	}
+	m.status = transitionStatus{LeftPoW: true, EnteredPoS: true}
+	blob, err := rlp.EncodeToBytes(m.status)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
+	}
+	rawdb.WriteTransitionStatus(m.db, blob)
+	log.Info("Entered PoS stage")
+}
+
+// TDDReached reports whether the chain has left the PoW stage.
+func (m *Merger) TDDReached() bool {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	return m.status.LeftPoW
+}
+
+// PoSFinalized reports whether the chain has entered the PoS stage.
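Aside: a minimal usage sketch of the Merger's one-way state machine (in-memory database, only functions defined in this diff; the accessor `PoSFinalized` is defined right below):

```go
m := consensus.NewMerger(rawdb.NewMemoryDatabase())
fmt.Println(m.TDDReached(), m.PoSFinalized()) // false false
m.ReachTTD()                                  // first NewHead message from the consensus layer
fmt.Println(m.TDDReached(), m.PoSFinalized()) // true false
m.FinalizePoS()                               // first FinalisedBlock message from the consensus layer
fmt.Println(m.TDDReached(), m.PoSFinalized()) // true true
// Both flags are RLP-encoded and persisted via rawdb.WriteTransitionStatus,
// so they survive restarts; there is no way back to the PoW stage.
```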
+func (m *Merger) PoSFinalized() bool { + m.mu.RLock() + defer m.mu.RUnlock() + + return m.status.EnteredPoS +} diff --git a/console/console.go b/console/console.go index dd39300d0..ab26bd64f 100644 --- a/console/console.go +++ b/console/console.go @@ -17,6 +17,7 @@ package console import ( + "errors" "fmt" "io" "io/ioutil" @@ -26,6 +27,7 @@ import ( "regexp" "sort" "strings" + "sync" "syscall" "github.com/dop251/goja" @@ -74,6 +76,13 @@ type Console struct { histPath string // Absolute path to the console scrollback history history []string // Scroll history maintained by the console printer io.Writer // Output writer to serialize any display strings to + + interactiveStopped chan struct{} + stopInteractiveCh chan struct{} + signalReceived chan struct{} + stopped chan struct{} + wg sync.WaitGroup + stopOnce sync.Once } // New initializes a JavaScript interpreted runtime environment and sets defaults @@ -92,12 +101,16 @@ func New(config Config) (*Console, error) { // Initialize the console and return console := &Console{ - client: config.Client, - jsre: jsre.New(config.DocRoot, config.Printer), - prompt: config.Prompt, - prompter: config.Prompter, - printer: config.Printer, - histPath: filepath.Join(config.DataDir, HistoryFile), + client: config.Client, + jsre: jsre.New(config.DocRoot, config.Printer), + prompt: config.Prompt, + prompter: config.Prompter, + printer: config.Printer, + histPath: filepath.Join(config.DataDir, HistoryFile), + interactiveStopped: make(chan struct{}), + stopInteractiveCh: make(chan struct{}), + signalReceived: make(chan struct{}, 1), + stopped: make(chan struct{}), } if err := os.MkdirAll(config.DataDir, 0700); err != nil { return nil, err @@ -105,6 +118,10 @@ func New(config Config) (*Console, error) { if err := console.init(config.Preload); err != nil { return nil, err } + + console.wg.Add(1) + go console.interruptHandler() + return console, nil } @@ -337,9 +354,63 @@ func (c *Console) Evaluate(statement string) { } }() c.jsre.Evaluate(statement, c.printer) + + // Avoid exiting Interactive when jsre was interrupted by SIGINT. + c.clearSignalReceived() } -// Interactive starts an interactive user session, where input is propted from +// interruptHandler runs in its own goroutine and waits for signals. +// When a signal is received, it interrupts the JS interpreter. +func (c *Console) interruptHandler() { + defer c.wg.Done() + + // During Interactive, liner inhibits the signal while it is prompting for + // input. However, the signal will be received while evaluating JS. + // + // On unsupported terminals, SIGINT can also happen while prompting. + // Unfortunately, it is not possible to abort the prompt in this case and + // the c.readLines goroutine leaks. + sig := make(chan os.Signal, 1) + signal.Notify(sig, syscall.SIGINT) + defer signal.Stop(sig) + + for { + select { + case <-sig: + c.setSignalReceived() + c.jsre.Interrupt(errors.New("interrupted")) + case <-c.stopInteractiveCh: + close(c.interactiveStopped) + c.jsre.Interrupt(errors.New("interrupted")) + case <-c.stopped: + return + } + } +} + +func (c *Console) setSignalReceived() { + select { + case c.signalReceived <- struct{}{}: + default: + } +} + +func (c *Console) clearSignalReceived() { + select { + case <-c.signalReceived: + default: + } +} + +// StopInteractive causes Interactive to return as soon as possible. 
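Aside: a hypothetical caller-side sketch of the new console stop flow around the `StopInteractive` method defined below; `nodeDown` is an assumed signal that the backend is gone, e.g. wired up by the node lifecycle.

```go
go func() {
	<-nodeDown                // assumed external "backend is gone" signal
	console.StopInteractive() // unblocks Interactive: "node is down, exiting console"
}()
console.Interactive() // returns once stopped or interrupted
console.Stop(false)   // idempotent thanks to stopOnce; shuts down the jsre
```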
+func (c *Console) StopInteractive() {
+	select {
+	case c.stopInteractiveCh <- struct{}{}:
+	case <-c.stopped:
+	}
+}
+
+// Interactive starts an interactive user session, where input is prompted from
 // the configured user prompter.
 func (c *Console) Interactive() {
 	var (
@@ -349,15 +420,11 @@ func (c *Console) Interactive() {
 		inputLine   = make(chan string, 1) // receives user input
 		inputErr    = make(chan error, 1)  // receives liner errors
 		requestLine = make(chan string)    // requests a line of input
-		interrupt   = make(chan os.Signal, 1)
 	)
 
-	// Monitor Ctrl-C. While liner does turn on the relevant terminal mode bits to avoid
-	// the signal, a signal can still be received for unsupported terminals. Unfortunately
-	// there is no way to cancel the line reader when this happens. The readLines
-	// goroutine will be leaked in this case.
-	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
-	defer signal.Stop(interrupt)
+	defer func() {
+		c.writeHistory()
+	}()
 
 	// The line reader runs in a separate goroutine.
 	go c.readLines(inputLine, inputErr, requestLine)
@@ -368,7 +435,14 @@ func (c *Console) Interactive() {
 		requestLine <- prompt
 
 		select {
-		case <-interrupt:
+		case <-c.interactiveStopped:
+			fmt.Fprintln(c.printer, "node is down, exiting console")
+			return
+
+		case <-c.signalReceived:
+			// SIGINT received while prompting for input -> unsupported terminal.
+			// I'm not sure if the best choice would be to leave the console running here.
+			// Bash keeps running in this case. node.js does not.
 			fmt.Fprintln(c.printer, "caught interrupt, exiting")
 			return
 
@@ -476,12 +550,19 @@ func (c *Console) Execute(path string) error {
 
 // Stop cleans up the console and terminates the runtime environment.
 func (c *Console) Stop(graceful bool) error {
-	if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
-		return err
-	}
-	if err := os.Chmod(c.histPath, 0600); err != nil { // Force 0600, even if it was different previously
-		return err
-	}
+	c.stopOnce.Do(func() {
+		// Stop the interrupt handler.
+ close(c.stopped) + c.wg.Wait() + }) + c.jsre.Stop(graceful) return nil } + +func (c *Console) writeHistory() error { + if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil { + return err + } + return os.Chmod(c.histPath, 0600) // Force 0600, even if it was different previously +} diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 86f9835a0..0f183ba52 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -17,14 +17,21 @@ package core import ( + "encoding/json" + "math/big" "runtime" "testing" "time" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" + "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" ) @@ -76,6 +83,172 @@ func TestHeaderVerification(t *testing.T) { } } +func TestHeaderVerificationForMergingClique(t *testing.T) { testHeaderVerificationForMerging(t, true) } +func TestHeaderVerificationForMergingEthash(t *testing.T) { testHeaderVerificationForMerging(t, false) } + +// Tests the verification for eth1/2 merging, including pre-merge and post-merge +func testHeaderVerificationForMerging(t *testing.T, isClique bool) { + var ( + testdb = rawdb.NewMemoryDatabase() + preBlocks []*types.Block + postBlocks []*types.Block + runEngine consensus.Engine + chainConfig *params.ChainConfig + merger = consensus.NewMerger(rawdb.NewMemoryDatabase()) + ) + if isClique { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + engine = clique.New(params.AllCliqueProtocolChanges.Clique, testdb) + ) + genspec := &Genesis{ + ExtraData: make([]byte, 32+common.AddressLength+crypto.SignatureLength), + Alloc: map[common.Address]GenesisAccount{ + addr: {Balance: big.NewInt(1)}, + }, + BaseFee: big.NewInt(params.InitialBaseFee), + } + copy(genspec.ExtraData[32:], addr[:]) + genesis := genspec.MustCommit(testdb) + + genEngine := beacon.New(engine) + preBlocks, _ = GenerateChain(params.AllCliqueProtocolChanges, genesis, genEngine, testdb, 8, nil) + td := 0 + for i, block := range preBlocks { + header := block.Header() + if i > 0 { + header.ParentHash = preBlocks[i-1].Hash() + } + header.Extra = make([]byte, 32+crypto.SignatureLength) + header.Difficulty = big.NewInt(2) + + sig, _ := crypto.Sign(genEngine.SealHash(header).Bytes(), key) + copy(header.Extra[len(header.Extra)-crypto.SignatureLength:], sig) + preBlocks[i] = block.WithSeal(header) + // calculate td + td += int(block.Difficulty().Uint64()) + } + config := *params.AllCliqueProtocolChanges + config.TerminalTotalDifficulty = big.NewInt(int64(td)) + postBlocks, _ = GenerateChain(&config, preBlocks[len(preBlocks)-1], genEngine, testdb, 8, nil) + chainConfig = &config + runEngine = beacon.New(engine) + } else { + gspec := &Genesis{Config: params.TestChainConfig} + genesis := gspec.MustCommit(testdb) + genEngine := beacon.New(ethash.NewFaker()) + + preBlocks, _ = GenerateChain(params.TestChainConfig, genesis, genEngine, testdb, 8, nil) + td := 0 + for _, block := range preBlocks { + // calculate td + td += int(block.Difficulty().Uint64()) + } + config := *params.TestChainConfig + config.TerminalTotalDifficulty = 
big.NewInt(int64(td)) + postBlocks, _ = GenerateChain(params.TestChainConfig, preBlocks[len(preBlocks)-1], genEngine, testdb, 8, nil) + + chainConfig = &config + runEngine = beacon.New(ethash.NewFaker()) + } + + preHeaders := make([]*types.Header, len(preBlocks)) + for i, block := range preBlocks { + preHeaders[i] = block.Header() + + blob, _ := json.Marshal(block.Header()) + t.Logf("Log header before the merging %d: %v", block.NumberU64(), string(blob)) + } + postHeaders := make([]*types.Header, len(postBlocks)) + for i, block := range postBlocks { + postHeaders[i] = block.Header() + + blob, _ := json.Marshal(block.Header()) + t.Logf("Log header after the merging %d: %v", block.NumberU64(), string(blob)) + } + // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces + chain, _ := NewBlockChain(testdb, nil, chainConfig, runEngine, vm.Config{}, nil, nil) + defer chain.Stop() + + // Verify the blocks before the merging + for i := 0; i < len(preBlocks); i++ { + _, results := runEngine.VerifyHeaders(chain, []*types.Header{preHeaders[i]}, []bool{true}) + // Wait for the verification result + select { + case result := <-results: + if result != nil { + t.Errorf("test %d: verification failed %v", i, result) + } + case <-time.After(time.Second): + t.Fatalf("test %d: verification timeout", i) + } + // Make sure no more data is returned + select { + case result := <-results: + t.Fatalf("test %d: unexpected result returned: %v", i, result) + case <-time.After(25 * time.Millisecond): + } + chain.InsertChain(preBlocks[i : i+1]) + } + + // Make the transition + merger.ReachTTD() + merger.FinalizePoS() + + // Verify the blocks after the merging + for i := 0; i < len(postBlocks); i++ { + _, results := runEngine.VerifyHeaders(chain, []*types.Header{postHeaders[i]}, []bool{true}) + // Wait for the verification result + select { + case result := <-results: + if result != nil { + t.Errorf("test %d: verification failed %v", i, result) + } + case <-time.After(time.Second): + t.Fatalf("test %d: verification timeout", i) + } + // Make sure no more data is returned + select { + case result := <-results: + t.Fatalf("test %d: unexpected result returned: %v", i, result) + case <-time.After(25 * time.Millisecond): + } + chain.InsertBlockWithoutSetHead(postBlocks[i]) + } + + // Verify the blocks with pre-merge blocks and post-merge blocks + var ( + headers []*types.Header + seals []bool + ) + for _, block := range preBlocks { + headers = append(headers, block.Header()) + seals = append(seals, true) + } + for _, block := range postBlocks { + headers = append(headers, block.Header()) + seals = append(seals, true) + } + _, results := runEngine.VerifyHeaders(chain, headers, seals) + for i := 0; i < len(headers); i++ { + select { + case result := <-results: + if result != nil { + t.Errorf("test %d: verification failed %v", i, result) + } + case <-time.After(time.Second): + t.Fatalf("test %d: verification timeout", i) + } + } + // Make sure no more data is returned + select { + case result := <-results: + t.Fatalf("unexpected result returned: %v", result) + case <-time.After(25 * time.Millisecond): + } +} + // Tests that concurrent header verification works, for both good and bad blocks. 
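Aside: the tests above exercise the engine's channel protocol. A caller-side sketch of draining it, with hypothetical `chain`, `headers` and `seals` already in scope:

```go
// VerifyHeaders returns an abort channel plus a results channel that yields
// exactly one error (or nil) per submitted header, in submission order.
abort, results := engine.VerifyHeaders(chain, headers, seals)
defer close(abort) // closing abort cancels any verification still in flight
for i := range headers {
	if err := <-results; err != nil {
		fmt.Printf("header %d failed verification: %v\n", i, err)
	}
}
```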
func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) } func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) } diff --git a/core/blockchain.go b/core/blockchain.go index 162d244f4..9b93f421e 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "math/big" - mrand "math/rand" "sort" "sync" "sync/atomic" @@ -208,15 +207,14 @@ type BlockChain struct { validator Validator // Block and state validator interface prefetcher Prefetcher processor Processor // Block transaction processor interface + forker *ForkChoice vmConfig vm.Config - - shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. } // NewBlockChain returns a fully initialised block chain using information -// available in the database. It initialises the default Ethereum Validator and -// Processor. -func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(block *types.Block) bool, txLookupLimit *uint64) (*BlockChain, error) { +// available in the database. It initialises the default Ethereum Validator +// and Processor. +func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, vmConfig vm.Config, shouldPreserve func(header *types.Header) bool, txLookupLimit *uint64) (*BlockChain, error) { if cacheConfig == nil { cacheConfig = defaultCacheConfig } @@ -237,18 +235,18 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par Journal: cacheConfig.TrieCleanJournal, Preimages: cacheConfig.Preimages, }), - quit: make(chan struct{}), - chainmu: syncx.NewClosableMutex(), - shouldPreserve: shouldPreserve, - bodyCache: bodyCache, - bodyRLPCache: bodyRLPCache, - receiptsCache: receiptsCache, - blockCache: blockCache, - txLookupCache: txLookupCache, - futureBlocks: futureBlocks, - engine: engine, - vmConfig: vmConfig, + quit: make(chan struct{}), + chainmu: syncx.NewClosableMutex(), + bodyCache: bodyCache, + bodyRLPCache: bodyRLPCache, + receiptsCache: receiptsCache, + blockCache: blockCache, + txLookupCache: txLookupCache, + futureBlocks: futureBlocks, + engine: engine, + vmConfig: vmConfig, } + bc.forker = NewForkChoice(bc, shouldPreserve) bc.validator = NewBlockValidator(chainConfig, bc, engine) bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) bc.processor = NewStateProcessor(chainConfig, bc, engine) @@ -382,7 +380,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par // Start future block processor. bc.wg.Add(1) - go bc.futureBlocksLoop() + go bc.updateFutureBlocks() // Start tx indexer/unindexer. if txLookupLimit != nil { @@ -631,9 +629,9 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo return rootNumber, bc.loadLastState() } -// FastSyncCommitHead sets the current head block to the one defined by the hash +// SnapSyncCommitHead sets the current head block to the one defined by the hash // irrelevant what the chain contents were prior. 
-func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error { +func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { // Make sure that both the block as well at its state trie exists block := bc.GetBlockByHash(hash) if block == nil { @@ -738,30 +736,24 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { // // Note, this function assumes that the `mu` mutex is held! func (bc *BlockChain) writeHeadBlock(block *types.Block) { - // If the block is on a side chain or an unknown one, force other heads onto it too - updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash() - // Add the block to the canonical chain number scheme and mark as the head batch := bc.db.NewBatch() + rawdb.WriteHeadHeaderHash(batch, block.Hash()) + rawdb.WriteHeadFastBlockHash(batch, block.Hash()) rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64()) rawdb.WriteTxLookupEntriesByBlock(batch, block) rawdb.WriteHeadBlockHash(batch, block.Hash()) - // If the block is better than our head or is on a different chain, force update heads - if updateHeads { - rawdb.WriteHeadHeaderHash(batch, block.Hash()) - rawdb.WriteHeadFastBlockHash(batch, block.Hash()) - } // Flush the whole batch into the disk, exit the node if failed if err := batch.Write(); err != nil { log.Crit("Failed to update chain indexes and markers", "err", err) } // Update all in-memory chain markers in the last step - if updateHeads { - bc.hc.SetCurrentHeader(block.Header()) - bc.currentFastBlock.Store(block) - headFastBlockGauge.Update(int64(block.NumberU64())) - } + bc.hc.SetCurrentHeader(block.Header()) + + bc.currentFastBlock.Store(block) + headFastBlockGauge.Update(int64(block.NumberU64())) + bc.currentBlock.Store(block) headBlockGauge.Update(int64(block.NumberU64())) } @@ -877,12 +869,6 @@ const ( SideStatTy ) -// numberHash is just a container for a number and a hash, to represent a block -type numberHash struct { - number uint64 - hash common.Hash -} - // InsertReceiptChain attempts to complete an already existing header chain with // transaction and receipt data. func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts, ancientLimit uint64) (int, error) { @@ -928,13 +914,17 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [ // Rewind may have occurred, skip in that case. if bc.CurrentHeader().Number.Cmp(head.Number()) >= 0 { - currentFastBlock, td := bc.CurrentFastBlock(), bc.GetTd(head.Hash(), head.NumberU64()) - if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 { - rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) - bc.currentFastBlock.Store(head) - headFastBlockGauge.Update(int64(head.NumberU64())) - return true + reorg, err := bc.forker.ReorgNeeded(bc.CurrentFastBlock().Header(), head.Header()) + if err != nil { + log.Warn("Reorg failed", "err", err) + return false + } else if !reorg { + return false } + rawdb.WriteHeadFastBlockHash(bc.db, head.Hash()) + bc.currentFastBlock.Store(head) + headFastBlockGauge.Update(int64(head.NumberU64())) + return true } return false } @@ -1181,30 +1171,15 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error { return nil } -// WriteBlockWithState writes the block and all associated state to the database. 
-func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
-	if !bc.chainmu.TryLock() {
-		return NonStatTy, errInsertionInterrupted
-	}
-	defer bc.chainmu.Unlock()
-	return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
-}
-
-// writeBlockWithState writes the block and all associated state to the database,
-// but is expects the chain mutex to be held.
-func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
-	if bc.insertStopped() {
-		return NonStatTy, errInsertionInterrupted
-	}
-
+// writeBlockWithState writes block, metadata and corresponding state data to the
+// database.
+func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB) error {
 	// Calculate the total difficulty of the block
 	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
 	if ptd == nil {
-		return NonStatTy, consensus.ErrUnknownAncestor
+		return consensus.ErrUnknownAncestor
 	}
 	// Make sure no inconsistent state is leaked during insertion
-	currentBlock := bc.CurrentBlock()
-	localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
 	externTd := new(big.Int).Add(block.Difficulty(), ptd)
 
 	// Irrelevant of the canonical status, write the block itself to the database.
@@ -1222,15 +1197,13 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	// Commit all cached state changes into underlying memory database.
 	root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
 	if err != nil {
-		return NonStatTy, err
+		return err
 	}
 	triedb := bc.stateCache.TrieDB()
 
 	// If we're running an archive node, always flush
 	if bc.cacheConfig.TrieDirtyDisabled {
-		if err := triedb.Commit(root, false, nil); err != nil {
-			return NonStatTy, err
-		}
+		return triedb.Commit(root, false, nil)
 	} else {
 		// Full but not archive node, do proper garbage collection
 		triedb.Reference(root, common.Hash{}) // metadata reference to keep trie alive
@@ -1278,23 +1251,30 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 			}
 		}
 	}
-	// If the total difficulty is higher than our known, add it to the canonical chain
-	// Second clause in the if statement reduces the vulnerability to selfish mining.
-	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
-	reorg := externTd.Cmp(localTd) > 0
-	currentBlock = bc.CurrentBlock()
-	if !reorg && externTd.Cmp(localTd) == 0 {
-		// Split same-difficulty blocks by number, then preferentially select
-		// the block generated by the local miner as the canonical block.
-		if block.NumberU64() < currentBlock.NumberU64() {
-			reorg = true
-		} else if block.NumberU64() == currentBlock.NumberU64() {
-			var currentPreserve, blockPreserve bool
-			if bc.shouldPreserve != nil {
-				currentPreserve, blockPreserve = bc.shouldPreserve(currentBlock), bc.shouldPreserve(block)
-			}
-			reorg = !currentPreserve && (blockPreserve || mrand.Float64() < 0.5)
-		}
+	return nil
+}
+
+// WriteBlockAndSetHead writes the block and all associated state to the database,
+// and applies the given block as the new chain head.
+func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
+	if !bc.chainmu.TryLock() {
+		return NonStatTy, errChainStopped
+	}
+	defer bc.chainmu.Unlock()
+
+	return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent)
+}
+
+// writeBlockAndSetHead writes the block and all associated state to the database,
+// and also applies the given block as the new chain head. This function expects
+// the chain mutex to be held.
+func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
+	if err := bc.writeBlockWithState(block, receipts, logs, state); err != nil {
+		return NonStatTy, err
+	}
+	currentBlock := bc.CurrentBlock()
+	reorg, err := bc.forker.ReorgNeeded(currentBlock.Header(), block.Header())
+	if err != nil {
+		return NonStatTy, err
 	}
 	if reorg {
 		// Reorganise the chain if the parent is not the head block
@@ -1313,6 +1293,13 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	}
 	bc.futureBlocks.Remove(block.Hash())
 
+	ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
+	if ptd == nil {
+		return NonStatTy, consensus.ErrUnknownAncestor
+	}
+	// Make sure no inconsistent state is leaked during insertion
+	externTd := new(big.Int).Add(block.Difficulty(), ptd)
+
 	if status == CanonStatTy {
 		pluginNewHead(block, block.Hash(), logs, externTd)
 		bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
@@ -1321,7 +1308,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	}
 	// In theory we should fire a ChainHeadEvent when we inject
 	// a canonical block, but sometimes we can insert a batch of
-	// canonicial blocks. Avoid firing too much ChainHeadEvents,
+	// canonical blocks. Avoid firing too many ChainHeadEvents;
 	// we will fire an accumulated ChainHeadEvent and disable firing
 	// the event here.
 	if emitHeadEvent {
@@ -1337,11 +1324,18 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 // addFutureBlock checks if the block is within the max allowed window to get
 // accepted for future processing, and returns an error if the block is too far
 // ahead and was not added.
+//
+// TODO: after the transition, future blocks shouldn't be kept, because they
+// are no longer checked on the Geth side.
 func (bc *BlockChain) addFutureBlock(block *types.Block) error {
 	max := uint64(time.Now().Unix() + maxTimeFutureBlocks)
 	if block.Time() > max {
 		return fmt.Errorf("future block timestamp %v > allowed %v", block.Time(), max)
 	}
+	if block.Difficulty().Cmp(common.Big0) == 0 {
+		// Never add PoS blocks into the future queue
+		return nil
+	}
 	bc.futureBlocks.Add(block.Hash(), block)
 	return nil
}
@@ -1349,15 +1343,12 @@ func (bc *BlockChain) addFutureBlock(block *types.Block) error {
 // InsertChain attempts to insert the given batch of blocks into the canonical
 // chain or, otherwise, create a fork. If an error is returned it will return
 // the index number of the failing block as well as an error describing what went
-// wrong.
-//
-// After insertion is done, all accumulated events will be fired.
+// wrong. After insertion is done, all accumulated events will be fired.
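Aside: `bc.forker.ReorgNeeded` now encapsulates the head-selection rule that used to be inlined here. A condensed, hypothetical rendering of the pre-merge behaviour (the real implementation lives in the new ForkChoice type this diff introduces):

```go
// reorgNeededPreMerge sketches the classic rule: higher total difficulty wins;
// on equal difficulty, the lower block number is preferred (the random and
// miner-preference tie-breakers are omitted here). This blunts selfish mining.
// Post-merge, the fork choice instead trusts the external consensus engine.
func reorgNeededPreMerge(localTd, externTd *big.Int, localNum, externNum uint64) bool {
	if c := externTd.Cmp(localTd); c != 0 {
		return c > 0
	}
	return externNum < localNum
}
```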
func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { // Sanity check that we have something meaningful to import if len(chain) == 0 { return 0, nil } - bc.blockProcFeed.Send(true) defer bc.blockProcFeed.Send(false) @@ -1376,26 +1367,12 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { prev.Hash().Bytes()[:4], i, block.NumberU64(), block.Hash().Bytes()[:4], block.ParentHash().Bytes()[:4]) } } - - // Pre-check passed, start the full block imports. + // Pre-checks passed, start the full block imports if !bc.chainmu.TryLock() { return 0, errChainStopped } defer bc.chainmu.Unlock() - return bc.insertChain(chain, true) -} - -// InsertChainWithoutSealVerification works exactly the same -// except for seal verification, seal verification is omitted -func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (int, error) { - bc.blockProcFeed.Send(true) - defer bc.blockProcFeed.Send(false) - - if !bc.chainmu.TryLock() { - return 0, errChainStopped - } - defer bc.chainmu.Unlock() - return bc.insertChain(types.Blocks([]*types.Block{block}), false) + return bc.insertChain(chain, true, true) } // insertChain is the internal implementation of InsertChain, which assumes that @@ -1406,7 +1383,7 @@ func (bc *BlockChain) InsertChainWithoutSealVerification(block *types.Block) (in // racey behaviour. If a sidechain import is in progress, and the historic state // is imported, but then new canon-head is added before the actual sidechain // completes, then the historic state could be pruned again -func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) { +func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) (int, error) { // If the chain is terminating, don't even bother starting up. if bc.insertStopped() { return 0, nil @@ -1448,14 +1425,23 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // from the canonical chain, which has not been verified. // Skip all known blocks that are behind us. var ( - current = bc.CurrentBlock() - localTd = bc.GetTd(current.Hash(), current.NumberU64()) - externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil + reorg bool + current = bc.CurrentBlock() ) for block != nil && bc.skipBlock(err, it) { - externTd = new(big.Int).Add(externTd, block.Difficulty()) - if localTd.Cmp(externTd) < 0 { - break + reorg, err = bc.forker.ReorgNeeded(current.Header(), block.Header()) + if err != nil { + return it.index, err + } + if reorg { + // Switch to import mode if the forker says the reorg is necessary + // and also the block is not on the canonical chain. + // In eth2 the forker always returns true for reorg decision (blindly trusting + // the external consensus engine), but in order to prevent the unnecessary + // reorgs when importing known blocks, the special case is handled here. 
+ if block.NumberU64() > current.NumberU64() || bc.GetCanonicalHash(block.NumberU64()) != block.Hash() { + break + } } log.Debug("Ignoring already known block", "number", block.Number(), "hash", block.Hash()) stats.ignored++ @@ -1482,11 +1468,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // Falls through to the block import } switch { - // First block is pruned, insert as sidechain and reorg only if TD grows enough + // First block is pruned case errors.Is(err, consensus.ErrPrunedAncestor): - log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash()) - return bc.insertSideChain(block, it) - + if setHead { + // First block is pruned, insert as sidechain and reorg only if TD grows enough + log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash()) + return bc.insertSideChain(block, it) + } else { + // We're post-merge and the parent is pruned, try to recover the parent state + log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash()) + return it.index, bc.recoverAncestors(block) + } // First block is future, shove it (and all children) to the future queue (unknown ancestor) case errors.Is(err, consensus.ErrFutureBlock) || (errors.Is(err, consensus.ErrUnknownAncestor) && bc.futureBlocks.Contains(it.first().ParentHash())): for block != nil && (it.index == 0 || errors.Is(err, consensus.ErrUnknownAncestor)) { @@ -1641,12 +1633,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // Update the metrics touched during block validation accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete, we can mark them storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete, we can mark them - blockValidationTimer.Update(time.Since(substart) - (statedb.AccountHashes + statedb.StorageHashes - triehash)) // Write the block to the chain and get the status. substart = time.Now() - status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false) + var status WriteStatus + if !setHead { + // Don't set the head, only insert the block + err = bc.writeBlockWithState(block, receipts, logs, statedb) + } else { + status, err = bc.writeBlockAndSetHead(block, receipts, logs, statedb, false) + } atomic.StoreUint32(&followupInterrupt, 1) if err != nil { return it.index, err @@ -1659,6 +1656,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits) blockInsertTimer.UpdateSince(start) + if !setHead { + // We did not setHead, so we don't have any stats to update + log.Info("Inserted block", "number", block.Number(), "hash", block.Hash(), "txs", len(block.Transactions()), "elapsed", common.PrettyDuration(time.Since(start))) + return it.index, nil + } + switch status { case CanonStatTy: log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), @@ -1717,10 +1720,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // // The method writes all (header-and-body-valid) blocks to disk, then tries to // switch over to the new chain if the TD exceeded the current chain. +// insertSideChain is only used pre-merge. 
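Aside: post-merge, block insertion becomes the two-step flow sketched below; `InsertBlockWithoutSetHead` and `SetChainHead` are both introduced later in this diff, and the calls are driven by the consensus layer:

```go
// Step 1: execute and persist the block, but leave the chain head untouched.
if err := bc.InsertBlockWithoutSetHead(block); err != nil {
	return err
}
// Step 2 (possibly much later, once the consensus layer picks this block):
// reorg onto it and emit the head events.
if err := bc.SetChainHead(block); err != nil {
	return err
}
```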
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
 	var (
-		externTd *big.Int
-		current  = bc.CurrentBlock()
+		externTd  *big.Int
+		lastBlock = block
+		current   = bc.CurrentBlock()
 	)
 	// The first sidechain block error is already verified to be ErrPrunedAncestor.
 	// Since we don't import them here, we expect ErrUnknownAncestor for the remaining
@@ -1771,6 +1776,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
 				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
 				"root", block.Root())
 		}
+		lastBlock = block
 	}
 	// At this point, we've written all sidechain blocks to database. Loop ended
 	// either on some other error or all were processed. If there was some other
 	// error, we can ignore the rest of those blocks.
 	//
 	// If the externTd was larger than our local TD, we now need to reimport the previous
 	// blocks to regenerate the required state
-	localTd := bc.GetTd(current.Hash(), current.NumberU64())
-	if localTd.Cmp(externTd) > 0 {
+	reorg, err := bc.forker.ReorgNeeded(current.Header(), lastBlock.Header())
+	if err != nil {
+		return it.index, err
+	}
+	if !reorg {
+		localTd := bc.GetTd(current.Hash(), current.NumberU64())
 		log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
 		return it.index, err
 	}
@@ -1815,7 +1825,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
 		// memory here.
 		if len(blocks) >= 2048 || memory > 64*1024*1024 {
 			log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
-			if _, err := bc.insertChain(blocks, false); err != nil {
+			if _, err := bc.insertChain(blocks, false, true); err != nil {
 				return 0, err
 			}
 			blocks, memory = blocks[:0], 0
@@ -1829,14 +1839,98 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
 	}
 	if len(blocks) > 0 {
 		log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
-		return bc.insertChain(blocks, false)
+		return bc.insertChain(blocks, false, true)
 	}
 	return 0, nil
 }
 
+// recoverAncestors finds the closest ancestor with available state and re-executes
+// all the ancestor blocks since then.
+// recoverAncestors is only used post-merge.
+func (bc *BlockChain) recoverAncestors(block *types.Block) error { + // Gather all the sidechain hashes (full blocks may be memory heavy) + var ( + hashes []common.Hash + numbers []uint64 + parent = block + ) + for parent != nil && !bc.HasState(parent.Root()) { + hashes = append(hashes, parent.Hash()) + numbers = append(numbers, parent.NumberU64()) + parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) + + // If the chain is terminating, stop iteration + if bc.insertStopped() { + log.Debug("Abort during blocks iteration") + return errInsertionInterrupted + } + } + if parent == nil { + return errors.New("missing parent") + } + // Import all the pruned blocks to make the state available + for i := len(hashes) - 1; i >= 0; i-- { + // If the chain is terminating, stop processing blocks + if bc.insertStopped() { + log.Debug("Abort during blocks processing") + return errInsertionInterrupted + } + var b *types.Block + if i == 0 { + b = block + } else { + b = bc.GetBlock(hashes[i], numbers[i]) + } + if _, err := bc.insertChain(types.Blocks{b}, false, false); err != nil { + return err + } + } + return nil +} + +// collectLogs collects the logs that were generated or removed during +// the processing of the block that corresponds with the given hash. +// These logs are later announced as deleted or reborn. +func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { + number := bc.hc.GetBlockNumber(hash) + if number == nil { + return nil + } + receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) + + var logs []*types.Log + for _, receipt := range receipts { + for _, log := range receipt.Logs { + l := *log + if removed { + l.Removed = true + } + logs = append(logs, &l) + } + } + return logs +} + +// mergeLogs returns a merged log slice with specified sort order. +func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log { + var ret []*types.Log + if reverse { + for i := len(logs) - 1; i >= 0; i-- { + ret = append(ret, logs[i]...) + } + } else { + for i := 0; i < len(logs); i++ { + ret = append(ret, logs[i]...) + } + } + return ret +} + // reorg takes two blocks, an old chain and a new chain and will reconstruct the // blocks and inserts them to be part of the new canonical chain and accumulates // potential missing transactions and post an event about them. +// Note the new head block won't be processed here, callers need to handle it +// externally. func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { var ( newChain types.Blocks @@ -1848,49 +1942,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { deletedLogs [][]*types.Log rebirthLogs [][]*types.Log - - // collectLogs collects the logs that were generated or removed during - // the processing of the block that corresponds with the given hash. - // These logs are later announced as deleted or reborn - collectLogs = func(hash common.Hash, removed bool) { - number := bc.hc.GetBlockNumber(hash) - if number == nil { - return - } - receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) - - var logs []*types.Log - for _, receipt := range receipts { - for _, log := range receipt.Logs { - l := *log - if removed { - l.Removed = true - } - logs = append(logs, &l) - } - } - if len(logs) > 0 { - if removed { - deletedLogs = append(deletedLogs, logs) - } else { - rebirthLogs = append(rebirthLogs, logs) - } - } - } - // mergeLogs returns a merged log slice with specified sort order. 
-		mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
-			var ret []*types.Log
-			if reverse {
-				for i := len(logs) - 1; i >= 0; i-- {
-					ret = append(ret, logs[i]...)
-				}
-			} else {
-				for i := 0; i < len(logs); i++ {
-					ret = append(ret, logs[i]...)
-				}
-			}
-			return ret
-		}
 	)
 	// Reduce the longer chain to the same number as the shorter one
 	if oldBlock.NumberU64() > newBlock.NumberU64() {
@@ -1898,7 +1949,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
 			oldChain = append(oldChain, oldBlock)
 			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-			collectLogs(oldBlock.Hash(), true)
+
+			// Collect deleted logs for notification
+			logs := bc.collectLogs(oldBlock.Hash(), true)
+			if len(logs) > 0 {
+				deletedLogs = append(deletedLogs, logs)
+			}
 		}
 	} else {
 		// New chain is longer, stash all blocks away for subsequent insertion
@@ -1923,8 +1979,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		// Remove an old block as well as stash away a new block
 		oldChain = append(oldChain, oldBlock)
 		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-		collectLogs(oldBlock.Hash(), true)
+		// Collect deleted logs for notification
+		logs := bc.collectLogs(oldBlock.Hash(), true)
+		if len(logs) > 0 {
+			deletedLogs = append(deletedLogs, logs)
+		}
 		newChain = append(newChain, newBlock)
 
 		// Step back with both chains
@@ -1951,8 +2011,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		blockReorgAddMeter.Mark(int64(len(newChain)))
 		blockReorgDropMeter.Mark(int64(len(oldChain)))
 		blockReorgMeter.Mark(1)
+	} else if len(newChain) > 0 {
+		// Special case in the post-merge stage: the current head is an
+		// ancestor of the new head, but the two blocks are not consecutive
+		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].NumberU64(), "hash", newChain[0].Hash())
+		blockReorgAddMeter.Mark(int64(len(newChain)))
 	} else {
-		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
+		// len(newChain) == 0 && len(oldChain) > 0
+		// rewind the canonical chain to a lower point.
+		log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
 	}
 	// Insert the new chain (except the head block, in reverse order),
 	// taking care of the proper incremental order.
@@ -1961,8 +2028,10 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		bc.writeHeadBlock(newChain[i])
 
 		// Collect reborn logs due to chain reorg
-		collectLogs(newChain[i].Hash(), false)
-
+		logs := bc.collectLogs(newChain[i].Hash(), false)
+		if len(logs) > 0 {
+			rebirthLogs = append(rebirthLogs, logs)
+		}
 		// Collect the new added transactions.
 		addedTxs = append(addedTxs, newChain[i].Transactions()...)
 	}
@@ -2002,12 +2071,54 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 	return nil
 }
 
-// futureBlocksLoop processes the 'future block' queue.
-func (bc *BlockChain) futureBlocksLoop() {
-	defer bc.wg.Done()
+// InsertBlockWithoutSetHead executes the block, runs the necessary verification
+// upon it and then persists the block and the associated state into the database.
+// The key difference from InsertChain is that it won't update the canonical chain.
+// It relies on an additional SetChainHead call to finalize the entire procedure.
+func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
+	if !bc.chainmu.TryLock() {
+		return errChainStopped
+	}
+	defer bc.chainmu.Unlock()
+
+	_, err := bc.insertChain(types.Blocks{block}, true, false)
+	return err
+}
+
+// SetChainHead rewinds the chain to set the new head block as the specified
+// block. It's possible that after the reorg the relevant state of the new
+// head is missing; it can be fixed by inserting a new block, which triggers
+// the re-execution.
+func (bc *BlockChain) SetChainHead(newBlock *types.Block) error {
+	if !bc.chainmu.TryLock() {
+		return errChainStopped
+	}
+	defer bc.chainmu.Unlock()
+
+	// Run the reorg if necessary and set the given block as new head.
+	if newBlock.ParentHash() != bc.CurrentBlock().Hash() {
+		if err := bc.reorg(bc.CurrentBlock(), newBlock); err != nil {
+			return err
+		}
+	}
+	bc.writeHeadBlock(newBlock)
+
+	// Emit events
+	logs := bc.collectLogs(newBlock.Hash(), false)
+	bc.chainFeed.Send(ChainEvent{Block: newBlock, Hash: newBlock.Hash(), Logs: logs})
+	if len(logs) > 0 {
+		bc.logsFeed.Send(logs)
+	}
+	bc.chainHeadFeed.Send(ChainHeadEvent{Block: newBlock})
+	log.Info("Set the chain head", "number", newBlock.Number(), "hash", newBlock.Hash())
+	return nil
+}
+
+func (bc *BlockChain) updateFutureBlocks() {
 	futureTimer := time.NewTicker(5 * time.Second)
 	defer futureTimer.Stop()
+	defer bc.wg.Done()
 	for {
 		select {
 		case <-futureTimer.C:
@@ -2103,7 +2214,14 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
 		// If a previous indexing existed, make sure that we fill in any missing entries
 		if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
 			if *tail > 0 {
-				rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit)
+				// It can happen that the chain is rewound to a historical point
+				// which is even lower than the index tail; recap the indexing
+				// target to the new head to avoid reading non-existent block bodies.
+				end := *tail
+				if end > head+1 {
+					end = head + 1
+				}
+				rawdb.IndexTransactions(bc.db, 0, end, bc.quit)
 			}
 			return
 		}
@@ -2188,6 +2306,6 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i
 		return 0, errChainStopped
 	}
 	defer bc.chainmu.Unlock()
-	_, err := bc.hc.InsertHeaderChain(chain, start)
+	_, err := bc.hc.InsertHeaderChain(chain, start, bc.forker)
 	return 0, err
 }
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go
index beaa57b0c..9e966df4e 100644
--- a/core/blockchain_reader.go
+++ b/core/blockchain_reader.go
@@ -73,6 +73,12 @@ func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
 	return bc.hc.GetHeaderByNumber(number)
 }
 
+// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
+// backwards from the given number.
+func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
+	return bc.hc.GetHeadersFrom(number, count)
+}
+
 // GetBody retrieves a block body (transactions and uncles) from the database by
 // hash, caching it if found.
 func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go
index f4f762078..eb5025ed5 100644
--- a/core/blockchain_repair_test.go
+++ b/core/blockchain_repair_test.go
@@ -79,10 +79,10 @@ func testShortRepair(t *testing.T, snapshots bool) {
 // already committed, after which the process crashed.
In this case we expect the full // chain to be rolled back to the committed block, but the chain data itself left in // the database for replaying. -func TestShortFastSyncedRepair(t *testing.T) { testShortFastSyncedRepair(t, false) } -func TestShortFastSyncedRepairWithSnapshots(t *testing.T) { testShortFastSyncedRepair(t, true) } +func TestShortSnapSyncedRepair(t *testing.T) { testShortSnapSyncedRepair(t, false) } +func TestShortSnapSyncedRepairWithSnapshots(t *testing.T) { testShortSnapSyncedRepair(t, true) } -func testShortFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -119,10 +119,10 @@ func testShortFastSyncedRepair(t *testing.T, snapshots bool) { // not yet committed, but the process crashed. In this case we expect the chain to // detect that it was fast syncing and not delete anything, since we can just pick // up directly where we left off. -func TestShortFastSyncingRepair(t *testing.T) { testShortFastSyncingRepair(t, false) } -func TestShortFastSyncingRepairWithSnapshots(t *testing.T) { testShortFastSyncingRepair(t, true) } +func TestShortSnapSyncingRepair(t *testing.T) { testShortSnapSyncingRepair(t, false) } +func TestShortSnapSyncingRepairWithSnapshots(t *testing.T) { testShortSnapSyncingRepair(t, true) } -func testShortFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -203,14 +203,14 @@ func testShortOldForkedRepair(t *testing.T, snapshots bool) { // crashed. In this test scenario the side chain is below the committed block. In // this case we expect the canonical chain to be rolled back to the committed block, // but the chain data itself left in the database for replaying. -func TestShortOldForkedFastSyncedRepair(t *testing.T) { - testShortOldForkedFastSyncedRepair(t, false) +func TestShortOldForkedSnapSyncedRepair(t *testing.T) { + testShortOldForkedSnapSyncedRepair(t, false) } -func TestShortOldForkedFastSyncedRepairWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncedRepair(t, true) +func TestShortOldForkedSnapSyncedRepairWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncedRepair(t, true) } -func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -250,14 +250,14 @@ func testShortOldForkedFastSyncedRepair(t *testing.T, snapshots bool) { // test scenario the side chain is below the committed block. In this case we expect // the chain to detect that it was fast syncing and not delete anything, since we // can just pick up directly where we left off. 
-func TestShortOldForkedFastSyncingRepair(t *testing.T) { - testShortOldForkedFastSyncingRepair(t, false) +func TestShortOldForkedSnapSyncingRepair(t *testing.T) { + testShortOldForkedSnapSyncingRepair(t, false) } -func TestShortOldForkedFastSyncingRepairWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncingRepair(t, true) +func TestShortOldForkedSnapSyncingRepairWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncingRepair(t, true) } -func testShortOldForkedFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -340,14 +340,14 @@ func testShortNewlyForkedRepair(t *testing.T, snapshots bool) { // crashed. In this test scenario the side chain reaches above the committed block. // In this case we expect the canonical chain to be rolled back to the committed // block, but the chain data itself left in the database for replaying. -func TestShortNewlyForkedFastSyncedRepair(t *testing.T) { - testShortNewlyForkedFastSyncedRepair(t, false) +func TestShortNewlyForkedSnapSyncedRepair(t *testing.T) { + testShortNewlyForkedSnapSyncedRepair(t, false) } -func TestShortNewlyForkedFastSyncedRepairWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncedRepair(t, true) +func TestShortNewlyForkedSnapSyncedRepairWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncedRepair(t, true) } -func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6 @@ -387,14 +387,14 @@ func testShortNewlyForkedFastSyncedRepair(t *testing.T, snapshots bool) { // this test scenario the side chain reaches above the committed block. In this // case we expect the chain to detect that it was fast syncing and not delete // anything, since we can just pick up directly where we left off. -func TestShortNewlyForkedFastSyncingRepair(t *testing.T) { - testShortNewlyForkedFastSyncingRepair(t, false) +func TestShortNewlyForkedSnapSyncingRepair(t *testing.T) { + testShortNewlyForkedSnapSyncingRepair(t, false) } -func TestShortNewlyForkedFastSyncingRepairWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncingRepair(t, true) +func TestShortNewlyForkedSnapSyncingRepairWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncingRepair(t, true) } -func testShortNewlyForkedFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6 @@ -475,14 +475,14 @@ func testShortReorgedRepair(t *testing.T, snapshots bool) { // the fast sync pivot point was already committed to disk and then the process // crashed. In this case we expect the canonical chain to be rolled back to the // committed block, but the chain data itself left in the database for replaying. 
-func TestShortReorgedFastSyncedRepair(t *testing.T) { - testShortReorgedFastSyncedRepair(t, false) +func TestShortReorgedSnapSyncedRepair(t *testing.T) { + testShortReorgedSnapSyncedRepair(t, false) } -func TestShortReorgedFastSyncedRepairWithSnapshots(t *testing.T) { - testShortReorgedFastSyncedRepair(t, true) +func TestShortReorgedSnapSyncedRepairWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncedRepair(t, true) } -func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncedRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -521,14 +521,14 @@ func testShortReorgedFastSyncedRepair(t *testing.T, snapshots bool) { // the fast sync pivot point was not yet committed, but the process crashed. In // this case we expect the chain to detect that it was fast syncing and not delete // anything, since we can just pick up directly where we left off. -func TestShortReorgedFastSyncingRepair(t *testing.T) { - testShortReorgedFastSyncingRepair(t, false) +func TestShortReorgedSnapSyncingRepair(t *testing.T) { + testShortReorgedSnapSyncingRepair(t, false) } -func TestShortReorgedFastSyncingRepairWithSnapshots(t *testing.T) { - testShortReorgedFastSyncingRepair(t, true) +func TestShortReorgedSnapSyncingRepairWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncingRepair(t, true) } -func testShortReorgedFastSyncingRepair(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -656,14 +656,14 @@ func testLongDeepRepair(t *testing.T, snapshots bool) { // sync pivot point - newer than the ancient limit - was already committed, after // which the process crashed. In this case we expect the chain to be rolled back // to the committed block, with everything afterwads kept as fast sync data. -func TestLongFastSyncedShallowRepair(t *testing.T) { - testLongFastSyncedShallowRepair(t, false) +func TestLongSnapSyncedShallowRepair(t *testing.T) { + testLongSnapSyncedShallowRepair(t, false) } -func TestLongFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongFastSyncedShallowRepair(t, true) +func TestLongSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongSnapSyncedShallowRepair(t, true) } -func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -705,10 +705,10 @@ func testLongFastSyncedShallowRepair(t *testing.T, snapshots bool) { // sync pivot point - older than the ancient limit - was already committed, after // which the process crashed. In this case we expect the chain to be rolled back // to the committed block, with everything afterwads deleted. 
-func TestLongFastSyncedDeepRepair(t *testing.T) { testLongFastSyncedDeepRepair(t, false) } -func TestLongFastSyncedDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncedDeepRepair(t, true) } +func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) } +func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) } -func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -750,14 +750,14 @@ func testLongFastSyncedDeepRepair(t *testing.T, snapshots bool) { // process crashed. In this case we expect the chain to detect that it was fast // syncing and not delete anything, since we can just pick up directly where we // left off. -func TestLongFastSyncingShallowRepair(t *testing.T) { - testLongFastSyncingShallowRepair(t, false) +func TestLongSnapSyncingShallowRepair(t *testing.T) { + testLongSnapSyncingShallowRepair(t, false) } -func TestLongFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongFastSyncingShallowRepair(t, true) +func TestLongSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongSnapSyncingShallowRepair(t, true) } -func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -800,10 +800,10 @@ func testLongFastSyncingShallowRepair(t *testing.T, snapshots bool) { // process crashed. In this case we expect the chain to detect that it was fast // syncing and not delete anything, since we can just pick up directly where we // left off. -func TestLongFastSyncingDeepRepair(t *testing.T) { testLongFastSyncingDeepRepair(t, false) } -func TestLongFastSyncingDeepRepairWithSnapshots(t *testing.T) { testLongFastSyncingDeepRepair(t, true) } +func TestLongSnapSyncingDeepRepair(t *testing.T) { testLongSnapSyncingDeepRepair(t, false) } +func TestLongSnapSyncingDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncingDeepRepair(t, true) } -func testLongFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -946,14 +946,14 @@ func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) { // the side chain is below the committed block. In this case we expect the chain // to be rolled back to the committed block, with everything afterwads kept as // fast sync data; the side chain completely nuked by the freezer. 
-func TestLongOldForkedFastSyncedShallowRepair(t *testing.T) { - testLongOldForkedFastSyncedShallowRepair(t, false) +func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) { + testLongOldForkedSnapSyncedShallowRepair(t, false) } -func TestLongOldForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedShallowRepair(t, true) +func TestLongOldForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedShallowRepair(t, true) } -func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -998,14 +998,14 @@ func testLongOldForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { // the side chain is below the committed block. In this case we expect the canonical // chain to be rolled back to the committed block, with everything afterwads deleted; // the side chain completely nuked by the freezer. -func TestLongOldForkedFastSyncedDeepRepair(t *testing.T) { - testLongOldForkedFastSyncedDeepRepair(t, false) +func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) { + testLongOldForkedSnapSyncedDeepRepair(t, false) } -func TestLongOldForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedDeepRepair(t, true) +func TestLongOldForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedDeepRepair(t, true) } -func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1049,14 +1049,14 @@ func testLongOldForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { // chain is below the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. -func TestLongOldForkedFastSyncingShallowRepair(t *testing.T) { - testLongOldForkedFastSyncingShallowRepair(t, false) +func TestLongOldForkedSnapSyncingShallowRepair(t *testing.T) { + testLongOldForkedSnapSyncingShallowRepair(t, false) } -func TestLongOldForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingShallowRepair(t, true) +func TestLongOldForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingShallowRepair(t, true) } -func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1101,14 +1101,14 @@ func testLongOldForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { // chain is below the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. 
-func TestLongOldForkedFastSyncingDeepRepair(t *testing.T) { - testLongOldForkedFastSyncingDeepRepair(t, false) +func TestLongOldForkedSnapSyncingDeepRepair(t *testing.T) { + testLongOldForkedSnapSyncingDeepRepair(t, false) } -func TestLongOldForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingDeepRepair(t, true) +func TestLongOldForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingDeepRepair(t, true) } -func testLongOldForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1252,14 +1252,14 @@ func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) { // the side chain is above the committed block. In this case we expect the chain // to be rolled back to the committed block, with everything afterwads kept as fast // sync data; the side chain completely nuked by the freezer. -func TestLongNewerForkedFastSyncedShallowRepair(t *testing.T) { - testLongNewerForkedFastSyncedShallowRepair(t, false) +func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) { + testLongNewerForkedSnapSyncedShallowRepair(t, false) } -func TestLongNewerForkedFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncedShallowRepair(t, true) +func TestLongNewerForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncedShallowRepair(t, true) } -func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1304,14 +1304,14 @@ func testLongNewerForkedFastSyncedShallowRepair(t *testing.T, snapshots bool) { // the side chain is above the committed block. In this case we expect the canonical // chain to be rolled back to the committed block, with everything afterwads deleted; // the side chain completely nuked by the freezer. -func TestLongNewerForkedFastSyncedDeepRepair(t *testing.T) { - testLongNewerForkedFastSyncedDeepRepair(t, false) +func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) { + testLongNewerForkedSnapSyncedDeepRepair(t, false) } -func TestLongNewerForkedFastSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncedDeepRepair(t, true) +func TestLongNewerForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncedDeepRepair(t, true) } -func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1355,14 +1355,14 @@ func testLongNewerForkedFastSyncedDeepRepair(t *testing.T, snapshots bool) { // chain is above the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. 
-func TestLongNewerForkedFastSyncingShallowRepair(t *testing.T) { - testLongNewerForkedFastSyncingShallowRepair(t, false) +func TestLongNewerForkedSnapSyncingShallowRepair(t *testing.T) { + testLongNewerForkedSnapSyncingShallowRepair(t, false) } -func TestLongNewerForkedFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingShallowRepair(t, true) +func TestLongNewerForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingShallowRepair(t, true) } -func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1407,14 +1407,14 @@ func testLongNewerForkedFastSyncingShallowRepair(t *testing.T, snapshots bool) { // chain is above the committed block. In this case we expect the chain to detect // that it was fast syncing and not delete anything. The side chain is completely // nuked by the freezer. -func TestLongNewerForkedFastSyncingDeepRepair(t *testing.T) { - testLongNewerForkedFastSyncingDeepRepair(t, false) +func TestLongNewerForkedSnapSyncingDeepRepair(t *testing.T) { + testLongNewerForkedSnapSyncingDeepRepair(t, false) } -func TestLongNewerForkedFastSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingDeepRepair(t, true) +func TestLongNewerForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingDeepRepair(t, true) } -func testLongNewerForkedFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1552,14 +1552,14 @@ func testLongReorgedDeepRepair(t *testing.T, snapshots bool) { // expect the chain to be rolled back to the committed block, with everything // afterwads kept as fast sync data. The side chain completely nuked by the // freezer. -func TestLongReorgedFastSyncedShallowRepair(t *testing.T) { - testLongReorgedFastSyncedShallowRepair(t, false) +func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) { + testLongReorgedSnapSyncedShallowRepair(t, false) } -func TestLongReorgedFastSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedShallowRepair(t, true) +func TestLongReorgedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedShallowRepair(t, true) } -func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1603,14 +1603,14 @@ func testLongReorgedFastSyncedShallowRepair(t *testing.T, snapshots bool) { // was already committed to disk and then the process crashed. In this case we // expect the canonical chains to be rolled back to the committed block, with // everything afterwads deleted. The side chain completely nuked by the freezer. 
-func TestLongReorgedFastSyncedDeepRepair(t *testing.T) { - testLongReorgedFastSyncedDeepRepair(t, false) +func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) { + testLongReorgedSnapSyncedDeepRepair(t, false) } -func TestLongReorgedFastSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedDeepRepair(t, true) +func TestLongReorgedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedDeepRepair(t, true) } -func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1653,14 +1653,14 @@ func testLongReorgedFastSyncedDeepRepair(t *testing.T, snapshots bool) { // was not yet committed, but the process crashed. In this case we expect the // chain to detect that it was fast syncing and not delete anything, since we // can just pick up directly where we left off. -func TestLongReorgedFastSyncingShallowRepair(t *testing.T) { - testLongReorgedFastSyncingShallowRepair(t, false) +func TestLongReorgedSnapSyncingShallowRepair(t *testing.T) { + testLongReorgedSnapSyncingShallowRepair(t, false) } -func TestLongReorgedFastSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingShallowRepair(t, true) +func TestLongReorgedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingShallowRepair(t, true) } -func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1704,14 +1704,14 @@ func testLongReorgedFastSyncingShallowRepair(t *testing.T, snapshots bool) { // was not yet committed, but the process crashed. In this case we expect the // chain to detect that it was fast syncing and not delete anything, since we // can just pick up directly where we left off. 
-func TestLongReorgedFastSyncingDeepRepair(t *testing.T) { - testLongReorgedFastSyncingDeepRepair(t, false) +func TestLongReorgedSnapSyncingDeepRepair(t *testing.T) { + testLongReorgedSnapSyncingDeepRepair(t, false) } -func TestLongReorgedFastSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingDeepRepair(t, true) +func TestLongReorgedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingDeepRepair(t, true) } -func testLongReorgedFastSyncingDeepRepair(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1829,7 +1829,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // Pull the plug on the database, simulating a hard crash db.Close() - // Start a new blockchain back up and see where the repait leads us + // Start a new blockchain back up and see where the repair leads us db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index 27b6be6e1..b2b3a058a 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -194,10 +194,10 @@ func testShortSetHead(t *testing.T, snapshots bool) { // Everything above the sethead point should be deleted. In between the committed // block and the requested head the data can remain as "fast sync" data to avoid // redownloading it. -func TestShortFastSyncedSetHead(t *testing.T) { testShortFastSyncedSetHead(t, false) } -func TestShortFastSyncedSetHeadWithSnapshots(t *testing.T) { testShortFastSyncedSetHead(t, true) } +func TestShortSnapSyncedSetHead(t *testing.T) { testShortSnapSyncedSetHead(t, false) } +func TestShortSnapSyncedSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncedSetHead(t, true) } -func testShortFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -236,10 +236,10 @@ func testShortFastSyncedSetHead(t *testing.T, snapshots bool) { // detect that it was fast syncing and delete everything from the new head, since // we can just pick up fast syncing from there. The head full block should be set // to the genesis. -func TestShortFastSyncingSetHead(t *testing.T) { testShortFastSyncingSetHead(t, false) } -func TestShortFastSyncingSetHeadWithSnapshots(t *testing.T) { testShortFastSyncingSetHead(t, true) } +func TestShortSnapSyncingSetHead(t *testing.T) { testShortSnapSyncingSetHead(t, false) } +func TestShortSnapSyncingSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncingSetHead(t, true) } -func testShortFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // @@ -326,14 +326,14 @@ func testShortOldForkedSetHead(t *testing.T, snapshots bool) { // block. Everything above the sethead point should be deleted. In between the // committed block and the requested head the data can remain as "fast sync" data // to avoid redownloading it. The side chain should be left alone as it was shorter. 
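The testRepair harness that drives all of the repair scenarios in the file above follows one recipe: build a chain on a persistent database, close the database mid-flight to simulate a hard crash, then reopen it and assert which head the repair logic settles on. A condensed sketch of the reopen step, with the constructor call copied from the test body and the helper name reopenAfterCrash invented:

```go
package sketch

import (
	"testing"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// reopenAfterCrash (hypothetical) reopens the persistent LevelDB database and
// its ancient freezer after the simulated crash; the repair tests then rebuild
// the BlockChain on top of it and verify the recovered head markers.
func reopenAfterCrash(t *testing.T, datadir string) ethdb.Database {
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
	if err != nil {
		t.Fatalf("Failed to reopen persistent database: %v", err)
	}
	return db
}
```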
-func TestShortOldForkedFastSyncedSetHead(t *testing.T) { - testShortOldForkedFastSyncedSetHead(t, false) +func TestShortOldForkedSnapSyncedSetHead(t *testing.T) { + testShortOldForkedSnapSyncedSetHead(t, false) } -func TestShortOldForkedFastSyncedSetHeadWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncedSetHead(t, true) +func TestShortOldForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncedSetHead(t, true) } -func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -375,14 +375,14 @@ func testShortOldForkedFastSyncedSetHead(t *testing.T, snapshots bool) { // the chain to detect that it was fast syncing and delete everything from the new // head, since we can just pick up fast syncing from there. The head full block // should be set to the genesis. -func TestShortOldForkedFastSyncingSetHead(t *testing.T) { - testShortOldForkedFastSyncingSetHead(t, false) +func TestShortOldForkedSnapSyncingSetHead(t *testing.T) { + testShortOldForkedSnapSyncingSetHead(t, false) } -func TestShortOldForkedFastSyncingSetHeadWithSnapshots(t *testing.T) { - testShortOldForkedFastSyncingSetHead(t, true) +func TestShortOldForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) { + testShortOldForkedSnapSyncingSetHead(t, true) } -func testShortOldForkedFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortOldForkedSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3 @@ -478,14 +478,14 @@ func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortNewlyForkedFastSyncedSetHead(t *testing.T) { - testShortNewlyForkedFastSyncedSetHead(t, false) +func TestShortNewlyForkedSnapSyncedSetHead(t *testing.T) { + testShortNewlyForkedSnapSyncedSetHead(t, false) } -func TestShortNewlyForkedFastSyncedSetHeadWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncedSetHead(t, true) +func TestShortNewlyForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncedSetHead(t, true) } -func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8 @@ -531,14 +531,14 @@ func testShortNewlyForkedFastSyncedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. 
-func TestShortNewlyForkedFastSyncingSetHead(t *testing.T) { - testShortNewlyForkedFastSyncingSetHead(t, false) +func TestShortNewlyForkedSnapSyncingSetHead(t *testing.T) { + testShortNewlyForkedSnapSyncingSetHead(t, false) } -func TestShortNewlyForkedFastSyncingSetHeadWithSnapshots(t *testing.T) { - testShortNewlyForkedFastSyncingSetHead(t, true) +func TestShortNewlyForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) { + testShortNewlyForkedSnapSyncingSetHead(t, true) } -func testShortNewlyForkedFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortNewlyForkedSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8 @@ -634,14 +634,14 @@ func testShortReorgedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortReorgedFastSyncedSetHead(t *testing.T) { - testShortReorgedFastSyncedSetHead(t, false) +func TestShortReorgedSnapSyncedSetHead(t *testing.T) { + testShortReorgedSnapSyncedSetHead(t, false) } -func TestShortReorgedFastSyncedSetHeadWithSnapshots(t *testing.T) { - testShortReorgedFastSyncedSetHead(t, true) +func TestShortReorgedSnapSyncedSetHeadWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncedSetHead(t, true) } -func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncedSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -686,14 +686,14 @@ func testShortReorgedFastSyncedSetHead(t *testing.T, snapshots bool) { // The side chain could be left to be if the fork point was before the new head // we are deleting to, but it would be exceedingly hard to detect that case and // properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortReorgedFastSyncingSetHead(t *testing.T) { - testShortReorgedFastSyncingSetHead(t, false) +func TestShortReorgedSnapSyncingSetHead(t *testing.T) { + testShortReorgedSnapSyncingSetHead(t, false) } -func TestShortReorgedFastSyncingSetHeadWithSnapshots(t *testing.T) { - testShortReorgedFastSyncingSetHead(t, true) +func TestShortReorgedSnapSyncingSetHeadWithSnapshots(t *testing.T) { + testShortReorgedSnapSyncingSetHead(t, true) } -func testShortReorgedFastSyncingSetHead(t *testing.T, snapshots bool) { +func testShortReorgedSnapSyncingSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 @@ -829,14 +829,14 @@ func testLongDeepSetHead(t *testing.T, snapshots bool) { // back to the committed block. Everything above the sethead point should be // deleted. In between the committed block and the requested head the data can // remain as "fast sync" data to avoid redownloading it. 
-func TestLongFastSyncedShallowSetHead(t *testing.T) { - testLongFastSyncedShallowSetHead(t, false) +func TestLongSnapSyncedShallowSetHead(t *testing.T) { + testLongSnapSyncedShallowSetHead(t, false) } -func TestLongFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongFastSyncedShallowSetHead(t, true) +func TestLongSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongSnapSyncedShallowSetHead(t, true) } -func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -880,10 +880,10 @@ func testLongFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // which sethead was called. In this case we expect the full chain to be rolled // back to the committed block. Since the ancient limit was underflown, everything // needs to be deleted onwards to avoid creating a gap. -func TestLongFastSyncedDeepSetHead(t *testing.T) { testLongFastSyncedDeepSetHead(t, false) } -func TestLongFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongFastSyncedDeepSetHead(t, true) } +func TestLongSnapSyncedDeepSetHead(t *testing.T) { testLongSnapSyncedDeepSetHead(t, false) } +func TestLongSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongSnapSyncedDeepSetHead(t, true) } -func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -926,14 +926,14 @@ func testLongFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // sethead was called. In this case we expect the chain to detect that it was fast // syncing and delete everything from the new head, since we can just pick up fast // syncing from there. -func TestLongFastSyncingShallowSetHead(t *testing.T) { - testLongFastSyncingShallowSetHead(t, false) +func TestLongSnapSyncingShallowSetHead(t *testing.T) { + testLongSnapSyncingShallowSetHead(t, false) } -func TestLongFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongFastSyncingShallowSetHead(t, true) +func TestLongSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongSnapSyncingShallowSetHead(t, true) } -func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // @@ -977,14 +977,14 @@ func testLongFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // sethead was called. In this case we expect the chain to detect that it was fast // syncing and delete everything from the new head, since we can just pick up fast // syncing from there. 
-func TestLongFastSyncingDeepSetHead(t *testing.T) { - testLongFastSyncingDeepSetHead(t, false) +func TestLongSnapSyncingDeepSetHead(t *testing.T) { + testLongSnapSyncingDeepSetHead(t, false) } -func TestLongFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongFastSyncingDeepSetHead(t, true) +func TestLongSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongSnapSyncingDeepSetHead(t, true) } -func testLongFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // @@ -1132,14 +1132,14 @@ func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) { // sethead point should be deleted. In between the committed block and the // requested head the data can remain as "fast sync" data to avoid redownloading // it. The side chain is nuked by the freezer. -func TestLongOldForkedFastSyncedShallowSetHead(t *testing.T) { - testLongOldForkedFastSyncedShallowSetHead(t, false) +func TestLongOldForkedSnapSyncedShallowSetHead(t *testing.T) { + testLongOldForkedSnapSyncedShallowSetHead(t, false) } -func TestLongOldForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedShallowSetHead(t, true) +func TestLongOldForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedShallowSetHead(t, true) } -func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1186,14 +1186,14 @@ func testLongOldForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // full chain to be rolled back to the committed block. Since the ancient limit was // underflown, everything needs to be deleted onwards to avoid creating a gap. The // side chain is nuked by the freezer. -func TestLongOldForkedFastSyncedDeepSetHead(t *testing.T) { - testLongOldForkedFastSyncedDeepSetHead(t, false) +func TestLongOldForkedSnapSyncedDeepSetHead(t *testing.T) { + testLongOldForkedSnapSyncedDeepSetHead(t, false) } -func TestLongOldForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncedDeepSetHead(t, true) +func TestLongOldForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncedDeepSetHead(t, true) } -func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1239,14 +1239,14 @@ func testLongOldForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // that it was fast syncing and delete everything from the new head, since we can // just pick up fast syncing from there. The side chain is completely nuked by the // freezer. 
-func TestLongOldForkedFastSyncingShallowSetHead(t *testing.T) { - testLongOldForkedFastSyncingShallowSetHead(t, false) +func TestLongOldForkedSnapSyncingShallowSetHead(t *testing.T) { + testLongOldForkedSnapSyncingShallowSetHead(t, false) } -func TestLongOldForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingShallowSetHead(t, true) +func TestLongOldForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingShallowSetHead(t, true) } -func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3 @@ -1293,14 +1293,14 @@ func testLongOldForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // that it was fast syncing and delete everything from the new head, since we can // just pick up fast syncing from there. The side chain is completely nuked by the // freezer. -func TestLongOldForkedFastSyncingDeepSetHead(t *testing.T) { - testLongOldForkedFastSyncingDeepSetHead(t, false) +func TestLongOldForkedSnapSyncingDeepSetHead(t *testing.T) { + testLongOldForkedSnapSyncingDeepSetHead(t, false) } -func TestLongOldForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedFastSyncingDeepSetHead(t, true) +func TestLongOldForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongOldForkedSnapSyncingDeepSetHead(t, true) } -func testLongOldForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongOldForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3 @@ -1446,15 +1446,15 @@ func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was already committed to disk and then sethead was called. In this test scenario // the side chain is above the committed block. In this case the freezer will delete -// the sidechain since it's dangling, reverting to TestLongFastSyncedShallowSetHead. -func TestLongNewerForkedFastSyncedShallowSetHead(t *testing.T) { - testLongNewerForkedFastSyncedShallowSetHead(t, false) +// the sidechain since it's dangling, reverting to TestLongSnapSyncedShallowSetHead. +func TestLongNewerForkedSnapSyncedShallowSetHead(t *testing.T) { + testLongNewerForkedSnapSyncedShallowSetHead(t, false) } -func TestLongNewerForkedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncedShallowSetHead(t, true) +func TestLongNewerForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncedShallowSetHead(t, true) } -func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1498,15 +1498,15 @@ func testLongNewerForkedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - older than the ancient limit - // was already committed to disk and then sethead was called. In this test scenario // the side chain is above the committed block. 
In this case the freezer will delete -// the sidechain since it's dangling, reverting to TestLongFastSyncedDeepSetHead. -func TestLongNewerForkedFastSyncedDeepSetHead(t *testing.T) { - testLongNewerForkedFastSyncedDeepSetHead(t, false) +// the sidechain since it's dangling, reverting to TestLongSnapSyncedDeepSetHead. +func TestLongNewerForkedSnapSyncedDeepSetHead(t *testing.T) { + testLongNewerForkedSnapSyncedDeepSetHead(t, false) } -func TestLongNewerForkedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncedDeepSetHead(t, true) +func TestLongNewerForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncedDeepSetHead(t, true) } -func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1549,15 +1549,15 @@ func testLongNewerForkedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was not yet committed, but sethead was called. In this test scenario the side // chain is above the committed block. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongFastSyncinghallowSetHead. -func TestLongNewerForkedFastSyncingShallowSetHead(t *testing.T) { - testLongNewerForkedFastSyncingShallowSetHead(t, false) +// sidechain since it's dangling, reverting to TestLongSnapSyncingShallowSetHead. +func TestLongNewerForkedSnapSyncingShallowSetHead(t *testing.T) { + testLongNewerForkedSnapSyncingShallowSetHead(t, false) } -func TestLongNewerForkedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingShallowSetHead(t, true) +func TestLongNewerForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingShallowSetHead(t, true) } -func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1601,15 +1601,15 @@ func testLongNewerForkedFastSyncingShallowSetHead(t *testing.T, snapshots bool) // side chain, where the fast sync pivot point - older than the ancient limit - // was not yet committed, but sethead was called. In this test scenario the side // chain is above the committed block. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongFastSyncingDeepSetHead. -func TestLongNewerForkedFastSyncingDeepSetHead(t *testing.T) { - testLongNewerForkedFastSyncingDeepSetHead(t, false) +// sidechain since it's dangling, reverting to TestLongSnapSyncingDeepSetHead.
+func TestLongNewerForkedSnapSyncingDeepSetHead(t *testing.T) { + testLongNewerForkedSnapSyncingDeepSetHead(t, false) } -func TestLongNewerForkedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedFastSyncingDeepSetHead(t, true) +func TestLongNewerForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongNewerForkedSnapSyncingDeepSetHead(t, true) } -func testLongNewerForkedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongNewerForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 @@ -1745,15 +1745,15 @@ func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - newer than the ancient limit - // was already committed to disk and then sethead was called. In this case the // freezer will delete the sidechain since it's dangling, reverting to -// TestLongFastSyncedShallowSetHead. -func TestLongReorgedFastSyncedShallowSetHead(t *testing.T) { - testLongReorgedFastSyncedShallowSetHead(t, false) +// TestLongSnapSyncedShallowSetHead. +func TestLongReorgedSnapSyncedShallowSetHead(t *testing.T) { + testLongReorgedSnapSyncedShallowSetHead(t, false) } -func TestLongReorgedFastSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedShallowSetHead(t, true) +func TestLongReorgedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedShallowSetHead(t, true) } -func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1797,15 +1797,15 @@ func testLongReorgedFastSyncedShallowSetHead(t *testing.T, snapshots bool) { // side chain, where the fast sync pivot point - older than the ancient limit - // was already committed to disk and then sethead was called. In this case the // freezer will delete the sidechain since it's dangling, reverting to -// TestLongFastSyncedDeepSetHead. -func TestLongReorgedFastSyncedDeepSetHead(t *testing.T) { - testLongReorgedFastSyncedDeepSetHead(t, false) +// TestLongSnapSyncedDeepSetHead. +func TestLongReorgedSnapSyncedDeepSetHead(t *testing.T) { + testLongReorgedSnapSyncedDeepSetHead(t, false) } -func TestLongReorgedFastSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncedDeepSetHead(t, true) +func TestLongReorgedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncedDeepSetHead(t, true) } -func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1850,14 +1850,14 @@ func testLongReorgedFastSyncedDeepSetHead(t *testing.T, snapshots bool) { // chain to detect that it was fast syncing and delete everything from the new // head, since we can just pick up fast syncing from there. The side chain is // completely nuked by the freezer. 
-func TestLongReorgedFastSyncingShallowSetHead(t *testing.T) { - testLongReorgedFastSyncingShallowSetHead(t, false) +func TestLongReorgedSnapSyncingShallowSetHead(t *testing.T) { + testLongReorgedSnapSyncingShallowSetHead(t, false) } -func TestLongReorgedFastSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingShallowSetHead(t, true) +func TestLongReorgedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingShallowSetHead(t, true) } -func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 @@ -1903,14 +1903,14 @@ func testLongReorgedFastSyncingShallowSetHead(t *testing.T, snapshots bool) { // chain to detect that it was fast syncing and delete everything from the new // head, since we can just pick up fast syncing from there. The side chain is // completely nuked by the freezer. -func TestLongReorgedFastSyncingDeepSetHead(t *testing.T) { - testLongReorgedFastSyncingDeepSetHead(t, false) +func TestLongReorgedSnapSyncingDeepSetHead(t *testing.T) { + testLongReorgedSnapSyncingDeepSetHead(t, false) } -func TestLongReorgedFastSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongReorgedFastSyncingDeepSetHead(t, true) +func TestLongReorgedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { + testLongReorgedSnapSyncingDeepSetHead(t, true) } -func testLongReorgedFastSyncingDeepSetHead(t *testing.T, snapshots bool) { +func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { // Chain: // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 80d07eb30..6e542fe2f 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -28,13 +28,16 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" @@ -210,6 +213,55 @@ func TestLastBlock(t *testing.T) { } } +// Tests inserting the blocks/headers after the fork choice rule is changed. +// The chain is reorged to whatever is specified.
+func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool) { + // Copy old chain up to #i into a new db + db, blockchain2, err := newCanonical(ethash.NewFaker(), i, full) + if err != nil { + t.Fatal("could not make new canonical in testFork", err) + } + defer blockchain2.Stop() + + // Assert the chains have the same header/block at #i + var hash1, hash2 common.Hash + if full { + hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash() + hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash() + } else { + hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash() + hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash() + } + if hash1 != hash2 { + t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1) + } + + // Extend the newly created chain + if full { + blockChainB := makeBlockChain(blockchain2.CurrentBlock(), n, ethash.NewFaker(), db, forkSeed) + if _, err := blockchain2.InsertChain(blockChainB); err != nil { + t.Fatalf("failed to insert forking chain: %v", err) + } + if blockchain2.CurrentBlock().NumberU64() != blockChainB[len(blockChainB)-1].NumberU64() { + t.Fatalf("failed to reorg to the given chain") + } + if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() { + t.Fatalf("failed to reorg to the given chain") + } + } else { + headerChainB := makeHeaderChain(blockchain2.CurrentHeader(), n, ethash.NewFaker(), db, forkSeed) + if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil { + t.Fatalf("failed to insert forking chain: %v", err) + } + if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() { + t.Fatalf("failed to reorg to the given chain") + } + if blockchain2.CurrentHeader().Hash() != headerChainB[len(headerChainB)-1].Hash() { + t.Fatalf("failed to reorg to the given chain") + } + } +} + // Tests that given a starting canonical chain of a given size, it can be extended // with various length chains. func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) } @@ -238,6 +290,25 @@ func testExtendCanonical(t *testing.T, full bool) { testFork(t, processor, length, 10, full, better) } +// Tests that given a starting canonical chain of a given size, it can be extended +// with various length chains. +func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, false) } +func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, true) } + +func testExtendCanonicalAfterMerge(t *testing.T, full bool) { + length := 5 + + // Make first chain starting from genesis + _, processor, err := newCanonical(ethash.NewFaker(), length, full) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + defer processor.Stop() + + testInsertAfterMerge(t, processor, length, 1, full) + testInsertAfterMerge(t, processor, length, 10, full) +} + // Tests that given a starting canonical chain of a given size, creating shorter // forks do not take canonical ownership. func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) } @@ -268,6 +339,29 @@ func testShorterFork(t *testing.T, full bool) { testFork(t, processor, 5, 4, full, worse) } +// Tests that given a starting canonical chain of a given size, creating shorter +// forks do not take canonical ownership. 
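The testInsertAfterMerge helper above encodes the post-merge fork-choice expectation: the segment inserted last becomes canonical even when it is shorter than the chain it replaces, which is why the old total-difficulty comparators are dropped from these tests. A hedged illustration of a call, written as if it lived in this file next to the helpers it uses (the test name is invented; newCanonical, testInsertAfterMerge and ethash.NewFaker are the ones already present):

```go
// Hypothetical extra test reusing the newCanonical and testInsertAfterMerge
// helpers defined in core/blockchain_test.go.
func TestForkAfterMergeExample(t *testing.T) {
	// Build a 10-block canonical chain of full blocks.
	_, processor, err := newCanonical(ethash.NewFaker(), 10, true)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	// Fork off block #5 with a 3-block extension: the fork's head lands at
	// block #8, below the canonical #10, yet the helper expects the chain to
	// reorg onto it once the merge fork choice is in effect.
	testInsertAfterMerge(t, processor, 5, 3, true)
}
```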
+func TestShorterForkHeadersAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, false) } +func TestShorterForkBlocksAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, true) } + +func testShorterForkAfterMerge(t *testing.T, full bool) { + length := 10 + + // Make first chain starting from genesis + _, processor, err := newCanonical(ethash.NewFaker(), length, full) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + defer processor.Stop() + + testInsertAfterMerge(t, processor, 0, 3, full) + testInsertAfterMerge(t, processor, 0, 7, full) + testInsertAfterMerge(t, processor, 1, 1, full) + testInsertAfterMerge(t, processor, 1, 7, full) + testInsertAfterMerge(t, processor, 5, 3, full) + testInsertAfterMerge(t, processor, 5, 4, full) +} + // Tests that given a starting canonical chain of a given size, creating longer // forks do take canonical ownership. func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) } @@ -283,19 +377,35 @@ func testLongerFork(t *testing.T, full bool) { } defer processor.Stop() - // Define the difficulty comparator - better := func(td1, td2 *big.Int) { - if td2.Cmp(td1) <= 0 { - t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1) - } + testInsertAfterMerge(t, processor, 0, 11, full) + testInsertAfterMerge(t, processor, 0, 15, full) + testInsertAfterMerge(t, processor, 1, 10, full) + testInsertAfterMerge(t, processor, 1, 12, full) + testInsertAfterMerge(t, processor, 5, 6, full) + testInsertAfterMerge(t, processor, 5, 8, full) +} + +// Tests that given a starting canonical chain of a given size, creating longer +// forks do take canonical ownership. +func TestLongerForkHeadersAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, false) } +func TestLongerForkBlocksAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, true) } + +func testLongerForkAfterMerge(t *testing.T, full bool) { + length := 10 + + // Make first chain starting from genesis + _, processor, err := newCanonical(ethash.NewFaker(), length, full) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) } - // Sum of numbers must be greater than `length` for this to be a longer fork - testFork(t, processor, 0, 11, full, better) - testFork(t, processor, 0, 15, full, better) - testFork(t, processor, 1, 10, full, better) - testFork(t, processor, 1, 12, full, better) - testFork(t, processor, 5, 6, full, better) - testFork(t, processor, 5, 8, full, better) + defer processor.Stop() + + testInsertAfterMerge(t, processor, 0, 11, full) + testInsertAfterMerge(t, processor, 0, 15, full) + testInsertAfterMerge(t, processor, 1, 10, full) + testInsertAfterMerge(t, processor, 1, 12, full) + testInsertAfterMerge(t, processor, 5, 6, full) + testInsertAfterMerge(t, processor, 5, 8, full) } // Tests that given a starting canonical chain of a given size, creating equal @@ -328,6 +438,29 @@ func testEqualFork(t *testing.T, full bool) { testFork(t, processor, 9, 1, full, equal) } +// Tests that given a starting canonical chain of a given size, creating equal +// forks do take canonical ownership. 
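The merging tests below configure the transition through a recurring pattern: wrap the ethash faker in a beacon engine, flip the Merger state to "TTD reached, PoS finalized", and set a matching TerminalTotalDifficulty in the chain config. A distilled sketch using only names that appear in this diff; the helper activateMerge itself is invented:

```go
package sketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/beacon"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/params"
)

// activateMerge (hypothetical) switches a test chain into PoS mode at the
// given height: the Merger records that the TTD was hit and PoS finalized,
// and the chain config gets the matching terminal total difficulty.
func activateMerge(config *params.ChainConfig, height int64) *consensus.Merger {
	merger := consensus.NewMerger(rawdb.NewMemoryDatabase())
	merger.ReachTTD()
	merger.FinalizePoS()
	config.TerminalTotalDifficulty = big.NewInt(height)
	return merger
}

// Both the generator and the importing chain run a beacon-wrapped ethash
// faker, mirroring the genEngine/runEngine pair in the tests below.
var testEngine = beacon.New(ethash.NewFaker())
```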
+func TestEqualForkHeadersAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, false) } +func TestEqualForkBlocksAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, true) } + +func testEqualForkAfterMerge(t *testing.T, full bool) { + length := 10 + + // Make first chain starting from genesis + _, processor, err := newCanonical(ethash.NewFaker(), length, full) + if err != nil { + t.Fatalf("failed to make new canonical chain: %v", err) + } + defer processor.Stop() + + testInsertAfterMerge(t, processor, 0, 10, full) + testInsertAfterMerge(t, processor, 1, 9, full) + testInsertAfterMerge(t, processor, 2, 8, full) + testInsertAfterMerge(t, processor, 5, 5, full) + testInsertAfterMerge(t, processor, 6, 4, full) + testInsertAfterMerge(t, processor, 9, 1, full) +} + // Tests that chains missing links do not get accepted by the processor. func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) } func TestBrokenBlockChain(t *testing.T) { testBrokenChain(t, true) } @@ -1800,21 +1933,56 @@ func TestLowDiffLongChain(t *testing.T) { // - C is canon chain, containing blocks [G..Cn..Cm] // - A common ancestor is placed at prune-point + blocksBetweenCommonAncestorAndPruneblock // - The sidechain S is prepended with numCanonBlocksInSidechain blocks from the canon chain -func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int) { - +// +// The mergePoint can be these values: +// -1: the transition won't happen +// 0: the transition happens since genesis +// 1: the transition happens after some chain segments +func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int, mergePoint int) { + // Copy the TestChainConfig so we can modify it during tests + chainConfig := *params.TestChainConfig // Generate a canonical chain to act as the main dataset - engine := ethash.NewFaker() - db := rawdb.NewMemoryDatabase() - genesis := (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db) + var ( + merger = consensus.NewMerger(rawdb.NewMemoryDatabase()) + genEngine = beacon.New(ethash.NewFaker()) + runEngine = beacon.New(ethash.NewFaker()) + db = rawdb.NewMemoryDatabase() + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + nonce = uint64(0) + + gspec = &Genesis{ + Config: &chainConfig, + Alloc: GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}}, + BaseFee: big.NewInt(params.InitialBaseFee), + } + signer = types.LatestSigner(gspec.Config) + genesis, _ = gspec.Commit(db) + ) // Generate and import the canonical chain - blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil) diskdb := rawdb.NewMemoryDatabase() - (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb) - chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil) + gspec.MustCommit(diskdb) + chain, err := NewBlockChain(diskdb, nil, &chainConfig, runEngine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } + // Activate the transition since genesis if required + if mergePoint == 0 { + merger.ReachTTD() + merger.FinalizePoS() + + // Set the terminal total difficulty in the config + gspec.Config.TerminalTotalDifficulty = big.NewInt(0) + } + blocks, _ := GenerateChain(&chainConfig, genesis, genEngine, db, 2*TriesInMemory, func(i int, gen *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(nonce, 
common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key) + if err != nil { + t.Fatalf("failed to create tx: %v", err) + } + gen.AddTx(tx) + nonce++ + }) if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } @@ -1831,6 +1999,15 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) { t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64()) } + + // Activate the transition in the middle of the chain + if mergePoint == 1 { + merger.ReachTTD() + merger.FinalizePoS() + // Set the terminal total difficulty in the config + gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(len(blocks))) + } + // Generate the sidechain // First block should be a known block, block after should be a pruned block. So // canon(pruned), side, side... @@ -1838,7 +2015,7 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon // Generate fork chain, make it longer than canon parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock parent := blocks[parentIndex] - fork, _ := GenerateChain(params.TestChainConfig, parent, engine, db, 2*TriesInMemory, func(i int, b *BlockGen) { + fork, _ := GenerateChain(&chainConfig, parent, genEngine, db, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) // Prepend the parent(s) @@ -1847,9 +2024,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon sidechain = append(sidechain, blocks[parentIndex+1-i]) } sidechain = append(sidechain, fork...) - _, err = chain.InsertChain(sidechain) + n, err := chain.InsertChain(sidechain) if err != nil { - t.Errorf("Got error, %v", err) + t.Errorf("Got error, %v number %d - %d", err, sidechain[n].NumberU64(), n) } head := chain.CurrentBlock() if got := fork[len(fork)-1].Hash(); got != head.Hash() { @@ -1870,11 +2047,28 @@ func TestPrunedImportSide(t *testing.T) { //glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false))) //glogger.Verbosity(3) //log.Root().SetHandler(log.Handler(glogger)) - testSideImport(t, 3, 3) - testSideImport(t, 3, -3) - testSideImport(t, 10, 0) - testSideImport(t, 1, 10) - testSideImport(t, 1, -10) + testSideImport(t, 3, 3, -1) + testSideImport(t, 3, -3, -1) + testSideImport(t, 10, 0, -1) + testSideImport(t, 1, 10, -1) + testSideImport(t, 1, -10, -1) +} + +func TestPrunedImportSideWithMerging(t *testing.T) { + //glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false))) + //glogger.Verbosity(3) + //log.Root().SetHandler(log.Handler(glogger)) + testSideImport(t, 3, 3, 0) + testSideImport(t, 3, -3, 0) + testSideImport(t, 10, 0, 0) + testSideImport(t, 1, 10, 0) + testSideImport(t, 1, -10, 0) + + testSideImport(t, 3, 3, 1) + testSideImport(t, 3, -3, 1) + testSideImport(t, 10, 0, 1) + testSideImport(t, 1, 10, 1) + testSideImport(t, 1, -10, 1) } func TestInsertKnownHeaders(t *testing.T) { testInsertKnownChainData(t, "headers") } @@ -2002,6 +2196,179 @@ func testInsertKnownChainData(t *testing.T, typ string) { asserter(t, blocks2[len(blocks2)-1]) } +func TestInsertKnownHeadersWithMerging(t *testing.T) { + testInsertKnownChainDataWithMerging(t, "headers", 0) +} +func TestInsertKnownReceiptChainWithMerging(t *testing.T) { + testInsertKnownChainDataWithMerging(t, "receipts", 0) +} +func TestInsertKnownBlocksWithMerging(t *testing.T) { + 
testInsertKnownChainDataWithMerging(t, "blocks", 0) +} +func TestInsertKnownHeadersAfterMerging(t *testing.T) { + testInsertKnownChainDataWithMerging(t, "headers", 1) +} +func TestInsertKnownReceiptChainAfterMerging(t *testing.T) { + testInsertKnownChainDataWithMerging(t, "receipts", 1) +} +func TestInsertKnownBlocksAfterMerging(t *testing.T) { + testInsertKnownChainDataWithMerging(t, "blocks", 1) +} + +// mergeHeight can be assigned in these values: +// 0: means the merging is applied since genesis +// 1: means the merging is applied after the first segment +func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight int) { + // Copy the TestChainConfig so we can modify it during tests + chainConfig := *params.TestChainConfig + var ( + db = rawdb.NewMemoryDatabase() + genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: &chainConfig}).MustCommit(db) + runMerger = consensus.NewMerger(db) + runEngine = beacon.New(ethash.NewFaker()) + genEngine = beacon.New(ethash.NewFaker()) + ) + applyMerge := func(engine *beacon.Beacon, height int) { + if engine != nil { + runMerger.FinalizePoS() + // Set the terminal total difficulty in the config + chainConfig.TerminalTotalDifficulty = big.NewInt(int64(height)) + } + } + + // Apply merging since genesis + if mergeHeight == 0 { + applyMerge(genEngine, 0) + } + blocks, receipts := GenerateChain(&chainConfig, genesis, genEngine, db, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) + + // Apply merging after the first segment + if mergeHeight == 1 { + applyMerge(genEngine, len(blocks)) + } + // Longer chain and shorter chain + blocks2, receipts2 := GenerateChain(&chainConfig, blocks[len(blocks)-1], genEngine, db, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) + blocks3, receipts3 := GenerateChain(&chainConfig, blocks[len(blocks)-1], genEngine, db, 64, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{1}) + b.OffsetTime(-9) // Time shifted, difficulty shouldn't be changed + }) + + // Import the shared chain and the original canonical one + dir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("failed to create temp freezer dir: %v", err) + } + defer os.Remove(dir) + chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false) + if err != nil { + t.Fatalf("failed to create temp freezer db: %v", err) + } + (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(chaindb) + defer os.RemoveAll(dir) + + chain, err := NewBlockChain(chaindb, nil, &chainConfig, runEngine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + var ( + inserter func(blocks []*types.Block, receipts []types.Receipts) error + asserter func(t *testing.T, block *types.Block) + ) + if typ == "headers" { + inserter = func(blocks []*types.Block, receipts []types.Receipts) error { + headers := make([]*types.Header, 0, len(blocks)) + for _, block := range blocks { + headers = append(headers, block.Header()) + } + _, err := chain.InsertHeaderChain(headers, 1) + return err + } + asserter = func(t *testing.T, block *types.Block) { + if chain.CurrentHeader().Hash() != block.Hash() { + t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex()) + } + } + } else if typ == "receipts" { + inserter = func(blocks []*types.Block, receipts []types.Receipts) error { + headers := make([]*types.Header, 0, len(blocks)) + for _, block := range blocks { + headers = append(headers, 
block.Header()) + } + _, err := chain.InsertHeaderChain(headers, 1) + if err != nil { + return err + } + _, err = chain.InsertReceiptChain(blocks, receipts, 0) + return err + } + asserter = func(t *testing.T, block *types.Block) { + if chain.CurrentFastBlock().Hash() != block.Hash() { + t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentFastBlock().Hash().Hex(), block.Hash().Hex()) + } + } + } else { + inserter = func(blocks []*types.Block, receipts []types.Receipts) error { + _, err := chain.InsertChain(blocks) + return err + } + asserter = func(t *testing.T, block *types.Block) { + if chain.CurrentBlock().Hash() != block.Hash() { + t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex()) + } + } + } + + // Apply merging since genesis if required + if mergeHeight == 0 { + applyMerge(runEngine, 0) + } + if err := inserter(blocks, receipts); err != nil { + t.Fatalf("failed to insert chain data: %v", err) + } + + // Reimport the chain data again. All the imported + // chain data are regarded "known" data. + if err := inserter(blocks, receipts); err != nil { + t.Fatalf("failed to insert chain data: %v", err) + } + asserter(t, blocks[len(blocks)-1]) + + // Import a long canonical chain with some known data as prefix. + rollback := blocks[len(blocks)/2].NumberU64() + chain.SetHead(rollback - 1) + if err := inserter(blocks, receipts); err != nil { + t.Fatalf("failed to insert chain data: %v", err) + } + asserter(t, blocks[len(blocks)-1]) + + // Apply merging after the first segment + if mergeHeight == 1 { + applyMerge(runEngine, len(blocks)) + } + + // Import a longer chain with some known data as prefix. + if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { + t.Fatalf("failed to insert chain data: %v", err) + } + asserter(t, blocks2[len(blocks2)-1]) + + // Import a shorter chain with some known data as prefix. + // The reorg is expected since the fork choice rule is + // already changed. + if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil { + t.Fatalf("failed to insert chain data: %v", err) + } + // The head shouldn't change. + asserter(t, blocks3[len(blocks3)-1]) + + // Reimport the longer chain again, the reorg is still expected + chain.SetHead(rollback - 1) + if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { + t.Fatalf("failed to insert chain data: %v", err) + } + asserter(t, blocks2[len(blocks2)-1]) +} + // getLongAndShortChains returns two chains: A is longer, B is heavier. 
func getLongAndShortChains() (bc *BlockChain, longChain []*types.Block, heavyChain []*types.Block, err error) { // Generate a canonical chain to act as the main dataset @@ -2270,7 +2637,7 @@ func TestTransactionIndices(t *testing.T) { } } -func TestSkipStaleTxIndicesInFastSync(t *testing.T) { +func TestSkipStaleTxIndicesInSnapSync(t *testing.T) { // Configure and generate a sample block chain var ( gendb = rawdb.NewMemoryDatabase() @@ -2482,6 +2849,7 @@ func TestSideImportPrunedBlocks(t *testing.T) { // Generate and import the canonical chain blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, 2*TriesInMemory, nil) diskdb := rawdb.NewMemoryDatabase() + (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(diskdb) chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil) if err != nil { @@ -2690,7 +3058,7 @@ func TestDeleteRecreateSlots(t *testing.T) { gspec.MustCommit(diskdb) chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{ Debug: true, - Tracer: vm.NewJSONLogger(nil, os.Stdout), + Tracer: logger.NewJSONLogger(nil, os.Stdout), }, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) @@ -2770,7 +3138,7 @@ func TestDeleteRecreateAccount(t *testing.T) { gspec.MustCommit(diskdb) chain, err := NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{ Debug: true, - Tracer: vm.NewJSONLogger(nil, os.Stdout), + Tracer: logger.NewJSONLogger(nil, os.Stdout), }, nil, nil) if err != nil { t.Fatalf("failed to create tester chain: %v", err) diff --git a/core/chain_makers.go b/core/chain_makers.go index b113c0d1b..c7bf60a4b 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -155,6 +155,28 @@ func (b *BlockGen) TxNonce(addr common.Address) uint64 { // AddUncle adds an uncle header to the generated block. func (b *BlockGen) AddUncle(h *types.Header) { + // The uncle will have the same timestamp and auto-generated difficulty + h.Time = b.header.Time + + var parent *types.Header + for i := b.i - 1; i >= 0; i-- { + if b.chain[i].Hash() == h.ParentHash { + parent = b.chain[i].Header() + break + } + } + chainreader := &fakeChainReader{config: b.config} + h.Difficulty = b.engine.CalcDifficulty(chainreader, b.header.Time, parent) + + // The gas limit and price should be derived from the parent + h.GasLimit = parent.GasLimit + if b.config.IsLondon(h.Number) { + h.BaseFee = misc.CalcBaseFee(b.config, parent) + if !b.config.IsLondon(parent.Number) { + parentGasLimit := parent.GasLimit * params.ElasticityMultiplier + h.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) + } + } b.uncles = append(b.uncles, h) } @@ -205,6 +227,18 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, parent, statedb, b.engine) + // Set the difficulty for clique block. The chain maker doesn't have access + // to a chain, so the difficulty will be left unset (nil). Set it here to the + // correct value. 
+	if b.header.Difficulty == nil {
+		if config.TerminalTotalDifficulty == nil {
+			// Clique chain
+			b.header.Difficulty = big.NewInt(2)
+		} else {
+			// Post-merge chain
+			b.header.Difficulty = big.NewInt(0)
+		}
+	}
 	// Mutate the state and block according to any hard-fork specs
 	if daoBlock := config.DAOForkBlock; daoBlock != nil {
 		limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
@@ -313,3 +347,4 @@ func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header
 func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header  { return nil }
 func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil }
 func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block  { return nil }
+func (cr *fakeChainReader) GetTd(hash common.Hash, number uint64) *big.Int  { return nil }
diff --git a/core/forkchoice.go b/core/forkchoice.go
new file mode 100644
index 000000000..b0dbb200e
--- /dev/null
+++ b/core/forkchoice.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	crand "crypto/rand"
+	"errors"
+	"math/big"
+	mrand "math/rand"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/params"
+)
+
+// ChainReader defines a small collection of methods needed to access the local
+// blockchain during header verification. It's implemented by both blockchain
+// and lightchain.
+type ChainReader interface {
+	// Config retrieves the header chain's chain configuration.
+	Config() *params.ChainConfig
+
+	// GetTd returns the total difficulty of a local block.
+	GetTd(common.Hash, uint64) *big.Int
+}
+
+// ForkChoice is the fork chooser based on the highest total difficulty of the
+// chain (the fork choice used in eth1) and the external fork choice (the fork
+// choice used in eth2). The main goal of this ForkChoice is not only to offer
+// fork choice during the eth1/2 merge phase, but also to keep compatibility with
+// all other proof-of-work networks.
+type ForkChoice struct {
+	chain ChainReader
+	rand  *mrand.Rand
+
+	// preserve is a helper function used in td fork choice.
+	// Miners will prefer to choose the local mined block if the
+	// local td is equal to the extern one. It can be nil for light
+	// clients.
+	preserve func(header *types.Header) bool
+}
+
+func NewForkChoice(chainReader ChainReader, preserve func(header *types.Header) bool) *ForkChoice {
+	// Seed a fast but crypto originating random generator
+	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+	if err != nil {
+		log.Crit("Failed to initialize random seed", "err", err)
+	}
+	return &ForkChoice{
+		chain:    chainReader,
+		rand:     mrand.New(mrand.NewSource(seed.Int64())),
+		preserve: preserve,
+	}
+}
+
+// ReorgNeeded returns whether the reorg should be applied
+// based on the given external header and local canonical chain.
+// In the td mode, the new head is chosen if the corresponding
+// total difficulty is higher. In the extern mode, the trusted
+// header is always selected as the head.
+func (f *ForkChoice) ReorgNeeded(current *types.Header, header *types.Header) (bool, error) {
+	var (
+		localTD  = f.chain.GetTd(current.Hash(), current.Number.Uint64())
+		externTd = f.chain.GetTd(header.Hash(), header.Number.Uint64())
+	)
+	if localTD == nil || externTd == nil {
+		return false, errors.New("missing td")
+	}
+	// Accept the new header as the chain head if the transition
+	// is already triggered. We assume all the headers after the
+	// transition come from the trusted consensus layer.
+	if ttd := f.chain.Config().TerminalTotalDifficulty; ttd != nil && ttd.Cmp(externTd) <= 0 {
+		return true, nil
+	}
+	// If the total difficulty is higher than our known, add it to the canonical chain
+	// Second clause in the if statement reduces the vulnerability to selfish mining.
+	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
+	reorg := externTd.Cmp(localTD) > 0
+	if !reorg && externTd.Cmp(localTD) == 0 {
+		number, headNumber := header.Number.Uint64(), current.Number.Uint64()
+		if number < headNumber {
+			reorg = true
+		} else if number == headNumber {
+			var currentPreserve, externPreserve bool
+			if f.preserve != nil {
+				currentPreserve, externPreserve = f.preserve(current), f.preserve(header)
+			}
+			reorg = !currentPreserve && (externPreserve || f.rand.Float64() < 0.5)
+		}
+	}
+	return reorg, nil
+}
diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
index 84c34561d..b0ee59b9e 100644
--- a/core/forkid/forkid_test.go
+++ b/core/forkid/forkid_test.go
@@ -19,6 +19,7 @@ package forkid
 import (
 	"bytes"
 	"math"
+	"math/big"
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -29,6 +30,8 @@ import (
 // TestCreation tests that different genesis and fork rule combinations result in
 // the correct fork ID.
func TestCreation(t *testing.T) { + mergeConfig := *params.MainnetChainConfig + mergeConfig.MergeForkBlock = big.NewInt(15000000) type testcase struct { head uint64 want ID @@ -65,7 +68,7 @@ func TestCreation(t *testing.T) { {12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block {13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block - {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, /// First Arrow Glacier block + {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // First Arrow Glacier block {20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block }, }, @@ -133,6 +136,38 @@ func TestCreation(t *testing.T) { {6000000, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block }, }, + // Merge test cases + { + &mergeConfig, + params.MainnetGenesisHash, + []testcase{ + {0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced + {1149999, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block + {1150000, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block + {1919999, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block + {1920000, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block + {2462999, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block + {2463000, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block + {2674999, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block + {2675000, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block + {4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block + {4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block + {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block + {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block + {9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block + {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block + {9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block + {9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block + {12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block + {12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block + {12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block + {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block + {13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block + {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15000000}}, // First Arrow Glacier block + {15000000, ID{Hash: checksumToBytes(0xe3abe201), Next: 0}}, // First Merge Start block + {20000000, ID{Hash: checksumToBytes(0xe3abe201), Next: 0}}, // Future Merge Start block + }, + }, } for i, tt := range tests { for j, ttt := range tt.cases { diff --git a/core/genesis.go b/core/genesis.go index 85d01ec87..557440d08 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -155,10 +155,10 @@ 
func (e *GenesisMismatchError) Error() string { // // The returned chain configuration is never nil. func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlockWithOverride(db, genesis, nil) + return SetupGenesisBlockWithOverride(db, genesis, nil, nil) } -func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier *big.Int) (*params.ChainConfig, common.Hash, error) { +func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, common.Hash, error) { if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig } @@ -207,6 +207,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override if overrideArrowGlacier != nil { newcfg.ArrowGlacierBlock = overrideArrowGlacier } + if overrideTerminalTotalDifficulty != nil { + newcfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty + } if err := newcfg.CheckConfigForkOrder(); err != nil { return newcfg, common.Hash{}, err } diff --git a/core/headerchain.go b/core/headerchain.go index 9f2b708d0..99364f638 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" lru "github.com/hashicorp/golang-lru" ) @@ -49,15 +50,14 @@ const ( // HeaderChain is responsible for maintaining the header chain including the // header query and updating. // -// The components maintained by headerchain includes: (1) total difficult +// The components maintained by headerchain includes: (1) total difficulty // (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping // and (5) head header flag. // // It is not thread safe either, the encapsulating chain structures should do // the necessary mutex locking/unlocking. type HeaderChain struct { - config *params.ChainConfig - + config *params.ChainConfig chainDb ethdb.Database genesisHeader *types.Header @@ -86,7 +86,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c if err != nil { return nil, err } - hc := &HeaderChain{ config: config, chainDb: chainDb, @@ -97,12 +96,10 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c rand: mrand.New(mrand.NewSource(seed.Int64())), engine: engine, } - hc.genesisHeader = hc.GetHeaderByNumber(0) if hc.genesisHeader == nil { return nil, ErrNoGenesis } - hc.currentHeader.Store(hc.genesisHeader) if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) { if chead := hc.GetHeaderByHash(head); chead != nil { @@ -111,7 +108,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c } hc.currentHeaderHash = hc.CurrentHeader().Hash() headHeaderGauge.Update(hc.CurrentHeader().Number.Int64()) - return hc, nil } @@ -137,35 +133,93 @@ type headerWriteResult struct { lastHeader *types.Header } -// WriteHeaders writes a chain of headers into the local chain, given that the parents -// are already known. If the total difficulty of the newly inserted chain becomes -// greater than the current known TD, the canonical chain is reorged. 
-//
-// Note: This method is not concurrent-safe with inserting blocks simultaneously
-// into the chain, as side effects caused by reorganisations cannot be emulated
-// without the real blocks. Hence, writing headers directly should only be done
-// in two scenarios: pure-header mode of operation (light clients), or properly
-// separated header/block phases (non-archive clients).
-func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) {
+// Reorg reorgs the local canonical chain into the specified chain. The reorg
+// can be classified into two cases: (a) extend the local chain, or (b) switch
+// the head to the given header.
+func (hc *HeaderChain) Reorg(headers []*types.Header) error {
+	// Short circuit if nothing to reorg.
 	if len(headers) == 0 {
-		return &headerWriteResult{}, nil
+		return nil
+	}
+	// If the parent of the (first) block is already the canon header,
+	// we don't have to go backwards to delete canon blocks, but simply
+	// pile them onto the existing chain. Otherwise, do the necessary
+	// reorgs.
+	var (
+		first = headers[0]
+		last  = headers[len(headers)-1]
+		batch = hc.chainDb.NewBatch()
+	)
+	if first.ParentHash != hc.currentHeaderHash {
+		// Delete any canonical number assignments above the new head
+		for i := last.Number.Uint64() + 1; ; i++ {
+			hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
+			if hash == (common.Hash{}) {
+				break
+			}
+			rawdb.DeleteCanonicalHash(batch, i)
+		}
+		// Overwrite any stale canonical number assignments, going
+		// backwards from the first header in this import until the
+		// cross link between two chains.
+		var (
+			header     = first
+			headNumber = header.Number.Uint64()
+			headHash   = header.Hash()
+		)
+		for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
+			rawdb.WriteCanonicalHash(batch, headHash, headNumber)
+			if headNumber == 0 {
+				break // It shouldn't be reached
+			}
+			headHash, headNumber = header.ParentHash, header.Number.Uint64()-1
+			header = hc.GetHeader(headHash, headNumber)
+			if header == nil {
+				return fmt.Errorf("missing parent %d %x", headNumber, headHash)
+			}
+		}
+	}
+	// Extend the canonical chain with the new headers
+	for i := 0; i < len(headers)-1; i++ {
+		hash := headers[i+1].ParentHash // Save some extra hashing
+		num := headers[i].Number.Uint64()
+		rawdb.WriteCanonicalHash(batch, hash, num)
+		rawdb.WriteHeadHeaderHash(batch, hash)
+	}
+	// Write the last header
+	hash := headers[len(headers)-1].Hash()
+	num := headers[len(headers)-1].Number.Uint64()
+	rawdb.WriteCanonicalHash(batch, hash, num)
+	rawdb.WriteHeadHeaderHash(batch, hash)
+
+	if err := batch.Write(); err != nil {
+		return err
+	}
+	// Last step: update all in-memory head header markers
+	hc.currentHeaderHash = last.Hash()
+	hc.currentHeader.Store(types.CopyHeader(last))
+	headHeaderGauge.Update(last.Number.Int64())
+	return nil
+}
+
+// WriteHeaders writes a chain of headers into the local chain, given that the
+// parents are already known. The chain head header won't be updated in this
+// function; a follow-up setChainHead call is expected to finish the entire
+// procedure.
+func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) { + if len(headers) == 0 { + return 0, nil } ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1) if ptd == nil { - return &headerWriteResult{}, consensus.ErrUnknownAncestor + return 0, consensus.ErrUnknownAncestor } var ( - lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number - lastHash = headers[0].ParentHash // Last imported header hash - newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain - - lastHeader *types.Header - inserted []numberHash // Ephemeral lookup of number/hash for the chain - firstInserted = -1 // Index of the first non-ignored header + newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain + inserted []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain + parentKnown = true // Set to true to force hc.HasHeader check the first iteration + batch = hc.chainDb.NewBatch() ) - - batch := hc.chainDb.NewBatch() - parentKnown := true // Set to true to force hc.HasHeader check the first iteration for i, header := range headers { var hash common.Hash // The headers have already been validated at this point, so we already @@ -188,116 +242,67 @@ func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWrit hc.tdCache.Add(hash, new(big.Int).Set(newTD)) rawdb.WriteHeader(batch, header) - inserted = append(inserted, numberHash{number, hash}) + inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash}) hc.headerCache.Add(hash, header) hc.numberCache.Add(hash, number) - if firstInserted < 0 { - firstInserted = i - } } parentKnown = alreadyKnown - lastHeader, lastHash, lastNumber = header, hash, number } - // Skip the slow disk write of all headers if interrupted. if hc.procInterrupt() { log.Debug("Premature abort during headers import") - return &headerWriteResult{}, errors.New("aborted") + return 0, errors.New("aborted") } // Commit to disk! if err := batch.Write(); err != nil { log.Crit("Failed to write headers", "error", err) } - batch.Reset() + return len(inserted), nil +} +// writeHeadersAndSetHead writes a batch of block headers and applies the last +// header as the chain head if the fork choicer says it's ok to update the chain. +// Note: This method is not concurrent-safe with inserting blocks simultaneously +// into the chain, as side effects caused by reorganisations cannot be emulated +// without the real blocks. Hence, writing headers directly should only be done +// in two scenarios: pure-header mode of operation (light clients), or properly +// separated header/block phases (non-archive clients). +func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) { + inserted, err := hc.WriteHeaders(headers) + if err != nil { + return nil, err + } var ( - head = hc.CurrentHeader().Number.Uint64() - localTD = hc.GetTd(hc.currentHeaderHash, head) - status = SideStatTy + lastHeader = headers[len(headers)-1] + lastHash = headers[len(headers)-1].Hash() + result = &headerWriteResult{ + status: NonStatTy, + ignored: len(headers) - inserted, + imported: inserted, + lastHash: lastHash, + lastHeader: lastHeader, + } ) - // If the total difficulty is higher than our known, add it to the canonical chain - // Second clause in the if statement reduces the vulnerability to selfish mining. 
-	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
-	reorg := newTD.Cmp(localTD) > 0
-	if !reorg && newTD.Cmp(localTD) == 0 {
-		if lastNumber < head {
-			reorg = true
-		} else if lastNumber == head {
-			reorg = mrand.Float64() < 0.5
+	// Ask the fork choicer if the reorg is necessary
+	if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
+		return nil, err
+	} else if !reorg {
+		if inserted != 0 {
+			result.status = SideStatTy
 		}
+		return result, nil
 	}
-	// If the parent of the (first) block is already the canon header,
-	// we don't have to go backwards to delete canon blocks, but
-	// simply pile them onto the existing chain
-	chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash
-	if reorg {
-		// If the header can be added into canonical chain, adjust the
-		// header chain markers(canonical indexes and head header flag).
-		//
-		// Note all markers should be written atomically.
-		markerBatch := batch // we can reuse the batch to keep allocs down
-		if !chainAlreadyCanon {
-			// Delete any canonical number assignments above the new head
-			for i := lastNumber + 1; ; i++ {
-				hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
-				if hash == (common.Hash{}) {
-					break
-				}
-				rawdb.DeleteCanonicalHash(markerBatch, i)
-			}
-			// Overwrite any stale canonical number assignments, going
-			// backwards from the first header in this import
-			var (
-				headHash   = headers[0].ParentHash          // inserted[0].parent?
-				headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ?
-				headHeader = hc.GetHeader(headHash, headNumber)
-			)
-			for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
-				rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
-				headHash = headHeader.ParentHash
-				headNumber = headHeader.Number.Uint64() - 1
-				headHeader = hc.GetHeader(headHash, headNumber)
-			}
-			// If some of the older headers were already known, but obtained canon-status
-			// during this import batch, then we need to write that now
-			// Further down, we continue writing the staus for the ones that
-			// were not already known
-			for i := 0; i < firstInserted; i++ {
-				hash := headers[i].Hash()
-				num := headers[i].Number.Uint64()
-				rawdb.WriteCanonicalHash(markerBatch, hash, num)
-				rawdb.WriteHeadHeaderHash(markerBatch, hash)
-			}
-		}
-		// Extend the canonical chain with the new headers
-		for _, hn := range inserted {
-			rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number)
-			rawdb.WriteHeadHeaderHash(markerBatch, hn.hash)
-		}
-		if err := markerBatch.Write(); err != nil {
-			log.Crit("Failed to write header markers into disk", "err", err)
-		}
-		markerBatch.Reset()
-		// Last step update all in-memory head header markers
-		hc.currentHeaderHash = lastHash
-		hc.currentHeader.Store(types.CopyHeader(lastHeader))
-		headHeaderGauge.Update(lastHeader.Number.Int64())
-
-		// Chain status is canonical since this insert was a reorg.
-		// Note that all inserts which have higher TD than existing are 'reorg'.
-		status = CanonStatTy
+	// Special case: all the inserted headers are already on the canonical
+	// header chain; skip the reorg operation.
+ if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() { + return result, nil } - - if len(inserted) == 0 { - status = NonStatTy + // Apply the reorg operation + if err := hc.Reorg(headers); err != nil { + return nil, err } - return &headerWriteResult{ - status: status, - ignored: len(headers) - len(inserted), - imported: len(inserted), - lastHash: lastHash, - lastHeader: lastHeader, - }, nil + result.status = CanonStatTy + return result, nil } func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) { @@ -357,7 +362,7 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) return 0, nil } -// InsertHeaderChain inserts the given headers. +// InsertHeaderChain inserts the given headers and does the reorganisations. // // The validity of the headers is NOT CHECKED by this method, i.e. they need to be // validated by ValidateHeaderChain before calling InsertHeaderChain. @@ -367,20 +372,19 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) // // The returned 'write status' says if the inserted headers are part of the canonical chain // or a side chain. -func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) { +func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) { if hc.procInterrupt() { return 0, errors.New("aborted") } - res, err := hc.writeHeaders(chain) - + res, err := hc.writeHeadersAndSetHead(chain, forker) + if err != nil { + return 0, err + } // Report some public statistics so the user has a clue what's going on context := []interface{}{ "count", res.imported, "elapsed", common.PrettyDuration(time.Since(start)), } - if err != nil { - context = append(context, "err", err) - } if last := res.lastHeader; last != nil { context = append(context, "number", last.Number, "hash", res.lastHash) if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute { @@ -495,6 +499,46 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { return hc.GetHeader(hash, number) } +// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going +// backwards from the given number. +// If the 'number' is higher than the highest local header, this method will +// return a best-effort response, containing the headers that we do have. +func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue { + // If the request is for future headers, we still return the portion of + // headers that we are able to serve + if current := hc.CurrentHeader().Number.Uint64(); current < number { + if count > number-current { + count -= number - current + number = current + } else { + return nil + } + } + var headers []rlp.RawValue + // If we have some of the headers in cache already, use that before going to db. + hash := rawdb.ReadCanonicalHash(hc.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + for count > 0 { + header, ok := hc.headerCache.Get(hash) + if !ok { + break + } + h := header.(*types.Header) + rlpData, _ := rlp.EncodeToBytes(h) + headers = append(headers, rlpData) + hash = h.ParentHash + count-- + number-- + } + // Read remaining from db + if count > 0 { + headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...) 
+ } + return headers +} + func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash { return rawdb.ReadCanonicalHash(hc.chainDb, number) } diff --git a/core/headerchain_test.go b/core/headerchain_test.go index f3e40b621..ed0522671 100644 --- a/core/headerchain_test.go +++ b/core/headerchain_test.go @@ -51,10 +51,10 @@ func verifyUnbrokenCanonchain(hc *HeaderChain) error { return nil } -func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error) { +func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error, forker *ForkChoice) { t.Helper() - status, err := hc.InsertHeaderChain(chain, time.Now()) + status, err := hc.InsertHeaderChain(chain, time.Now(), forker) if status != wantStatus { t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus) } @@ -80,37 +80,38 @@ func TestHeaderInsertion(t *testing.T) { } // chain A: G->A1->A2...A128 chainA := makeHeaderChain(genesis.Header(), 128, ethash.NewFaker(), db, 10) - // chain B: G->A1->B2...B128 + // chain B: G->A1->B1...B128 chainB := makeHeaderChain(chainA[0], 128, ethash.NewFaker(), db, 10) log.Root().SetHandler(log.StdoutHandler) + forker := NewForkChoice(hc, nil) // Inserting 64 headers on an empty chain, expecting // 1 callbacks, 1 canon-status, 0 sidestatus, - testInsert(t, hc, chainA[:64], CanonStatTy, nil) + testInsert(t, hc, chainA[:64], CanonStatTy, nil, forker) // Inserting 64 identical headers, expecting // 0 callbacks, 0 canon-status, 0 sidestatus, - testInsert(t, hc, chainA[:64], NonStatTy, nil) + testInsert(t, hc, chainA[:64], NonStatTy, nil, forker) // Inserting the same some old, some new headers // 1 callbacks, 1 canon, 0 side - testInsert(t, hc, chainA[32:96], CanonStatTy, nil) + testInsert(t, hc, chainA[32:96], CanonStatTy, nil, forker) // Inserting side blocks, but not overtaking the canon chain - testInsert(t, hc, chainB[0:32], SideStatTy, nil) + testInsert(t, hc, chainB[0:32], SideStatTy, nil, forker) // Inserting more side blocks, but we don't have the parent - testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor) + testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor, forker) // Inserting more sideblocks, overtaking the canon chain - testInsert(t, hc, chainB[32:97], CanonStatTy, nil) + testInsert(t, hc, chainB[32:97], CanonStatTy, nil, forker) // Inserting more A-headers, taking back the canonicality - testInsert(t, hc, chainA[90:100], CanonStatTy, nil) + testInsert(t, hc, chainA[90:100], CanonStatTy, nil, forker) // And B becomes canon again - testInsert(t, hc, chainB[97:107], CanonStatTy, nil) + testInsert(t, hc, chainB[97:107], CanonStatTy, nil, forker) // And B becomes even longer - testInsert(t, hc, chainB[107:128], CanonStatTy, nil) + testInsert(t, hc, chainB[107:128], CanonStatTy, nil, forker) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 4028191b7..891349d5f 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -242,24 +242,6 @@ func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) { } } -// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow -// reporting correct numbers across restarts. 
-func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 { - data, _ := db.Get(fastTrieProgressKey) - if len(data) == 0 { - return 0 - } - return new(big.Int).SetBytes(data).Uint64() -} - -// WriteFastTrieProgress stores the fast sync trie process counter to support -// retrieving it across restarts. -func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) { - if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil { - log.Crit("Failed to store fast sync trie progress", "err", err) - } -} - // ReadTxIndexTail retrieves the number of oldest indexed block // whose transaction indices has been indexed. If the corresponding entry // is non-existent in database it means the indexing has been finished. @@ -297,6 +279,56 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) { } } +// ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going +// backwards towards genesis. This method assumes that the caller already has +// placed a cap on count, to prevent DoS issues. +// Since this method operates in head-towards-genesis mode, it will return an empty +// slice in case the head ('number') is missing. Hence, the caller must ensure that +// the head ('number') argument is actually an existing header. +// +// N.B: Since the input is a number, as opposed to a hash, it's implicit that +// this method only operates on canon headers. +func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValue { + var rlpHeaders []rlp.RawValue + if count == 0 { + return rlpHeaders + } + i := number + if count-1 > number { + // It's ok to request block 0, 1 item + count = number + 1 + } + limit, _ := db.Ancients() + // First read live blocks + if i >= limit { + // If we need to read live blocks, we need to figure out the hash first + hash := ReadCanonicalHash(db, number) + for ; i >= limit && count > 0; i-- { + if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 { + rlpHeaders = append(rlpHeaders, data) + // Get the parent hash for next query + hash = types.HeaderParentHashFromRLP(data) + } else { + break // Maybe got moved to ancients + } + count-- + } + } + if count == 0 { + return rlpHeaders + } + // read remaining from ancients + max := count * 700 + data, err := db.AncientRange(freezerHeaderTable, i+1-count, count, max) + if err == nil && uint64(len(data)) == count { + // the data is on the order [h, h+1, .., n] -- reordering needed + for i := range data { + rlpHeaders = append(rlpHeaders, data[len(data)-1-i]) + } + } + return rlpHeaders +} + // ReadHeaderRLP retrieves a block header in its raw RLP database encoding. 
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { var data []byte @@ -682,7 +714,7 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.C if logs := readLegacyLogs(db, hash, number, config); logs != nil { return logs } - log.Error("Invalid receipt array RLP", "hash", "err", err) + log.Error("Invalid receipt array RLP", "hash", hash, "err", err) return nil } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 50b0d5390..2c36de898 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -883,3 +883,67 @@ func BenchmarkDecodeRLPLogs(b *testing.B) { } }) } + +func TestHeadersRLPStorage(t *testing.T) { + // Have N headers in the freezer + frdir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("failed to create temp freezer dir: %v", err) + } + defer os.Remove(frdir) + + db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false) + if err != nil { + t.Fatalf("failed to create database with ancient backend") + } + defer db.Close() + // Create blocks + var chain []*types.Block + var pHash common.Hash + for i := 0; i < 100; i++ { + block := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(int64(i)), + Extra: []byte("test block"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + ParentHash: pHash, + }) + chain = append(chain, block) + pHash = block.Hash() + } + var receipts []types.Receipts = make([]types.Receipts, 100) + // Write first half to ancients + WriteAncientBlocks(db, chain[:50], receipts[:50], big.NewInt(100)) + // Write second half to db + for i := 50; i < 100; i++ { + WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64()) + WriteBlock(db, chain[i]) + } + checkSequence := func(from, amount int) { + headersRlp := ReadHeaderRange(db, uint64(from), uint64(amount)) + if have, want := len(headersRlp), amount; have != want { + t.Fatalf("have %d headers, want %d", have, want) + } + for i, headerRlp := range headersRlp { + var header types.Header + if err := rlp.DecodeBytes(headerRlp, &header); err != nil { + t.Fatal(err) + } + if have, want := header.Number.Uint64(), uint64(from-i); have != want { + t.Fatalf("wrong number, have %d want %d", have, want) + } + } + } + checkSequence(99, 20) // Latest block and 19 parents + checkSequence(99, 50) // Latest block -> all db blocks + checkSequence(99, 51) // Latest block -> one from ancients + checkSequence(99, 52) // Latest blocks -> two from ancients + checkSequence(50, 2) // One from db, one from ancients + checkSequence(49, 1) // One from ancients + checkSequence(49, 50) // All ancient ones + checkSequence(99, 100) // All blocks + checkSequence(0, 1) // Only genesis + checkSequence(1, 1) // Only block 1 + checkSequence(1, 2) // Genesis + block 1 +} diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 079e335fa..3b0fcf0f2 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -138,3 +138,38 @@ func PopUncleanShutdownMarker(db ethdb.KeyValueStore) { log.Warn("Failed to clear unclean-shutdown marker", "err", err) } } + +// UpdateUncleanShutdownMarker updates the last marker's timestamp to now. 
+func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) { + var uncleanShutdowns crashList + // Read old data + if data, err := db.Get(uncleanShutdownKey); err != nil { + log.Warn("Error reading unclean shutdown markers", "error", err) + } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil { + log.Warn("Error decoding unclean shutdown markers", "error", err) + } + // This shouldn't happen because we push a marker on Backend instantiation + count := len(uncleanShutdowns.Recent) + if count == 0 { + log.Warn("No unclean shutdown marker to update") + return + } + uncleanShutdowns.Recent[count-1] = uint64(time.Now().Unix()) + data, _ := rlp.EncodeToBytes(uncleanShutdowns) + if err := db.Put(uncleanShutdownKey, data); err != nil { + log.Warn("Failed to write unclean-shutdown marker", "err", err) + } +} + +// ReadTransitionStatus retrieves the eth2 transition status from the database +func ReadTransitionStatus(db ethdb.KeyValueReader) []byte { + data, _ := db.Get(transitionStatusKey) + return data +} + +// WriteTransitionStatus stores the eth2 transition status to the database +func WriteTransitionStatus(db ethdb.KeyValueWriter, data []byte) { + if err := db.Put(transitionStatusKey, data); err != nil { + log.Crit("Failed to store the eth2 transition status", "err", err) + } +} diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go index df140de0c..1c828662c 100644 --- a/core/rawdb/accessors_snapshot.go +++ b/core/rawdb/accessors_snapshot.go @@ -208,11 +208,3 @@ func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) { log.Crit("Failed to store snapshot sync status", "err", err) } } - -// DeleteSnapshotSyncStatus deletes the serialized sync status saved at the last -// shutdown -func DeleteSnapshotSyncStatus(db ethdb.KeyValueWriter) { - if err := db.Delete(snapshotSyncStatusKey); err != nil { - log.Crit("Failed to remove snapshot sync status", "err", err) - } -} diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index daee72159..0d5a5ee6a 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -247,7 +247,8 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan } } -// IndexTransactions creates txlookup indices of the specified block range. +// IndexTransactions creates txlookup indices of the specified block range. The from +// is included while to is excluded. // // This function iterates canonical chain in reverse order, it has one main advantage: // We can write tx index tail flag periodically even without the whole indexing @@ -339,6 +340,7 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch } // UnindexTransactions removes txlookup indices of the specified block range. +// The from is included while to is excluded. // // There is a passed channel, the whole procedure will be interrupted if any // signal received. 
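For context on the transition-status accessors added above, here is a minimal sketch of how a caller might round-trip an RLP-encoded status blob through them. The `transitionStatus` struct, its field names and the `markPoSFinalized` helper are illustrative assumptions; the actual payload layout is owned by the `consensus.Merger` introduced elsewhere in this change.

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

// transitionStatus is an illustrative RLP payload; the real layout is defined
// by the consensus.Merger, not by this sketch.
type transitionStatus struct {
	LeftPoW    bool // set once the terminal total difficulty is reached
	EnteredPoS bool // set once the first PoS block is finalized
}

// markPoSFinalized reloads any stored status, flips both flags and persists
// the blob again via the new rawdb helpers.
func markPoSFinalized(db ethdb.KeyValueStore) {
	var status transitionStatus
	if blob := rawdb.ReadTransitionStatus(db); len(blob) != 0 {
		if err := rlp.DecodeBytes(blob, &status); err != nil {
			log.Crit("Failed to decode transition status", "err", err)
		}
	}
	status.LeftPoW, status.EnteredPoS = true, true
	blob, err := rlp.EncodeToBytes(&status)
	if err != nil {
		log.Crit("Failed to encode transition status", "err", err)
	}
	rawdb.WriteTransitionStatus(db, blob)
}
```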
diff --git a/core/rawdb/database.go b/core/rawdb/database.go index c5af77667..5ef64d26a 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -395,7 +395,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, - uncleanShutdownKey, badBlockKey, + uncleanShutdownKey, badBlockKey, transitionStatusKey, } { if bytes.Equal(key, meta) { metadata.Add(size) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index d432db2ab..b35fcba45 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -75,6 +75,9 @@ var ( // uncleanShutdownKey tracks the list of local crashes uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db + // transitionStatusKey tracks the eth2 transition status. + transitionStatusKey = []byte("eth2-transition") + // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index a4373d8bc..9d74ca4d9 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -48,13 +48,13 @@ var ( // accountCheckRange is the upper limit of the number of accounts involved in // each range check. This is a value estimated based on experience. If this // value is too large, the failure rate of range prove will increase. Otherwise - // the the value is too small, the efficiency of the state recovery will decrease. + // the value is too small, the efficiency of the state recovery will decrease. accountCheckRange = 128 // storageCheckRange is the upper limit of the number of storage slots involved // in each range check. This is a value estimated based on experience. If this // value is too large, the failure rate of range prove will increase. Otherwise - // the the value is too small, the efficiency of the state recovery will decrease. + // the value is too small, the efficiency of the state recovery will decrease. storageCheckRange = 1024 // errMissingTrie is returned if the target trie is missing while the generation diff --git a/core/state/snapshot/wipe.go b/core/state/snapshot/wipe.go index 2cab57393..b774c37a4 100644 --- a/core/state/snapshot/wipe.go +++ b/core/state/snapshot/wipe.go @@ -21,67 +21,11 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" ) -// wipeSnapshot starts a goroutine to iterate over the entire key-value database -// and delete all the data associated with the snapshot (accounts, storage, -// metadata). After all is done, the snapshot range of the database is compacted -// to free up unused data blocks. 
-func wipeSnapshot(db ethdb.KeyValueStore, full bool) chan struct{} { - // Wipe the snapshot root marker synchronously - if full { - rawdb.DeleteSnapshotRoot(db) - } - // Wipe everything else asynchronously - wiper := make(chan struct{}, 1) - go func() { - if err := wipeContent(db); err != nil { - log.Error("Failed to wipe state snapshot", "err", err) // Database close will trigger this - return - } - close(wiper) - }() - return wiper -} - -// wipeContent iterates over the entire key-value database and deletes all the -// data associated with the snapshot (accounts, storage), but not the root hash -// as the wiper is meant to run on a background thread but the root needs to be -// removed in sync to avoid data races. After all is done, the snapshot range of -// the database is compacted to free up unused data blocks. -func wipeContent(db ethdb.KeyValueStore) error { - if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil, len(rawdb.SnapshotAccountPrefix)+common.HashLength, snapWipedAccountMeter, true); err != nil { - return err - } - if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, nil, nil, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength, snapWipedStorageMeter, true); err != nil { - return err - } - // Compact the snapshot section of the database to get rid of unused space - start := time.Now() - - log.Info("Compacting snapshot account area ") - end := common.CopyBytes(rawdb.SnapshotAccountPrefix) - end[len(end)-1]++ - - if err := db.Compact(rawdb.SnapshotAccountPrefix, end); err != nil { - return err - } - log.Info("Compacting snapshot storage area ") - end = common.CopyBytes(rawdb.SnapshotStoragePrefix) - end[len(end)-1]++ - - if err := db.Compact(rawdb.SnapshotStoragePrefix, end); err != nil { - return err - } - log.Info("Compacted snapshot area in database", "elapsed", common.PrettyDuration(time.Since(start))) - - return nil -} - // wipeKeyRange deletes a range of keys from the database starting with prefix // and having a specific total key length. The start and limit is optional for // specifying a particular key range for deletion. 
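Since `wipeSnapshot` and `wipeContent` are deleted, `wipeKeyRange` remains the package-internal wipe primitive (the test hunk below drives it directly). As a sketch under that assumption, the removed account/storage wipe, minus the old compaction pass, could be reconstructed as follows; `wipeAllSnapshotEntries` is a hypothetical name and the snippet belongs inside `core/state/snapshot`:

```go
package snapshot

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// wipeAllSnapshotEntries clears the account and storage snapshot namespaces
// through the retained wipeKeyRange helper, mirroring the deleted wipeContent
// (without the trailing database compaction it used to perform).
func wipeAllSnapshotEntries(db ethdb.KeyValueStore) error {
	// Account snapshot entries: prefix + account hash
	if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil,
		len(rawdb.SnapshotAccountPrefix)+common.HashLength, snapWipedAccountMeter, true); err != nil {
		return err
	}
	// Storage snapshot entries: prefix + account hash + slot hash
	return wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, nil, nil,
		len(rawdb.SnapshotStoragePrefix)+2*common.HashLength, snapWipedStorageMeter, true)
}
```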
diff --git a/core/state/snapshot/wipe_test.go b/core/state/snapshot/wipe_test.go index 2c45652a9..c5b340136 100644 --- a/core/state/snapshot/wipe_test.go +++ b/core/state/snapshot/wipe_test.go @@ -30,95 +30,50 @@ import ( func TestWipe(t *testing.T) { // Create a database with some random snapshot data db := memorydb.New() - for i := 0; i < 128; i++ { - account := randomHash() - rawdb.WriteAccountSnapshot(db, account, randomHash().Bytes()) - for j := 0; j < 1024; j++ { - rawdb.WriteStorageSnapshot(db, account, randomHash(), randomHash().Bytes()) - } + rawdb.WriteAccountSnapshot(db, randomHash(), randomHash().Bytes()) } - rawdb.WriteSnapshotRoot(db, randomHash()) - // Add some random non-snapshot data too to make wiping harder - for i := 0; i < 65536; i++ { - // Generate a key that's the wrong length for a state snapshot item - var keysize int - for keysize == 0 || keysize == 32 || keysize == 64 { - keysize = 8 + rand.Intn(64) // +8 to ensure we will "never" randomize duplicates - } - // Randomize the suffix, dedup and inject it under the snapshot namespace - keysuffix := make([]byte, keysize) + for i := 0; i < 500; i++ { + // Generate keys with wrong length for a state snapshot item + keysuffix := make([]byte, 31) rand.Read(keysuffix) - - if rand.Int31n(2) == 0 { - db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes()) - } else { - db.Put(append(rawdb.SnapshotStoragePrefix, keysuffix...), randomHash().Bytes()) + db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes()) + keysuffix = make([]byte, 33) + rand.Read(keysuffix) + db.Put(append(rawdb.SnapshotAccountPrefix, keysuffix...), randomHash().Bytes()) + } + count := func() (items int) { + it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil) + defer it.Release() + for it.Next() { + if len(it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength { + items++ + } } + return items } // Sanity check that all the keys are present - var items int - - it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil) - defer it.Release() - - for it.Next() { - key := it.Key() - if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength { - items++ - } + if items := count(); items != 128 { + t.Fatalf("snapshot size mismatch: have %d, want %d", items, 128) } - it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil) - defer it.Release() - - for it.Next() { - key := it.Key() - if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength { - items++ - } + // Wipe the accounts + if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil, + len(rawdb.SnapshotAccountPrefix)+common.HashLength, snapWipedAccountMeter, true); err != nil { + t.Fatal(err) } - if items != 128+128*1024 { - t.Fatalf("snapshot size mismatch: have %d, want %d", items, 128+128*1024) - } - if hash := rawdb.ReadSnapshotRoot(db); hash == (common.Hash{}) { - t.Errorf("snapshot block marker mismatch: have %#x, want ", hash) - } - // Wipe all snapshot entries from the database - <-wipeSnapshot(db, true) - // Iterate over the database end ensure no snapshot information remains - it = db.NewIterator(rawdb.SnapshotAccountPrefix, nil) - defer it.Release() - - for it.Next() { - key := it.Key() - if len(key) == len(rawdb.SnapshotAccountPrefix)+common.HashLength { - t.Errorf("snapshot entry remained after wipe: %x", key) - } - } - it = db.NewIterator(rawdb.SnapshotStoragePrefix, nil) - defer it.Release() - - for it.Next() { - key := it.Key() - if len(key) == len(rawdb.SnapshotStoragePrefix)+2*common.HashLength { - 
t.Errorf("snapshot entry remained after wipe: %x", key) - } - } - if hash := rawdb.ReadSnapshotRoot(db); hash != (common.Hash{}) { - t.Errorf("snapshot block marker remained after wipe: %#x", hash) + if items := count(); items != 0 { + t.Fatalf("snapshot size mismatch: have %d, want %d", items, 0) } // Iterate over the database and ensure miscellaneous items are present - items = 0 - - it = db.NewIterator(nil, nil) + items := 0 + it := db.NewIterator(nil, nil) defer it.Release() - for it.Next() { items++ } - if items != 65536 { - t.Fatalf("misc item count mismatch: have %d, want %d", items, 65536) + if items != 1000 { + t.Fatalf("misc item count mismatch: have %d, want %d", items, 1000) } } diff --git a/core/state/sync.go b/core/state/sync.go index 734961d9c..cc7d01a21 100644 --- a/core/state/sync.go +++ b/core/state/sync.go @@ -27,7 +27,7 @@ import ( ) // NewStateSync create a new state trie download scheduler. -func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync { +func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync { // Register the storage slot callback if the external callback is specified. var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error if onLeaf != nil { @@ -52,6 +52,6 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), hexpath, parent) return nil } - syncer = trie.NewSync(root, database, onAccount, bloom) + syncer = trie.NewSync(root, database, onAccount) return syncer } diff --git a/core/state/sync_test.go b/core/state/sync_test.go index beb8fcfd9..007590c76 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -26,7 +26,6 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) @@ -134,7 +133,7 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error { // Tests that an empty state is not scheduled for syncing. func TestEmptyStateSync(t *testing.T) { empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil) + sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), nil) if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 { t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes) } @@ -171,7 +170,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) nodes, paths, codes := sched.Missing(count) var ( @@ -250,7 +249,7 @@ func TestIterativeDelayedStateSync(t *testing.T) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) nodes, _, codes := sched.Missing(0) queue := append(append([]common.Hash{}, nodes...), codes...) 
@@ -298,7 +297,7 @@ func testIterativeRandomStateSync(t *testing.T, count int) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) queue := make(map[common.Hash]struct{}) nodes, _, codes := sched.Missing(count) @@ -348,7 +347,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) queue := make(map[common.Hash]struct{}) nodes, _, codes := sched.Missing(0) @@ -415,7 +414,7 @@ func TestIncompleteStateSync(t *testing.T) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil) + sched := NewStateSync(srcRoot, dstDb, nil) var added []common.Hash diff --git a/core/tx_pool.go b/core/tx_pool.go index 0e3844bcb..3329d736a 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -621,9 +621,8 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if err != nil { return ErrInvalidSender } - // Drop non-local transactions under our own minimal accepted gas price or tip. - pendingBaseFee := pool.priced.urgent.baseFee - if !local && tx.EffectiveGasTipIntCmp(pool.gasPrice, pendingBaseFee) < 0 { + // Drop non-local transactions under our own minimal accepted gas price or tip + if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { return ErrUnderpriced } // Ensure the transaction adheres to nonce ordering diff --git a/core/types/block.go b/core/types/block.go index 360f1eb47..f38c55c1f 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -85,6 +85,12 @@ type Header struct { // BaseFee was added by EIP-1559 and is ignored in legacy headers. BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"` + + /* + TODO (MariusVanDerWijden) Add this field once needed + // Random was added during the merge and contains the BeaconState randomness + Random common.Hash `json:"random" rlp:"optional"` + */ } // field type overrides for gencodec @@ -383,3 +389,21 @@ func (b *Block) Hash() common.Hash { } type Blocks []*Block + +// HeaderParentHashFromRLP returns the parentHash of an RLP-encoded +// header. If 'header' is invalid, the zero hash is returned. +func HeaderParentHashFromRLP(header []byte) common.Hash { + // parentHash is the first list element. 
+	listContent, _, err := rlp.SplitList(header)
+	if err != nil {
+		return common.Hash{}
+	}
+	parentHash, _, err := rlp.SplitString(listContent)
+	if err != nil {
+		return common.Hash{}
+	}
+	if len(parentHash) != 32 {
+		return common.Hash{}
+	}
+	return common.BytesToHash(parentHash)
+}
diff --git a/core/types/block_test.go b/core/types/block_test.go
index 0b9a4def8..5cdea3fc0 100644
--- a/core/types/block_test.go
+++ b/core/types/block_test.go
@@ -281,3 +281,64 @@ func makeBenchBlock() *Block {
 	}
 	return NewBlock(header, txs, uncles, receipts, newHasher())
 }
+
+func TestRlpDecodeParentHash(t *testing.T) {
+	// A minimum one
+	want := common.HexToHash("0x112233445566778899001122334455667788990011223344556677889900aabb")
+	if rlpData, err := rlp.EncodeToBytes(Header{ParentHash: want}); err != nil {
+		t.Fatal(err)
+	} else {
+		if have := HeaderParentHashFromRLP(rlpData); have != want {
+			t.Fatalf("have %x, want %x", have, want)
+		}
+	}
+	// And a maximum one
+	// | Difficulty | dynamic| *big.Int | 0x5ad3c2c71bbff854908 (current mainnet TD: 76 bits) |
+	// | Number     | dynamic| *big.Int | 64 bits |
+	// | Extra      | dynamic| []byte   | 65+32 byte (clique) |
+	// | BaseFee    | dynamic| *big.Int | 64 bits |
+	mainnetTd := new(big.Int)
+	mainnetTd.SetString("5ad3c2c71bbff854908", 16)
+	if rlpData, err := rlp.EncodeToBytes(Header{
+		ParentHash: want,
+		Difficulty: mainnetTd,
+		Number:     new(big.Int).SetUint64(math.MaxUint64),
+		Extra:      make([]byte, 65+32),
+		BaseFee:    new(big.Int).SetUint64(math.MaxUint64),
+	}); err != nil {
+		t.Fatal(err)
+	} else {
+		if have := HeaderParentHashFromRLP(rlpData); have != want {
+			t.Fatalf("have %x, want %x", have, want)
+		}
+	}
+	// Also test a very very large header.
+	{
+		// The rlp-encoding of the header below causes a _total_ length of 65540,
+		// which is the first to blow the fast-path.
+		h := Header{
+			ParentHash: want,
+			Extra:      make([]byte, 65041),
+		}
+		if rlpData, err := rlp.EncodeToBytes(h); err != nil {
+			t.Fatal(err)
+		} else {
+			if have := HeaderParentHashFromRLP(rlpData); have != want {
+				t.Fatalf("have %x, want %x", have, want)
+			}
+		}
+	}
+	{
+		// Test some invalid, erroneous inputs
+		for i, rlpData := range [][]byte{
+			nil,
+			common.FromHex("0x"),
+			common.FromHex("0x01"),
+			common.FromHex("0x3031323334"),
+		} {
+			if have, want := HeaderParentHashFromRLP(rlpData), (common.Hash{}); have != want {
+				t.Fatalf("invalid %d: have %x, want %x", i, have, want)
+			}
+		}
+	}
+}
diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go
index 585c029d8..53f246ea1 100644
--- a/core/types/dynamic_fee_tx.go
+++ b/core/types/dynamic_fee_tx.go
@@ -25,8 +25,8 @@ import (
 type DynamicFeeTx struct {
 	ChainID   *big.Int
 	Nonce     uint64
-	GasTipCap *big.Int
-	GasFeeCap *big.Int
+	GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas
+	GasFeeCap *big.Int // a.k.a. maxFeePerGas
 	Gas       uint64
 	To        *common.Address `rlp:"nil"` // nil means contract creation
 	Value     *big.Int
diff --git a/core/vm/analysis.go b/core/vm/analysis.go
index 449cded2a..3733bab6a 100644
--- a/core/vm/analysis.go
+++ b/core/vm/analysis.go
@@ -17,12 +17,12 @@
 package vm
 
 const (
-	set2BitsMask = uint16(0b1100_0000_0000_0000)
-	set3BitsMask = uint16(0b1110_0000_0000_0000)
-	set4BitsMask = uint16(0b1111_0000_0000_0000)
-	set5BitsMask = uint16(0b1111_1000_0000_0000)
-	set6BitsMask = uint16(0b1111_1100_0000_0000)
-	set7BitsMask = uint16(0b1111_1110_0000_0000)
+	set2BitsMask = uint16(0b11)
+	set3BitsMask = uint16(0b111)
+	set4BitsMask = uint16(0b1111)
+	set5BitsMask = uint16(0b1_1111)
+	set6BitsMask = uint16(0b11_1111)
+	set7BitsMask = uint16(0b111_1111)
 )
 
 // bitvec is a bit vector which maps bytes in a program.
@@ -30,32 +30,26 @@ const (
 // its data (i.e. argument of PUSHxx).
 type bitvec []byte
 
-var lookup = [8]byte{
-	0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1,
-}
-
 func (bits bitvec) set1(pos uint64) {
-	bits[pos/8] |= lookup[pos%8]
+	bits[pos/8] |= 1 << (pos % 8)
 }
 
 func (bits bitvec) setN(flag uint16, pos uint64) {
-	a := flag >> (pos % 8)
-	bits[pos/8] |= byte(a >> 8)
-	if b := byte(a); b != 0 {
-		// If the bit-setting affects the neighbouring byte, we can assign - no need to OR it,
-		// since it's the first write to that byte
+	a := flag << (pos % 8)
+	bits[pos/8] |= byte(a)
+	if b := byte(a >> 8); b != 0 {
 		bits[pos/8+1] = b
 	}
 }
 
 func (bits bitvec) set8(pos uint64) {
-	a := byte(0xFF >> (pos % 8))
+	a := byte(0xFF << (pos % 8))
 	bits[pos/8] |= a
 	bits[pos/8+1] = ^a
 }
 
 func (bits bitvec) set16(pos uint64) {
-	a := byte(0xFF >> (pos % 8))
+	a := byte(0xFF << (pos % 8))
 	bits[pos/8] |= a
 	bits[pos/8+1] = 0xFF
 	bits[pos/8+2] = ^a
@@ -63,7 +57,7 @@ func (bits bitvec) set16(pos uint64) {
 
 // codeSegment checks if the position is in a code segment.
 func (bits *bitvec) codeSegment(pos uint64) bool {
-	return ((*bits)[pos/8] & (0x80 >> (pos % 8))) == 0
+	return (((*bits)[pos/8] >> (pos % 8)) & 1) == 0
 }
 
 // codeBitmap collects data locations in code.
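The rewrite flips the jump-destination bitmap from MSB-first to LSB-first within each byte: `set1` becomes a plain shift instead of a table lookup, and `setN` computes any spill into the next byte from a single left shift. A standalone sketch of the new scheme follows (the methods mirror the diff; the `main` demo and its constants are an illustrative assumption):

```go
package main

import "fmt"

type bitvec []byte

// set1 marks a single position as data; LSB-first, so bit k of a byte
// corresponds to position byte*8+k and is set with a plain left shift.
func (bits bitvec) set1(pos uint64) {
	bits[pos/8] |= 1 << (pos % 8)
}

// setN marks up to 16 consecutive positions: shifting the flag left by the
// in-byte offset leaves the low byte for the current cell and any
// spill-over in the high byte for the next cell.
func (bits bitvec) setN(flag uint16, pos uint64) {
	a := flag << (pos % 8)
	bits[pos/8] |= byte(a)
	if b := byte(a >> 8); b != 0 {
		bits[pos/8+1] = b
	}
}

// codeSegment reports whether pos is an opcode (bit unset) rather than
// PUSH data (bit set).
func (bits bitvec) codeSegment(pos uint64) bool {
	return (bits[pos/8]>>(pos%8))&1 == 0
}

func main() {
	const set2BitsMask = uint16(0b11)
	bits := make(bitvec, 2)
	// Mark the two data bytes following a PUSH2 at position 0.
	bits.setN(set2BitsMask, 1)
	for pos := uint64(0); pos < 4; pos++ {
		fmt.Printf("pos %d: code=%v\n", pos, bits.codeSegment(pos))
	}
	// Prints: pos 0 and 3 are code, pos 1 and 2 are PUSH data.
}
```

The payoff is mostly in `set1` and `codeSegment`, where the lookup table and the `0x80 >>` arithmetic reduce to a single shift-and-mask each.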
diff --git a/core/vm/analysis_test.go b/core/vm/analysis_test.go index d7f21e04a..398861f8a 100644 --- a/core/vm/analysis_test.go +++ b/core/vm/analysis_test.go @@ -17,6 +17,7 @@ package vm import ( + "math/bits" "testing" "github.com/ethereum/go-ethereum/crypto" @@ -28,24 +29,27 @@ func TestJumpDestAnalysis(t *testing.T) { exp byte which int }{ - {[]byte{byte(PUSH1), 0x01, 0x01, 0x01}, 0x40, 0}, - {[]byte{byte(PUSH1), byte(PUSH1), byte(PUSH1), byte(PUSH1)}, 0x50, 0}, - {[]byte{byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), 0x01, 0x01, 0x01}, 0x7F, 0}, - {[]byte{byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x80, 1}, - {[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), byte(PUSH2), byte(PUSH2), 0x01, 0x01, 0x01}, 0x03, 0}, - {[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), 0x01, 0x01, 0x01, 0x01, 0x01}, 0x00, 1}, - {[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x74, 0}, - {[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x00, 1}, - {[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x3F, 0}, - {[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0xC0, 1}, - {[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x7F, 0}, - {[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0xFF, 1}, - {[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x80, 2}, - {[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0x7f, 0}, - {[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0xA0, 1}, - {[]byte{byte(PUSH32)}, 0x7F, 0}, - {[]byte{byte(PUSH32)}, 0xFF, 1}, - {[]byte{byte(PUSH32)}, 0xFF, 2}, + {[]byte{byte(PUSH1), 0x01, 0x01, 0x01}, 0b0000_0010, 0}, + {[]byte{byte(PUSH1), byte(PUSH1), byte(PUSH1), byte(PUSH1)}, 0b0000_1010, 0}, + {[]byte{0x00, byte(PUSH1), 0x00, byte(PUSH1), 0x00, byte(PUSH1), 0x00, byte(PUSH1)}, 0b0101_0100, 0}, + {[]byte{byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), 0x01, 0x01, 0x01}, bits.Reverse8(0x7F), 0}, + {[]byte{byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0001, 1}, + {[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), byte(PUSH2), byte(PUSH2), 0x01, 0x01, 0x01}, 0b1100_0000, 0}, + {[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0000, 1}, + {[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0010_1110, 0}, + {[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0000, 1}, + {[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b1111_1100, 0}, + {[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0011, 1}, + {[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b1111_1110, 0}, + {[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b1111_1111, 1}, + {[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0001, 2}, + {[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0b1111_1110, 0}, + {[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 
0x01}, 0b0000_0101, 1}, + {[]byte{byte(PUSH32)}, 0b1111_1110, 0}, + {[]byte{byte(PUSH32)}, 0b1111_1111, 1}, + {[]byte{byte(PUSH32)}, 0b1111_1111, 2}, + {[]byte{byte(PUSH32)}, 0b1111_1111, 3}, + {[]byte{byte(PUSH32)}, 0b0000_0001, 4}, } for i, test := range tests { ret := codeBitmap(test.code) diff --git a/core/vm/contract.go b/core/vm/contract.go index 61dbd5007..bb0902969 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -143,16 +143,11 @@ func (c *Contract) AsDelegate() *Contract { // GetOp returns the n'th element in the contract's byte array func (c *Contract) GetOp(n uint64) OpCode { - return OpCode(c.GetByte(n)) -} - -// GetByte returns the n'th byte in the contract's byte array -func (c *Contract) GetByte(n uint64) byte { if n < uint64(len(c.Code)) { - return c.Code[n] + return OpCode(c.Code[n]) } - return 0 + return STOP } // Caller returns the caller of the contract. diff --git a/core/vm/errors.go b/core/vm/errors.go index 565eecdd7..004f8ef1c 100644 --- a/core/vm/errors.go +++ b/core/vm/errors.go @@ -36,6 +36,10 @@ var ( ErrGasUintOverflow = errors.New("gas uint64 overflow") ErrInvalidCode = errors.New("invalid code: must not begin with 0xef") ErrNonceUintOverflow = errors.New("nonce uint64 overflow") + + // errStopToken is an internal token indicating interpreter loop termination, + // never returned to outside callers. + errStopToken = errors.New("stop token") ) // ErrStackUnderflow wraps an evm error when the items on the stack less diff --git a/core/vm/evm.go b/core/vm/evm.go index 618bbcf17..2c7880b3b 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -165,9 +165,6 @@ func (evm *EVM) Interpreter() *EVMInterpreter { // the necessary steps to create accounts and reverses the state in case of an // execution error or failed value transfer. func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { - if evm.Config.NoRecursion && evm.depth > 0 { - return nil, gas, nil - } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, ErrDepth @@ -254,9 +251,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // CallCode differs from Call in the sense that it executes the given address' // code with the caller as context. func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { - if evm.Config.NoRecursion && evm.depth > 0 { - return nil, gas, nil - } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, ErrDepth @@ -305,9 +299,6 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, // DelegateCall differs from CallCode in the sense that it executes the given address' // code with the caller as context and the caller is set to the caller of the caller. 
 func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
-	if evm.Config.NoRecursion && evm.depth > 0 {
-		return nil, gas, nil
-	}
 	// Fail if we're trying to execute above the call depth limit
 	if evm.depth > int(params.CallCreateDepth) {
 		return nil, gas, ErrDepth
@@ -347,9 +338,6 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
 // Opcodes that attempt to perform such modifications will result in exceptions
 // instead of performing the modifications.
 func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
-	if evm.Config.NoRecursion && evm.depth > 0 {
-		return nil, gas, nil
-	}
 	// Fail if we're trying to execute above the call depth limit
 	if evm.depth > int(params.CallCreateDepth) {
 		return nil, gas, ErrDepth
@@ -451,10 +439,6 @@
 	contract := NewContract(caller, AccountRef(address), value, gas)
 	contract.SetCodeOptionalHash(&address, codeAndHash)
 
-	if evm.Config.NoRecursion && evm.depth > 0 {
-		return nil, address, gas, nil
-	}
-
 	if evm.Config.Debug {
 		if evm.depth == 0 {
 			evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
@@ -518,7 +502,7 @@
 // Create2 creates a new contract using code as deployment code.
 //
-// The different between Create2 with Create is Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
+// The difference between Create2 and Create is that Create2 uses keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:]
 // instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { codeAndHash := &codeAndHash{code: code} diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 19d2198af..4c2cb3e5c 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -247,7 +247,7 @@ func makeGasLog(n uint64) gasFunc { } } -func gasSha3(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { +func gasKeccak256(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { gas, err := memoryGasCost(mem, memorySize) if err != nil { return 0, err @@ -256,7 +256,7 @@ func gasSha3(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if overflow { return 0, ErrGasUintOverflow } - if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow { + if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Keccak256WordGas); overflow { return 0, ErrGasUintOverflow } if gas, overflow = math.SafeAdd(gas, wordGas); overflow { @@ -290,7 +290,7 @@ func gasCreate2(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memoryS if overflow { return 0, ErrGasUintOverflow } - if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow { + if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Keccak256WordGas); overflow { return 0, ErrGasUintOverflow } if gas, overflow = math.SafeAdd(gas, wordGas); overflow { diff --git a/core/vm/instructions.go b/core/vm/instructions.go index bda480f08..4eda3bf53 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -17,6 +17,8 @@ package vm import ( + "sync/atomic" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" @@ -231,7 +233,7 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte return nil, nil } -func opSha3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { +func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { offset, size := scope.Stack.pop(), scope.Stack.peek() data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) @@ -514,6 +516,9 @@ func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by } func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if interpreter.readOnly { + return nil, ErrWriteProtection + } loc := scope.Stack.pop() val := scope.Stack.pop() interpreter.evm.StateDB.SetState(scope.Contract.Address(), @@ -522,23 +527,27 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b } func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if atomic.LoadInt32(&interpreter.evm.abort) != 0 { + return nil, errStopToken + } pos := scope.Stack.pop() if !scope.Contract.validJumpdest(&pos) { return nil, ErrInvalidJump } - *pc = pos.Uint64() + *pc = pos.Uint64() - 1 // pc will be increased by the interpreter loop return nil, nil } func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if atomic.LoadInt32(&interpreter.evm.abort) != 0 { + return nil, errStopToken + } pos, cond := scope.Stack.pop(), scope.Stack.pop() if !cond.IsZero() { if !scope.Contract.validJumpdest(&pos) { return nil, ErrInvalidJump } - *pc = pos.Uint64() - } else { - 
*pc++ + *pc = pos.Uint64() - 1 // pc will be increased by the interpreter loop } return nil, nil } @@ -563,6 +572,9 @@ func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte } func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if interpreter.readOnly { + return nil, ErrWriteProtection + } var ( value = scope.Stack.pop() offset, size = scope.Stack.pop(), scope.Stack.pop() @@ -598,12 +610,17 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b scope.Contract.Gas += returnGas if suberr == ErrExecutionReverted { + interpreter.returnData = res // set REVERT data to return data buffer return res, nil } + interpreter.returnData = nil // clear dirty return data buffer return nil, nil } func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if interpreter.readOnly { + return nil, ErrWriteProtection + } var ( endowment = scope.Stack.pop() offset, size = scope.Stack.pop(), scope.Stack.pop() @@ -634,8 +651,10 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] scope.Contract.Gas += returnGas if suberr == ErrExecutionReverted { + interpreter.returnData = res // set REVERT data to return data buffer return res, nil } + interpreter.returnData = nil // clear dirty return data buffer return nil, nil } @@ -651,6 +670,9 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt // Get the arguments from the memory. args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) + if interpreter.readOnly && !value.IsZero() { + return nil, ErrWriteProtection + } var bigVal = big0 //TODO: use uint256.Int instead of converting with toBig() // By using big0 here, we save an alloc for the most common case (non-ether-transferring contract calls), @@ -674,6 +696,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } scope.Contract.Gas += returnGas + interpreter.returnData = ret return ret, nil } @@ -709,6 +732,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } scope.Contract.Gas += returnGas + interpreter.returnData = ret return ret, nil } @@ -737,6 +761,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext } scope.Contract.Gas += returnGas + interpreter.returnData = ret return ret, nil } @@ -765,6 +790,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) } scope.Contract.Gas += returnGas + interpreter.returnData = ret return ret, nil } @@ -772,21 +798,29 @@ func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b offset, size := scope.Stack.pop(), scope.Stack.pop() ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) - return ret, nil + return ret, errStopToken } func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { offset, size := scope.Stack.pop(), scope.Stack.pop() ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64())) - return ret, nil + interpreter.returnData = ret + return ret, ErrExecutionReverted +} + +func opUndefined(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + return nil, &ErrInvalidOpCode{opcode: OpCode(scope.Contract.Code[*pc])} } func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - return nil, nil + return nil, errStopToken } -func opSuicide(pc *uint64, interpreter *EVMInterpreter, 
scope *ScopeContext) ([]byte, error) { +func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if interpreter.readOnly { + return nil, ErrWriteProtection + } beneficiary := scope.Stack.pop() balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) @@ -795,7 +829,7 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil) } - return nil, nil + return nil, errStopToken } // following functions are used by the instruction jump table @@ -803,6 +837,9 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] // make log instruction function func makeLog(size int) executionFunc { return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { + if interpreter.readOnly { + return nil, ErrWriteProtection + } topics := make([]common.Hash, size) stack := scope.Stack mStart, mSize := stack.pop(), stack.pop() diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 560d26a0b..e67acd832 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -525,12 +525,14 @@ func TestOpMstore(t *testing.T) { mem.Resize(64) pc := uint64(0) v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700" - stack.pushN(*new(uint256.Int).SetBytes(common.Hex2Bytes(v)), *new(uint256.Int)) + stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v))) + stack.push(new(uint256.Int)) opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v { t.Fatalf("Mstore fail, got %v, expected %v", got, v) } - stack.pushN(*new(uint256.Int).SetUint64(0x1), *new(uint256.Int)) + stack.push(new(uint256.Int).SetUint64(0x1)) + stack.push(new(uint256.Int)) opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { t.Fatalf("Mstore failed to overwrite previous value") @@ -553,12 +555,13 @@ func BenchmarkOpMstore(bench *testing.B) { bench.ResetTimer() for i := 0; i < bench.N; i++ { - stack.pushN(*value, *memStart) + stack.push(value) + stack.push(memStart) opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) } } -func BenchmarkOpSHA3(bench *testing.B) { +func BenchmarkOpKeccak256(bench *testing.B) { var ( env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() @@ -572,8 +575,9 @@ func BenchmarkOpSHA3(bench *testing.B) { bench.ResetTimer() for i := 0; i < bench.N; i++ { - stack.pushN(*uint256.NewInt(32), *start) - opSha3(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) + stack.push(uint256.NewInt(32)) + stack.push(start) + opKeccak256(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) } } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 4315750ba..1660e3ce0 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -18,7 +18,6 @@ package vm import ( "hash" - "sync/atomic" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" @@ -29,11 +28,10 @@ import ( type Config struct { Debug bool // Enables debugging Tracer EVMLogger // Opcode logger - NoRecursion bool // Disables call, callcode, delegate call and create NoBaseFee bool // 
Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages - JumpTable [256]*operation // EVM instruction table, automatically populated if unset + JumpTable *JumpTable // EVM instruction table, automatically populated if unset ExtraEips []int // Additional EIPS that are to be enabled } @@ -68,39 +66,37 @@ type EVMInterpreter struct { // NewEVMInterpreter returns a new instance of the Interpreter. func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { - // We use the STOP instruction whether to see - // the jump table was initialised. If it was not - // we'll set the default jump table. - if cfg.JumpTable[STOP] == nil { - var jt JumpTable + // If jump table was not initialised we set the default one. + if cfg.JumpTable == nil { switch { case evm.chainRules.IsLondon: - jt = londonInstructionSet + cfg.JumpTable = &londonInstructionSet case evm.chainRules.IsBerlin: - jt = berlinInstructionSet + cfg.JumpTable = &berlinInstructionSet case evm.chainRules.IsIstanbul: - jt = istanbulInstructionSet + cfg.JumpTable = &istanbulInstructionSet case evm.chainRules.IsConstantinople: - jt = constantinopleInstructionSet + cfg.JumpTable = &constantinopleInstructionSet case evm.chainRules.IsByzantium: - jt = byzantiumInstructionSet + cfg.JumpTable = &byzantiumInstructionSet case evm.chainRules.IsEIP158: - jt = spuriousDragonInstructionSet + cfg.JumpTable = &spuriousDragonInstructionSet case evm.chainRules.IsEIP150: - jt = tangerineWhistleInstructionSet + cfg.JumpTable = &tangerineWhistleInstructionSet case evm.chainRules.IsHomestead: - jt = homesteadInstructionSet + cfg.JumpTable = &homesteadInstructionSet default: - jt = frontierInstructionSet + cfg.JumpTable = &frontierInstructionSet } for i, eip := range cfg.ExtraEips { - if err := EnableEIP(eip, &jt); err != nil { + copy := *cfg.JumpTable + if err := EnableEIP(eip, ©); err != nil { // Disable it, so caller can check if it's activated or not cfg.ExtraEips = append(cfg.ExtraEips[:i], cfg.ExtraEips[i+1:]...) log.Error("EIP activation failed", "eip", eip, "error", err) } + cfg.JumpTable = © } - cfg.JumpTable = jt } return &EVMInterpreter{ @@ -180,101 +176,70 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during // the execution of one of the operations or until the done flag is set by the // parent context. - steps := 0 for { - steps++ - if steps%1000 == 0 && atomic.LoadInt32(&in.evm.abort) != 0 { - break - } if in.cfg.Debug { // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } - // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. op = contract.GetOp(pc) operation := in.cfg.JumpTable[op] - if operation == nil { - return nil, &ErrInvalidOpCode{opcode: op} - } + cost = operation.constantGas // For tracing // Validate stack if sLen := stack.len(); sLen < operation.minStack { return nil, &ErrStackUnderflow{stackLen: sLen, required: operation.minStack} } else if sLen > operation.maxStack { return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack} } - // If the operation is valid, enforce write restrictions - if in.readOnly && in.evm.chainRules.IsByzantium { - // If the interpreter is operating in readonly mode, make sure no - // state-modifying operation is performed. The 3rd stack item - // for a call operation is the value. 
Transferring value from one
-		// account to the others means the state is modified and should also
-		// return with an error.
-		if operation.writes || (op == CALL && stack.Back(2).Sign() != 0) {
-			return nil, ErrWriteProtection
-		}
-	}
-	// Static portion of gas
-	cost = operation.constantGas // For tracing
-	if !contract.UseGas(operation.constantGas) {
+	if !contract.UseGas(cost) {
 		return nil, ErrOutOfGas
 	}
-
-	var memorySize uint64
-	// calculate the new memory size and expand the memory to fit
-	// the operation
-	// Memory check needs to be done prior to evaluating the dynamic gas portion,
-	// to detect calculation overflows
-	if operation.memorySize != nil {
-		memSize, overflow := operation.memorySize(stack)
-		if overflow {
-			return nil, ErrGasUintOverflow
-		}
-		// memory is expanded in words of 32 bytes. Gas
-		// is also calculated in words.
-		if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
-			return nil, ErrGasUintOverflow
-		}
-	}
-	// Dynamic portion of gas
-	// consume the gas and return an error if not enough gas is available.
-	// cost is explicitly set so that the capture state defer method can get the proper cost
 	if operation.dynamicGas != nil {
+		// All ops with a dynamic memory usage also have a dynamic gas cost.
+		var memorySize uint64
+		// calculate the new memory size and expand the memory to fit
+		// the operation
+		// Memory check needs to be done prior to evaluating the dynamic gas portion,
+		// to detect calculation overflows
+		if operation.memorySize != nil {
+			memSize, overflow := operation.memorySize(stack)
+			if overflow {
+				return nil, ErrGasUintOverflow
+			}
+			// memory is expanded in words of 32 bytes. Gas
+			// is also calculated in words.
+			if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
+				return nil, ErrGasUintOverflow
+			}
+		}
+		// Consume the gas and return an error if not enough gas is available.
+		// cost is explicitly set so that the capture state defer method can get the proper cost
 		var dynamicCost uint64
 		dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize)
-		cost += dynamicCost // total cost, for debug tracing
+		cost += dynamicCost // for tracing
 		if err != nil || !contract.UseGas(dynamicCost) {
 			return nil, ErrOutOfGas
 		}
+		if memorySize > 0 {
+			mem.Resize(memorySize)
+		}
 	}
-	if memorySize > 0 {
-		mem.Resize(memorySize)
-	}
-
 	if in.cfg.Debug {
 		in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
 		logged = true
 	}
-	// execute the operation
 	res, err = operation.execute(&pc, in, callContext)
-	// if the operation clears the return data (e.g. it has returning data)
-	// set the last return to the result of the operation.
-	if operation.returns {
-		in.returnData = res
-	}
-
-	switch {
-	case err != nil:
-		return nil, err
-	case operation.reverts:
-		return res, ErrExecutionReverted
-	case operation.halts:
-		return res, nil
-	case !operation.jumps:
-		pc++
+	if err != nil {
+		break
 	}
+	pc++
 }
-	return nil, nil
+
+	if err == errStopToken {
+		err = nil // clear stop token error
+	}
+
+	return res, err
 }
diff --git a/core/vm/interpreter_test.go b/core/vm/interpreter_test.go
new file mode 100644
index 000000000..dfae0f2e2
--- /dev/null
+++ b/core/vm/interpreter_test.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package vm + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/params" +) + +var loopInterruptTests = []string{ + // infinite loop using JUMP: push(2) jumpdest dup1 jump + "60025b8056", + // infinite loop using JUMPI: push(1) push(4) jumpdest dup2 dup2 jumpi + "600160045b818157", +} + +func TestLoopInterrupt(t *testing.T) { + address := common.BytesToAddress([]byte("contract")) + vmctx := BlockContext{ + Transfer: func(StateDB, common.Address, common.Address, *big.Int) {}, + } + + for i, tt := range loopInterruptTests { + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.CreateAccount(address) + statedb.SetCode(address, common.Hex2Bytes(tt)) + statedb.Finalise(true) + + evm := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, Config{}) + + errChannel := make(chan error) + timeout := make(chan bool) + + go func(evm *EVM) { + _, _, err := evm.Call(AccountRef(common.Address{}), address, nil, math.MaxUint64, new(big.Int)) + errChannel <- err + }(evm) + + go func() { + <-time.After(time.Second) + timeout <- true + }() + + evm.Cancel() + + select { + case <-timeout: + t.Errorf("test %d timed out", i) + case err := <-errChannel: + if err != nil { + t.Errorf("test %d failure: %v", i, err) + } + } + } + +} diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 329ad77cb..6dea5d81f 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -17,6 +17,8 @@ package vm import ( + "fmt" + "github.com/ethereum/go-ethereum/params" ) @@ -40,12 +42,6 @@ type operation struct { // memorySize returns the memory size required for the operation memorySize memorySizeFunc - - halts bool // indicates whether the operation should halt further execution - jumps bool // indicates whether the program counter should not increment - writes bool // determines whether this a state modifying operation - reverts bool // determines whether the operation reverts state (implicitly halts) - returns bool // determines whether the operations sets the return data content } var ( @@ -63,13 +59,31 @@ var ( // JumpTable contains the EVM opcodes supported at a given fork. type JumpTable [256]*operation +func validate(jt JumpTable) JumpTable { + for i, op := range jt { + if op == nil { + panic(fmt.Sprintf("op 0x%x is not set", i)) + } + // The interpreter has an assumption that if the memorySize function is + // set, then the dynamicGas function is also set. This is a somewhat + // arbitrary assumption, and can be removed if we need to -- but it + // allows us to avoid a condition check. 
As long as we have that assumption + // in there, this little sanity check prevents us from merging in a + // change which violates it. + if op.memorySize != nil && op.dynamicGas == nil { + panic(fmt.Sprintf("op %v has dynamic memory but not dynamic gas", OpCode(i).String())) + } + } + return jt +} + // newLondonInstructionSet returns the frontier, homestead, byzantium, // contantinople, istanbul, petersburg, berlin and london instructions. func newLondonInstructionSet() JumpTable { instructionSet := newBerlinInstructionSet() enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529 enable3198(&instructionSet) // Base fee opcode https://eips.ethereum.org/EIPS/eip-3198 - return instructionSet + return validate(instructionSet) } // newBerlinInstructionSet returns the frontier, homestead, byzantium, @@ -77,7 +91,7 @@ func newLondonInstructionSet() JumpTable { func newBerlinInstructionSet() JumpTable { instructionSet := newIstanbulInstructionSet() enable2929(&instructionSet) // Access lists for trie accesses https://eips.ethereum.org/EIPS/eip-2929 - return instructionSet + return validate(instructionSet) } // newIstanbulInstructionSet returns the frontier, homestead, byzantium, @@ -89,7 +103,7 @@ func newIstanbulInstructionSet() JumpTable { enable1884(&instructionSet) // Reprice reader opcodes - https://eips.ethereum.org/EIPS/eip-1884 enable2200(&instructionSet) // Net metered SSTORE - https://eips.ethereum.org/EIPS/eip-2200 - return instructionSet + return validate(instructionSet) } // newConstantinopleInstructionSet returns the frontier, homestead, @@ -127,10 +141,8 @@ func newConstantinopleInstructionSet() JumpTable { minStack: minStack(4, 1), maxStack: maxStack(4, 1), memorySize: memoryCreate2, - writes: true, - returns: true, } - return instructionSet + return validate(instructionSet) } // newByzantiumInstructionSet returns the frontier, homestead and @@ -144,7 +156,6 @@ func newByzantiumInstructionSet() JumpTable { minStack: minStack(6, 1), maxStack: maxStack(6, 1), memorySize: memoryStaticCall, - returns: true, } instructionSet[RETURNDATASIZE] = &operation{ execute: opReturnDataSize, @@ -166,17 +177,15 @@ func newByzantiumInstructionSet() JumpTable { minStack: minStack(2, 0), maxStack: maxStack(2, 0), memorySize: memoryRevert, - reverts: true, - returns: true, } - return instructionSet + return validate(instructionSet) } // EIP 158 a.k.a Spurious Dragon func newSpuriousDragonInstructionSet() JumpTable { instructionSet := newTangerineWhistleInstructionSet() instructionSet[EXP].dynamicGas = gasExpEIP158 - return instructionSet + return validate(instructionSet) } @@ -190,7 +199,7 @@ func newTangerineWhistleInstructionSet() JumpTable { instructionSet[CALL].constantGas = params.CallGasEIP150 instructionSet[CALLCODE].constantGas = params.CallGasEIP150 instructionSet[DELEGATECALL].constantGas = params.CallGasEIP150 - return instructionSet + return validate(instructionSet) } // newHomesteadInstructionSet returns the frontier and homestead @@ -204,21 +213,19 @@ func newHomesteadInstructionSet() JumpTable { minStack: minStack(6, 1), maxStack: maxStack(6, 1), memorySize: memoryDelegateCall, - returns: true, } - return instructionSet + return validate(instructionSet) } // newFrontierInstructionSet returns the frontier instructions // that can be executed during the frontier phase. 
func newFrontierInstructionSet() JumpTable { - return JumpTable{ + tbl := JumpTable{ STOP: { execute: opStop, constantGas: 0, minStack: minStack(0, 0), maxStack: maxStack(0, 0), - halts: true, }, ADD: { execute: opAdd, @@ -352,13 +359,13 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(2, 1), maxStack: maxStack(2, 1), }, - SHA3: { - execute: opSha3, - constantGas: params.Sha3Gas, - dynamicGas: gasSha3, + KECCAK256: { + execute: opKeccak256, + constantGas: params.Keccak256Gas, + dynamicGas: gasKeccak256, minStack: minStack(2, 1), maxStack: maxStack(2, 1), - memorySize: memorySha3, + memorySize: memoryKeccak256, }, ADDRESS: { execute: opAddress, @@ -521,21 +528,18 @@ func newFrontierInstructionSet() JumpTable { dynamicGas: gasSStore, minStack: minStack(2, 0), maxStack: maxStack(2, 0), - writes: true, }, JUMP: { execute: opJump, constantGas: GasMidStep, minStack: minStack(1, 0), maxStack: maxStack(1, 0), - jumps: true, }, JUMPI: { execute: opJumpi, constantGas: GasSlowStep, minStack: minStack(2, 0), maxStack: maxStack(2, 0), - jumps: true, }, PC: { execute: opPc, @@ -951,7 +955,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(2, 0), maxStack: maxStack(2, 0), memorySize: memoryLog, - writes: true, }, LOG1: { execute: makeLog(1), @@ -959,7 +962,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(3, 0), maxStack: maxStack(3, 0), memorySize: memoryLog, - writes: true, }, LOG2: { execute: makeLog(2), @@ -967,7 +969,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(4, 0), maxStack: maxStack(4, 0), memorySize: memoryLog, - writes: true, }, LOG3: { execute: makeLog(3), @@ -975,7 +976,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(5, 0), maxStack: maxStack(5, 0), memorySize: memoryLog, - writes: true, }, LOG4: { execute: makeLog(4), @@ -983,7 +983,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(6, 0), maxStack: maxStack(6, 0), memorySize: memoryLog, - writes: true, }, CREATE: { execute: opCreate, @@ -992,8 +991,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(3, 1), maxStack: maxStack(3, 1), memorySize: memoryCreate, - writes: true, - returns: true, }, CALL: { execute: opCall, @@ -1002,7 +999,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(7, 1), maxStack: maxStack(7, 1), memorySize: memoryCall, - returns: true, }, CALLCODE: { execute: opCallCode, @@ -1011,7 +1007,6 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(7, 1), maxStack: maxStack(7, 1), memorySize: memoryCall, - returns: true, }, RETURN: { execute: opReturn, @@ -1019,15 +1014,21 @@ func newFrontierInstructionSet() JumpTable { minStack: minStack(2, 0), maxStack: maxStack(2, 0), memorySize: memoryReturn, - halts: true, }, SELFDESTRUCT: { - execute: opSuicide, + execute: opSelfdestruct, dynamicGas: gasSelfdestruct, minStack: minStack(1, 0), maxStack: maxStack(1, 0), - halts: true, - writes: true, }, } + + // Fill all unassigned slots with opUndefined. + for i, entry := range tbl { + if entry == nil { + tbl[i] = &operation{execute: opUndefined, maxStack: maxStack(0, 0)} + } + } + + return validate(tbl) } diff --git a/core/vm/logger.go b/core/vm/logger.go index 98ff967ee..3af5aec19 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -1,4 +1,4 @@ -// Copyright 2015 The go-ethereum Authors +// Copyright 2021 The go-ethereum Authors // This file is part of the go-ethereum library. 
// // The go-ethereum library is free software: you can redistribute it and/or modify @@ -17,87 +17,12 @@ package vm import ( - "encoding/hex" - "fmt" - "io" "math/big" - "strings" "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" - "github.com/holiman/uint256" ) -// Storage represents a contract's storage. -type Storage map[common.Hash]common.Hash - -// Copy duplicates the current storage. -func (s Storage) Copy() Storage { - cpy := make(Storage) - for key, value := range s { - cpy[key] = value - } - return cpy -} - -// LogConfig are the configuration options for structured logger the EVM -type LogConfig struct { - EnableMemory bool // enable memory capture - DisableStack bool // disable stack capture - DisableStorage bool // disable storage capture - EnableReturnData bool // enable return data capture - Debug bool // print output during capture end - Limit int // maximum length of output, but zero means unlimited - // Chain overrides, can be used to execute a trace using future fork rules - Overrides *params.ChainConfig `json:"overrides,omitempty"` -} - -//go:generate gencodec -type StructLog -field-override structLogMarshaling -out gen_structlog.go - -// StructLog is emitted to the EVM each cycle and lists information about the current internal state -// prior to the execution of the statement. -type StructLog struct { - Pc uint64 `json:"pc"` - Op OpCode `json:"op"` - Gas uint64 `json:"gas"` - GasCost uint64 `json:"gasCost"` - Memory []byte `json:"memory"` - MemorySize int `json:"memSize"` - Stack []uint256.Int `json:"stack"` - ReturnData []byte `json:"returnData"` - Storage map[common.Hash]common.Hash `json:"-"` - Depth int `json:"depth"` - RefundCounter uint64 `json:"refund"` - Err error `json:"-"` -} - -// overrides for gencodec -type structLogMarshaling struct { - Gas math.HexOrDecimal64 - GasCost math.HexOrDecimal64 - Memory hexutil.Bytes - ReturnData hexutil.Bytes - OpName string `json:"opName"` // adds call to OpName() in MarshalJSON - ErrorString string `json:"error"` // adds call to ErrorString() in MarshalJSON -} - -// OpName formats the operand name in a human-readable format. -func (s *StructLog) OpName() string { - return s.Op.String() -} - -// ErrorString formats the log's error as a string. -func (s *StructLog) ErrorString() string { - if s.Err != nil { - return s.Err.Error() - } - return "" -} - // EVMLogger is used to collect execution traces from an EVM transaction // execution. CaptureState is called for each step of the VM with the // current VM state. @@ -111,250 +36,3 @@ type EVMLogger interface { CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) } - -// StructLogger is an EVM state logger and implements EVMLogger. -// -// StructLogger can capture state based on the given Log configuration and also keeps -// a track record of modified storage which is used in reporting snapshots of the -// contract their storage. 
-type StructLogger struct { - cfg LogConfig - env *EVM - - storage map[common.Address]Storage - logs []StructLog - output []byte - err error -} - -// NewStructLogger returns a new logger -func NewStructLogger(cfg *LogConfig) *StructLogger { - logger := &StructLogger{ - storage: make(map[common.Address]Storage), - } - if cfg != nil { - logger.cfg = *cfg - } - return logger -} - -// Reset clears the data held by the logger. -func (l *StructLogger) Reset() { - l.storage = make(map[common.Address]Storage) - l.output = make([]byte, 0) - l.logs = l.logs[:0] - l.err = nil -} - -// CaptureStart implements the EVMLogger interface to initialize the tracing operation. -func (l *StructLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { - l.env = env -} - -// CaptureState logs a new structured log message and pushes it out to the environment -// -// CaptureState also tracks SLOAD/SSTORE ops to track storage change. -func (l *StructLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { - memory := scope.Memory - stack := scope.Stack - contract := scope.Contract - // check if already accumulated the specified number of logs - if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) { - return - } - // Copy a snapshot of the current memory state to a new buffer - var mem []byte - if l.cfg.EnableMemory { - mem = make([]byte, len(memory.Data())) - copy(mem, memory.Data()) - } - // Copy a snapshot of the current stack state to a new buffer - var stck []uint256.Int - if !l.cfg.DisableStack { - stck = make([]uint256.Int, len(stack.Data())) - for i, item := range stack.Data() { - stck[i] = item - } - } - // Copy a snapshot of the current storage to a new container - var storage Storage - if !l.cfg.DisableStorage && (op == SLOAD || op == SSTORE) { - // initialise new changed values storage container for this contract - // if not present. - if l.storage[contract.Address()] == nil { - l.storage[contract.Address()] = make(Storage) - } - // capture SLOAD opcodes and record the read entry in the local storage - if op == SLOAD && stack.len() >= 1 { - var ( - address = common.Hash(stack.data[stack.len()-1].Bytes32()) - value = l.env.StateDB.GetState(contract.Address(), address) - ) - l.storage[contract.Address()][address] = value - storage = l.storage[contract.Address()].Copy() - } else if op == SSTORE && stack.len() >= 2 { - // capture SSTORE opcodes and record the written entry in the local storage. - var ( - value = common.Hash(stack.data[stack.len()-2].Bytes32()) - address = common.Hash(stack.data[stack.len()-1].Bytes32()) - ) - l.storage[contract.Address()][address] = value - storage = l.storage[contract.Address()].Copy() - } - } - var rdata []byte - if l.cfg.EnableReturnData { - rdata = make([]byte, len(rData)) - copy(rdata, rData) - } - // create a new snapshot of the EVM. - log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, l.env.StateDB.GetRefund(), err} - l.logs = append(l.logs, log) -} - -// CaptureFault implements the EVMLogger interface to trace an execution fault -// while running an opcode. -func (l *StructLogger) CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { -} - -// CaptureEnd is called after the call finishes to finalize the tracing. 
-func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { - l.output = output - l.err = err - if l.cfg.Debug { - fmt.Printf("0x%x\n", output) - if err != nil { - fmt.Printf(" error: %v\n", err) - } - } -} - -func (l *StructLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { -} - -func (l *StructLogger) CaptureExit(output []byte, gasUsed uint64, err error) {} - -// StructLogs returns the captured log entries. -func (l *StructLogger) StructLogs() []StructLog { return l.logs } - -// Error returns the VM error captured by the trace. -func (l *StructLogger) Error() error { return l.err } - -// Output returns the VM return value captured by the trace. -func (l *StructLogger) Output() []byte { return l.output } - -// WriteTrace writes a formatted trace to the given writer -func WriteTrace(writer io.Writer, logs []StructLog) { - for _, log := range logs { - fmt.Fprintf(writer, "%-16spc=%08d gas=%v cost=%v", log.Op, log.Pc, log.Gas, log.GasCost) - if log.Err != nil { - fmt.Fprintf(writer, " ERROR: %v", log.Err) - } - fmt.Fprintln(writer) - - if len(log.Stack) > 0 { - fmt.Fprintln(writer, "Stack:") - for i := len(log.Stack) - 1; i >= 0; i-- { - fmt.Fprintf(writer, "%08d %s\n", len(log.Stack)-i-1, log.Stack[i].Hex()) - } - } - if len(log.Memory) > 0 { - fmt.Fprintln(writer, "Memory:") - fmt.Fprint(writer, hex.Dump(log.Memory)) - } - if len(log.Storage) > 0 { - fmt.Fprintln(writer, "Storage:") - for h, item := range log.Storage { - fmt.Fprintf(writer, "%x: %x\n", h, item) - } - } - if len(log.ReturnData) > 0 { - fmt.Fprintln(writer, "ReturnData:") - fmt.Fprint(writer, hex.Dump(log.ReturnData)) - } - fmt.Fprintln(writer) - } -} - -// WriteLogs writes vm logs in a readable format to the given writer -func WriteLogs(writer io.Writer, logs []*types.Log) { - for _, log := range logs { - fmt.Fprintf(writer, "LOG%d: %x bn=%d txi=%x\n", len(log.Topics), log.Address, log.BlockNumber, log.TxIndex) - - for i, topic := range log.Topics { - fmt.Fprintf(writer, "%08d %x\n", i, topic) - } - - fmt.Fprint(writer, hex.Dump(log.Data)) - fmt.Fprintln(writer) - } -} - -type mdLogger struct { - out io.Writer - cfg *LogConfig - env *EVM -} - -// NewMarkdownLogger creates a logger which outputs information in a format adapted -// for human readability, and is also a valid markdown table -func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger { - l := &mdLogger{out: writer, cfg: cfg} - if l.cfg == nil { - l.cfg = &LogConfig{} - } - return l -} - -func (t *mdLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { - t.env = env - if !create { - fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n", - from.String(), to.String(), - input, gas, value) - } else { - fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n", - from.String(), to.String(), - input, gas, value) - } - - fmt.Fprintf(t.out, ` -| Pc | Op | Cost | Stack | RStack | Refund | -|-------|-------------|------|-----------|-----------|---------| -`) -} - -// CaptureState also tracks SLOAD/SSTORE ops to track storage change. 
-func (t *mdLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { - stack := scope.Stack - fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost) - - if !t.cfg.DisableStack { - // format stack - var a []string - for _, elem := range stack.data { - a = append(a, elem.Hex()) - } - b := fmt.Sprintf("[%v]", strings.Join(a, ",")) - fmt.Fprintf(t.out, "%10v |", b) - } - fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund()) - fmt.Fprintln(t.out, "") - if err != nil { - fmt.Fprintf(t.out, "Error: %v\n", err) - } -} - -func (t *mdLogger) CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { - fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err) -} - -func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) { - fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n", - output, gasUsed, err) -} - -func (t *mdLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { -} - -func (t *mdLogger) CaptureExit(output []byte, gasUsed uint64, err error) {} diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go index 4fcb41442..e35ca84e0 100644 --- a/core/vm/memory_table.go +++ b/core/vm/memory_table.go @@ -16,7 +16,7 @@ package vm -func memorySha3(stack *Stack) (uint64, bool) { +func memoryKeccak256(stack *Stack) (uint64, bool) { return calcMemSize64(stack.Back(0), stack.Back(1)) } diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index a6c89d833..a1833e510 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -32,11 +32,6 @@ func (op OpCode) IsPush() bool { return false } -// IsStaticJump specifies if an opcode is JUMP. -func (op OpCode) IsStaticJump() bool { - return op == JUMP -} - // 0x0 range - arithmetic ops. const ( STOP OpCode = 0x0 @@ -70,7 +65,7 @@ const ( SHR OpCode = 0x1c SAR OpCode = 0x1d - SHA3 OpCode = 0x20 + KECCAK256 OpCode = 0x20 ) // 0x30 range - closure state. @@ -207,13 +202,6 @@ const ( LOG4 ) -// unofficial opcodes used for parsing. -const ( - PUSH OpCode = 0xb0 + iota - DUP - SWAP -) - // 0xf0 range - closures. const ( CREATE OpCode = 0xf0 @@ -225,6 +213,7 @@ const ( STATICCALL OpCode = 0xfa REVERT OpCode = 0xfd + INVALID OpCode = 0xfe SELFDESTRUCT OpCode = 0xff ) @@ -261,7 +250,7 @@ var opCodeToString = map[OpCode]string{ MULMOD: "MULMOD", // 0x20 range - crypto. - SHA3: "SHA3", + KECCAK256: "KECCAK256", // 0x30 range - closure state. 
ADDRESS: "ADDRESS", @@ -390,11 +379,8 @@ var opCodeToString = map[OpCode]string{ CREATE2: "CREATE2", STATICCALL: "STATICCALL", REVERT: "REVERT", + INVALID: "INVALID", SELFDESTRUCT: "SELFDESTRUCT", - - PUSH: "PUSH", - DUP: "DUP", - SWAP: "SWAP", } func (op OpCode) String() string { @@ -433,7 +419,7 @@ var stringToOp = map[string]OpCode{ "SAR": SAR, "ADDMOD": ADDMOD, "MULMOD": MULMOD, - "SHA3": SHA3, + "KECCAK256": KECCAK256, "ADDRESS": ADDRESS, "BALANCE": BALANCE, "ORIGIN": ORIGIN, @@ -548,6 +534,7 @@ var stringToOp = map[string]OpCode{ "RETURN": RETURN, "CALLCODE": CALLCODE, "REVERT": REVERT, + "INVALID": INVALID, "SELFDESTRUCT": SELFDESTRUCT, } diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index fea7817ff..97673b490 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/params" // force-load js tracers to trigger registration @@ -326,7 +327,7 @@ func TestBlockhash(t *testing.T) { } type stepCounter struct { - inner *vm.JSONLogger + inner *logger.JSONLogger steps int } @@ -493,7 +494,7 @@ func BenchmarkSimpleLoop(b *testing.B) { byte(vm.JUMP), } - //tracer := vm.NewJSONLogger(nil, os.Stdout) + //tracer := logger.NewJSONLogger(nil, os.Stdout) //Execute(loopingCode, nil, &Config{ // EVMConfig: vm.Config{ // Debug: true, @@ -536,7 +537,7 @@ func TestEip2929Cases(t *testing.T) { Execute(code, nil, &Config{ EVMConfig: vm.Config{ Debug: true, - Tracer: vm.NewMarkdownLogger(nil, os.Stdout), + Tracer: logger.NewMarkdownLogger(nil, os.Stdout), ExtraEips: []int{2929}, }, }) @@ -686,7 +687,7 @@ func TestColdAccountAccessCost(t *testing.T) { want: 7600, }, } { - tracer := vm.NewStructLogger(nil) + tracer := logger.NewStructLogger(nil) Execute(tc.code, nil, &Config{ EVMConfig: vm.Config{ Debug: true, diff --git a/core/vm/stack.go b/core/vm/stack.go index 220f97c89..9e7e887cc 100644 --- a/core/vm/stack.go +++ b/core/vm/stack.go @@ -54,10 +54,6 @@ func (st *Stack) push(d *uint256.Int) { // NOTE push limit (1024) is checked in baseCheck st.data = append(st.data, *d) } -func (st *Stack) pushN(ds ...uint256.Int) { - // FIXME: Is there a way to pass args by pointers. - st.data = append(st.data, ds...) 
-} func (st *Stack) pop() (ret uint256.Int) { ret = st.data[len(st.data)-1] diff --git a/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s b/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s index 1e2d7ff96..5a9cc3ffc 100644 --- a/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s +++ b/crypto/secp256k1/libsecp256k1/src/asm/field_10x26_arm.s @@ -11,7 +11,7 @@ Note: - To avoid unnecessary loads and make use of available registers, two 'passes' have every time been interleaved, with the odd passes accumulating c' and d' - which will be added to c and d respectively in the the even passes + which will be added to c and d respectively in the even passes */ diff --git a/eth/api_backend.go b/eth/api_backend.go index 6a19fb36a..6577ac1e1 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -353,7 +353,7 @@ func (b *EthAPIBackend) StartMining(threads int) error { } func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) { - return b.eth.stateAtBlock(block, reexec, base, checkLive, preferDisk) + return b.eth.StateAtBlock(block, reexec, base, checkLive, preferDisk) } func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { diff --git a/eth/backend.go b/eth/backend.go index ae4e6e85d..a53982166 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" @@ -46,6 +47,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/internal/shutdowncheck" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" @@ -71,6 +73,7 @@ type Ethereum struct { handler *handler ethDialCandidates enode.Iterator snapDialCandidates enode.Iterator + merger *consensus.Merger // DB interfaces chainDb ethdb.Database // Block chain database @@ -95,6 +98,8 @@ type Ethereum struct { p2pServer *p2p.Server lock sync.RWMutex // Protects the variadic fields (e.g. 
gas price and etherbase) + + shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully } // New creates a new Ethereum object (including the @@ -131,7 +136,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier, config.OverrideTerminalTotalDifficulty) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } @@ -140,8 +145,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil { log.Error("Failed to recover state", "error", err) } + merger := consensus.NewMerger(chainDb) eth := &Ethereum{ config: config, + merger: merger, chainDb: chainDb, eventMux: stack.EventMux(), accountManager: stack.AccountManager(), @@ -153,6 +160,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { bloomRequests: make(chan chan *bloombits.Retrieval), bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), p2pServer: stack.Server(), + shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb), } bcVersion := rawdb.ReadDatabaseVersion(chainDb) @@ -215,6 +223,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { Database: chainDb, Chain: eth.blockchain, TxPool: eth.txPool, + Merger: merger, Network: config.NetworkId, Sync: config.SyncMode, BloomCache: uint64(cacheLimit), @@ -225,7 +234,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { return nil, err } - eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock) + eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, merger) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} @@ -256,19 +265,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { stack.RegisterAPIs(eth.APIs()) stack.RegisterProtocols(eth.Protocols()) stack.RegisterLifecycle(eth) - // Check for unclean shutdown - if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil { - log.Error("Could not update unclean-shutdown-marker list", "error", err) - } else { - if discards > 0 { - log.Warn("Old unclean shutdowns found", "count", discards) - } - for _, tstamp := range uncleanShutdowns { - t := time.Unix(int64(tstamp), 0) - log.Warn("Unclean shutdown detected", "booted", t, - "age", common.PrettyAge(t)) - } - } + + // Successful startup; push a marker and check previous unclean shutdowns. + eth.shutdownTracker.MarkStartup() + return eth, nil } @@ -378,10 +378,10 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) { // // We regard two types of accounts as local miner account: etherbase // and accounts specified via `txpool.locals` flag. 
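The hand-rolled unclean-shutdown marker handling removed further down is replaced by the `shutdowncheck.ShutdownTracker` wired in here. Its whole lifecycle, as a standalone sketch with an in-memory database standing in for the chain database (the package lives under `internal/`, so this only compiles inside the tree):

```go
package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/internal/shutdowncheck"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	tracker := shutdowncheck.NewShutdownTracker(db)
	tracker.MarkStartup() // push a marker; logs any previous unclean shutdowns
	tracker.Start()       // refresh the marker periodically while running
	// ... node lifetime ...
	tracker.Stop() // clear the marker: this run shut down cleanly
	db.Close()
}
```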
-func (s *Ethereum) isLocalBlock(block *types.Block) bool { - author, err := s.engine.Author(block.Header()) +func (s *Ethereum) isLocalBlock(header *types.Header) bool { + author, err := s.engine.Author(header) if err != nil { - log.Warn("Failed to retrieve block author", "number", block.NumberU64(), "hash", block.Hash(), "err", err) + log.Warn("Failed to retrieve block author", "number", header.Number.Uint64(), "hash", header.Hash(), "err", err) return false } // Check whether the given address is etherbase. @@ -404,7 +404,7 @@ func (s *Ethereum) isLocalBlock(block *types.Block) bool { // shouldPreserve checks whether we should preserve the given block // during the chain reorg depending on whether the author of block // is a local account. -func (s *Ethereum) shouldPreserve(block *types.Block) bool { +func (s *Ethereum) shouldPreserve(header *types.Header) bool { // The reason we need to disable the self-reorg preserving for clique // is it can be probable to introduce a deadlock. // @@ -424,7 +424,7 @@ func (s *Ethereum) shouldPreserve(block *types.Block) bool { if _, ok := s.engine.(*clique.Clique); ok { return false } - return s.isLocalBlock(block) + return s.isLocalBlock(header) } // SetEtherbase sets the mining reward address. @@ -465,13 +465,21 @@ func (s *Ethereum) StartMining(threads int) error { log.Error("Cannot start mining without etherbase", "err", err) return fmt.Errorf("etherbase missing: %v", err) } - if clique, ok := s.engine.(*clique.Clique); ok { + var cli *clique.Clique + if c, ok := s.engine.(*clique.Clique); ok { + cli = c + } else if cl, ok := s.engine.(*beacon.Beacon); ok { + if c, ok := cl.InnerEngine().(*clique.Clique); ok { + cli = c + } + } + if cli != nil { wallet, err := s.accountManager.Find(accounts.Account{Address: eb}) if wallet == nil || err != nil { log.Error("Etherbase account unavailable locally", "err", err) return fmt.Errorf("signer missing: %v", err) } - clique.Authorize(eb, wallet.SignData) + cli.Authorize(eb, wallet.SignData) } // If mining is started, we can disable the transaction rejection mechanism // introduced to speed sync times. @@ -508,8 +516,14 @@ func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) IsListening() bool { return true } // Always listening func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.handler.acceptTxs) == 1 } +func (s *Ethereum) SetSynced() { atomic.StoreUint32(&s.handler.acceptTxs, 1) } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } +func (s *Ethereum) Merger() *consensus.Merger { return s.merger } +func (s *Ethereum) SyncMode() downloader.SyncMode { + mode, _ := s.handler.chainSync.modeAndLocalHead() + return mode +} // Protocols returns all the currently configured // network protocols to start. 
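Post-merge, the configured engine may be a `beacon.Beacon` wrapping the original clique engine, which is why `StartMining` above now peels off that layer before authorizing the signer. The same unwrap as a reusable helper (a sketch, not part of this diff):

```go
package example

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/beacon"
	"github.com/ethereum/go-ethereum/consensus/clique"
)

// asClique returns the clique engine if one is configured, either directly
// or wrapped inside a beacon engine, and nil otherwise.
func asClique(engine consensus.Engine) *clique.Clique {
	if c, ok := engine.(*clique.Clique); ok {
		return c
	}
	if b, ok := engine.(*beacon.Beacon); ok {
		if c, ok := b.InnerEngine().(*clique.Clique); ok {
			return c
		}
	}
	return nil
}
```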
@@ -529,6 +543,9 @@ func (s *Ethereum) Start() error { // Start the bloom bits servicing goroutines s.startBloomHandlers(params.BloomBitsBlocks) + // Regularly update shutdown marker + s.shutdownTracker.Start() + // Figure out a max peers count based on the server limits maxPeers := s.p2pServer.MaxPeers if s.config.LightServ > 0 { @@ -557,7 +574,10 @@ func (s *Ethereum) Stop() error { s.miner.Close() s.blockchain.Stop() s.engine.Close() - rawdb.PopUncleanShutdownMarker(s.chainDb) + + // Clean shutdown marker as the last thing before closing db + s.shutdownTracker.Stop() + s.chainDb.Close() s.eventMux.Stop() diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 3913da757..3c0b6d9e4 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -18,17 +18,23 @@ package catalyst import ( + "crypto/sha256" + "encoding/binary" "errors" "fmt" "math/big" "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/misc" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" chainParams "github.com/ethereum/go-ethereum/params" @@ -36,31 +42,81 @@ import ( "github.com/ethereum/go-ethereum/trie" ) -// Register adds catalyst APIs to the node. -func Register(stack *node.Node, backend *eth.Ethereum) error { - chainconfig := backend.BlockChain().Config() - if chainconfig.TerminalTotalDifficulty == nil { - return errors.New("catalyst started without valid total difficulty") - } +var ( + VALID = GenericStringResponse{"VALID"} + SUCCESS = GenericStringResponse{"SUCCESS"} + INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil} + SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil} + GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"} + UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"} + InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"} + InvalidPayloadID = rpc.CustomError{Code: 1, ValidationError: "invalid payload id"} +) - log.Warn("Catalyst mode enabled") +// Register adds catalyst APIs to the full node. +func Register(stack *node.Node, backend *eth.Ethereum) error { + log.Warn("Catalyst mode enabled", "protocol", "eth") stack.RegisterAPIs([]rpc.API{ { - Namespace: "consensus", + Namespace: "engine", Version: "1.0", - Service: newConsensusAPI(backend), + Service: NewConsensusAPI(backend, nil), Public: true, }, }) return nil } -type consensusAPI struct { - eth *eth.Ethereum +// RegisterLight adds catalyst APIs to the light client. 
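Since the service now registers under the `engine` namespace rather than `consensus`, external consensus clients reach it as `engine_*` RPC methods. A hedged sketch of such a call (the endpoint URL is illustrative and assumes the node exposes the namespace over HTTP; per `ForkchoiceUpdatedV1` below, a zero head hash is simply answered with SUCCESS):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	zero := "0x0000000000000000000000000000000000000000000000000000000000000000"
	var resp map[string]interface{}
	err = client.CallContext(context.Background(), &resp, "engine_forkchoiceUpdatedV1",
		map[string]string{
			"headBlockHash":      zero,
			"safeBlockHash":      zero,
			"finalizedBlockHash": zero,
		}, nil) // nil payload attributes: update the head, build nothing
	fmt.Println(resp, err)
}
```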
+func RegisterLight(stack *node.Node, backend *les.LightEthereum) error { + log.Warn("Catalyst mode enabled", "protocol", "les") + stack.RegisterAPIs([]rpc.API{ + { + Namespace: "engine", + Version: "1.0", + Service: NewConsensusAPI(nil, backend), + Public: true, + }, + }) + return nil } -func newConsensusAPI(eth *eth.Ethereum) *consensusAPI { - return &consensusAPI{eth: eth} +type ConsensusAPI struct { + light bool + eth *eth.Ethereum + les *les.LightEthereum + engine consensus.Engine // engine is the post-merge consensus engine, only for block creation + preparedBlocks map[uint64]*ExecutableDataV1 +} + +func NewConsensusAPI(eth *eth.Ethereum, les *les.LightEthereum) *ConsensusAPI { + var engine consensus.Engine + if eth == nil { + if les.BlockChain().Config().TerminalTotalDifficulty == nil { + panic("Catalyst started without valid total difficulty") + } + if b, ok := les.Engine().(*beacon.Beacon); ok { + engine = beacon.New(b.InnerEngine()) + } else { + engine = beacon.New(les.Engine()) + } + } else { + if eth.BlockChain().Config().TerminalTotalDifficulty == nil { + panic("Catalyst started without valid total difficulty") + } + if b, ok := eth.Engine().(*beacon.Beacon); ok { + engine = beacon.New(b.InnerEngine()) + } else { + engine = beacon.New(eth.Engine()) + } + } + return &ConsensusAPI{ + light: eth == nil, + eth: eth, + les: les, + engine: engine, + preparedBlocks: make(map[uint64]*ExecutableDataV1), + } } // blockExecutionEnv gathers all the data required to execute @@ -89,8 +145,24 @@ func (env *blockExecutionEnv) commitTransaction(tx *types.Transaction, coinbase return nil } -func (api *consensusAPI) makeEnv(parent *types.Block, header *types.Header) (*blockExecutionEnv, error) { - state, err := api.eth.BlockChain().StateAt(parent.Root()) +func (api *ConsensusAPI) makeEnv(parent *types.Block, header *types.Header) (*blockExecutionEnv, error) { + // The parent state might be missing. It can be the special scenario + // that consensus layer tries to build a new block based on the very + // old side chain block and the relevant state is already pruned. So + // try to retrieve the live state from the chain, if it's not existent, + // do the necessary recovery work. + var ( + err error + state *state.StateDB + ) + if api.eth.BlockChain().HasState(parent.Root()) { + state, err = api.eth.BlockChain().StateAt(parent.Root()) + } else { + // The maximum acceptable reorg depth can be limited by the + // finalised block somehow. TODO(rjl493456442) fix the hard- + // coded number here later. 
+ state, err = api.eth.StateAtBlock(parent, 1000, nil, false, false) + } if err != nil { return nil, err } @@ -103,57 +175,160 @@ func (api *consensusAPI) makeEnv(parent *types.Block, header *types.Header) (*bl return env, nil } +func (api *ConsensusAPI) GetPayloadV1(payloadID hexutil.Bytes) (*ExecutableDataV1, error) { + hash := []byte(payloadID) + if len(hash) < 8 { + return nil, &InvalidPayloadID + } + id := binary.BigEndian.Uint64(hash[:8]) + data, ok := api.preparedBlocks[id] + if !ok { + return nil, &UnknownPayload + } + return data, nil +} + +func (api *ConsensusAPI) ForkchoiceUpdatedV1(heads ForkchoiceStateV1, PayloadAttributes *PayloadAttributesV1) (ForkChoiceResponse, error) { + if heads.HeadBlockHash == (common.Hash{}) { + return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil + } + if err := api.checkTerminalTotalDifficulty(heads.HeadBlockHash); err != nil { + if block := api.eth.BlockChain().GetBlockByHash(heads.HeadBlockHash); block == nil { + // TODO (MariusVanDerWijden) trigger sync + return SYNCING, nil + } + return INVALID, err + } + // If the finalized block is set, check if it is in our blockchain + if heads.FinalizedBlockHash != (common.Hash{}) { + if block := api.eth.BlockChain().GetBlockByHash(heads.FinalizedBlockHash); block == nil { + // TODO (MariusVanDerWijden) trigger sync + return SYNCING, nil + } + } + // SetHead + if err := api.setHead(heads.HeadBlockHash); err != nil { + return INVALID, err + } + // Assemble block (if needed) + if PayloadAttributes != nil { + data, err := api.assembleBlock(heads.HeadBlockHash, PayloadAttributes) + if err != nil { + return INVALID, err + } + hash := computePayloadId(heads.HeadBlockHash, PayloadAttributes) + id := binary.BigEndian.Uint64(hash) + api.preparedBlocks[id] = data + log.Info("Created payload", "payloadid", id) + // TODO (MariusVanDerWijden) do something with the payloadID? + hex := hexutil.Bytes(hash) + return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: &hex}, nil + } + return ForkChoiceResponse{Status: SUCCESS.Status, PayloadID: nil}, nil +} + +func computePayloadId(headBlockHash common.Hash, params *PayloadAttributesV1) []byte { + // Hash + hasher := sha256.New() + hasher.Write(headBlockHash[:]) + binary.Write(hasher, binary.BigEndian, params.Timestamp) + hasher.Write(params.Random[:]) + hasher.Write(params.SuggestedFeeRecipient[:]) + return hasher.Sum([]byte{})[:8] +} + +func (api *ConsensusAPI) invalid() ExecutePayloadResponse { + if api.light { + return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.les.BlockChain().CurrentHeader().Hash()} + } + return ExecutePayloadResponse{Status: INVALID.Status, LatestValidHash: api.eth.BlockChain().CurrentHeader().Hash()} +} + +// ExecutePayload creates an Eth1 block, inserts it in the chain, and returns the status of the chain. 
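For reference, the payload identifier above is just the first eight bytes of a SHA-256 digest over the head hash, timestamp, random value, and fee recipient. The worked example below mirrors that derivation and shows how `GetPayloadV1` reads the bytes back as a big-endian `uint64` map key (input values illustrative):

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	head := common.HexToHash("0x01")
	random := common.HexToHash("0x02")
	feeRecipient := common.HexToAddress("0x03")
	timestamp := uint64(1234)

	// Same ingredients and order as computePayloadId above.
	hasher := sha256.New()
	hasher.Write(head[:])
	binary.Write(hasher, binary.BigEndian, timestamp)
	hasher.Write(random[:])
	hasher.Write(feeRecipient[:])
	id := hasher.Sum(nil)[:8]

	// GetPayloadV1 interprets these 8 bytes as a big-endian uint64 map key.
	fmt.Printf("payload id: %x -> %d\n", id, binary.BigEndian.Uint64(id))
}
```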
+func (api *ConsensusAPI) ExecutePayloadV1(params ExecutableDataV1) (ExecutePayloadResponse, error) { + block, err := ExecutableDataToBlock(params) + if err != nil { + return api.invalid(), err + } + if api.light { + parent := api.les.BlockChain().GetHeaderByHash(params.ParentHash) + if parent == nil { + return api.invalid(), fmt.Errorf("could not find parent %x", params.ParentHash) + } + if err = api.les.BlockChain().InsertHeader(block.Header()); err != nil { + return api.invalid(), err + } + return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil + } + if !api.eth.BlockChain().HasBlock(block.ParentHash(), block.NumberU64()-1) { + /* + TODO (MariusVanDerWijden) reenable once sync is merged + if err := api.eth.Downloader().BeaconSync(api.eth.SyncMode(), block.Header()); err != nil { + return SYNCING, err + } + */ + // TODO (MariusVanDerWijden) we should return nil here not empty hash + return ExecutePayloadResponse{Status: SYNCING.Status, LatestValidHash: common.Hash{}}, nil + } + parent := api.eth.BlockChain().GetBlockByHash(params.ParentHash) + td := api.eth.BlockChain().GetTd(parent.Hash(), block.NumberU64()-1) + ttd := api.eth.BlockChain().Config().TerminalTotalDifficulty + if td.Cmp(ttd) < 0 { + return api.invalid(), fmt.Errorf("can not execute payload on top of block with low td got: %v threshold %v", td, ttd) + } + if err := api.eth.BlockChain().InsertBlockWithoutSetHead(block); err != nil { + return api.invalid(), err + } + + if merger := api.merger(); !merger.TDDReached() { + merger.ReachTTD() + } + return ExecutePayloadResponse{Status: VALID.Status, LatestValidHash: block.Hash()}, nil +} + // AssembleBlock creates a new block, inserts it into the chain, and returns the "execution // data" required for eth2 clients to process the new block. 
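Note the division of labor here: `ExecutePayloadV1` only inserts the block (`InsertBlockWithoutSetHead`), and a subsequent fork-choice update promotes it to head. The tests added later in this diff follow exactly that sequence; condensed into a driver sketch (error handling trimmed):

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/catalyst"
)

// importPayload executes a payload and, if it validates, makes it canonical.
func importPayload(api *catalyst.ConsensusAPI, payload catalyst.ExecutableDataV1) error {
	resp, err := api.ExecutePayloadV1(payload)
	if err != nil {
		return err
	}
	if resp.Status != "VALID" { // "SYNCING" means the parent is not known yet
		return fmt.Errorf("payload not imported: %s", resp.Status)
	}
	// Only the fork-choice update advances the chain head.
	_, err = api.ForkchoiceUpdatedV1(catalyst.ForkchoiceStateV1{
		HeadBlockHash:      payload.BlockHash,
		SafeBlockHash:      payload.ParentHash,
		FinalizedBlockHash: payload.ParentHash,
	}, nil)
	return err
}
```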
-func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableData, error) { - log.Info("Producing block", "parentHash", params.ParentHash) +func (api *ConsensusAPI) assembleBlock(parentHash common.Hash, params *PayloadAttributesV1) (*ExecutableDataV1, error) { + if api.light { + return nil, errors.New("not supported") + } + log.Info("Producing block", "parentHash", parentHash) bc := api.eth.BlockChain() - parent := bc.GetBlockByHash(params.ParentHash) + parent := bc.GetBlockByHash(parentHash) if parent == nil { - log.Warn("Cannot assemble block with parent hash to unknown block", "parentHash", params.ParentHash) - return nil, fmt.Errorf("cannot assemble block with unknown parent %s", params.ParentHash) + log.Warn("Cannot assemble block with parent hash to unknown block", "parentHash", parentHash) + return nil, fmt.Errorf("cannot assemble block with unknown parent %s", parentHash) } - pool := api.eth.TxPool() - - if parent.Time() >= params.Timestamp { - return nil, fmt.Errorf("child timestamp lower than parent's: %d >= %d", parent.Time(), params.Timestamp) + if params.Timestamp < parent.Time() { + return nil, fmt.Errorf("child timestamp lower than parent's: %d < %d", params.Timestamp, parent.Time()) } if now := uint64(time.Now().Unix()); params.Timestamp > now+1 { - wait := time.Duration(params.Timestamp-now) * time.Second - log.Info("Producing block too far in the future", "wait", common.PrettyDuration(wait)) - time.Sleep(wait) - } - - pending := pool.Pending(true) - - coinbase, err := api.eth.Etherbase() - if err != nil { - return nil, err + diff := time.Duration(params.Timestamp-now) * time.Second + log.Warn("Producing block too far in the future", "diff", common.PrettyDuration(diff)) } + pending := api.eth.TxPool().Pending(true) + coinbase := params.SuggestedFeeRecipient num := parent.Number() header := &types.Header{ ParentHash: parent.Hash(), Number: num.Add(num, common.Big1), Coinbase: coinbase, GasLimit: parent.GasLimit(), // Keep the gas limit constant in this prototype - Extra: []byte{}, + Extra: []byte{}, // TODO (MariusVanDerWijden) properly set extra data Time: params.Timestamp, } if config := api.eth.BlockChain().Config(); config.IsLondon(header.Number) { header.BaseFee = misc.CalcBaseFee(config, parent.Header()) } - err = api.eth.Engine().Prepare(bc, header) - if err != nil { + if err := api.engine.Prepare(bc, header); err != nil { return nil, err } - env, err := api.makeEnv(parent, header) if err != nil { return nil, err } - var ( signer = types.MakeSigner(bc.Config(), header.Number) txHeap = types.NewTransactionsByPriceAndNonce(signer, pending, nil) @@ -204,25 +379,12 @@ func (api *consensusAPI) AssembleBlock(params assembleBlockParams) (*executableD txHeap.Shift() } } - // Create the block. 
- block, err := api.eth.Engine().FinalizeAndAssemble(bc, header, env.state, transactions, nil /* uncles */, env.receipts) + block, err := api.engine.FinalizeAndAssemble(bc, header, env.state, transactions, nil /* uncles */, env.receipts) if err != nil { return nil, err } - return &executableData{ - BlockHash: block.Hash(), - ParentHash: block.ParentHash(), - Miner: block.Coinbase(), - StateRoot: block.Root(), - Number: block.NumberU64(), - GasLimit: block.GasLimit(), - GasUsed: block.GasUsed(), - Timestamp: block.Time(), - ReceiptRoot: block.ReceiptHash(), - LogsBloom: block.Bloom().Bytes(), - Transactions: encodeTransactions(block.Transactions()), - }, nil + return BlockToExecutableData(block, params.Random), nil } func encodeTransactions(txs []*types.Transaction) [][]byte { @@ -245,66 +407,130 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) { return txs, nil } -func insertBlockParamsToBlock(config *chainParams.ChainConfig, parent *types.Header, params executableData) (*types.Block, error) { +func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) { txs, err := decodeTransactions(params.Transactions) if err != nil { return nil, err } - + if len(params.ExtraData) > 32 { + return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData)) + } number := big.NewInt(0) number.SetUint64(params.Number) header := &types.Header{ ParentHash: params.ParentHash, UncleHash: types.EmptyUncleHash, - Coinbase: params.Miner, + Coinbase: params.FeeRecipient, Root: params.StateRoot, TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), - ReceiptHash: params.ReceiptRoot, + ReceiptHash: params.ReceiptsRoot, Bloom: types.BytesToBloom(params.LogsBloom), - Difficulty: big.NewInt(1), + Difficulty: common.Big0, Number: number, GasLimit: params.GasLimit, GasUsed: params.GasUsed, Time: params.Timestamp, - } - if config.IsLondon(number) { - header.BaseFee = misc.CalcBaseFee(config, parent) + BaseFee: params.BaseFeePerGas, + Extra: params.ExtraData, + // TODO (MariusVanDerWijden) add params.Random to header once required } block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */) + if block.Hash() != params.BlockHash { + return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash()) + } return block, nil } -// NewBlock creates an Eth1 block, inserts it in the chain, and either returns true, -// or false + an error. This is a bit redundant for go, but simplifies things on the -// eth2 side. 
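`ExecutableDataToBlock` rebuilds a header from the payload fields and requires the recomputed hash to equal `params.BlockHash`, so the conversion is self-checking. A round-trip sketch for post-merge style blocks (uncle-free, zero difficulty; other block shapes fail the hash check by construction):

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/catalyst"
)

// roundTrip encodes a block to ExecutableDataV1 and decodes it back;
// ExecutableDataToBlock itself verifies the header hash on the way in.
func roundTrip(block *types.Block, random common.Hash) error {
	data := catalyst.BlockToExecutableData(block, random)
	if _, err := catalyst.ExecutableDataToBlock(*data); err != nil {
		return fmt.Errorf("round trip failed: %w", err)
	}
	return nil
}
```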
-func (api *consensusAPI) NewBlock(params executableData) (*newBlockResponse, error) { - parent := api.eth.BlockChain().GetBlockByHash(params.ParentHash) - if parent == nil { - return &newBlockResponse{false}, fmt.Errorf("could not find parent %x", params.ParentHash) +func BlockToExecutableData(block *types.Block, random common.Hash) *ExecutableDataV1 { + return &ExecutableDataV1{ + BlockHash: block.Hash(), + ParentHash: block.ParentHash(), + FeeRecipient: block.Coinbase(), + StateRoot: block.Root(), + Number: block.NumberU64(), + GasLimit: block.GasLimit(), + GasUsed: block.GasUsed(), + BaseFeePerGas: block.BaseFee(), + Timestamp: block.Time(), + ReceiptsRoot: block.ReceiptHash(), + LogsBloom: block.Bloom().Bytes(), + Transactions: encodeTransactions(block.Transactions()), + Random: random, + ExtraData: block.Extra(), } - block, err := insertBlockParamsToBlock(api.eth.BlockChain().Config(), parent.Header(), params) - if err != nil { - return nil, err - } - _, err = api.eth.BlockChain().InsertChainWithoutSealVerification(block) - return &newBlockResponse{err == nil}, err } // Used in tests to add a the list of transactions from a block to the tx pool. -func (api *consensusAPI) addBlockTxs(block *types.Block) error { - for _, tx := range block.Transactions() { +func (api *ConsensusAPI) insertTransactions(txs types.Transactions) error { + for _, tx := range txs { api.eth.TxPool().AddLocal(tx) } return nil } -// FinalizeBlock is called to mark a block as synchronized, so -// that data that is no longer needed can be removed. -func (api *consensusAPI) FinalizeBlock(blockHash common.Hash) (*genericResponse, error) { - return &genericResponse{true}, nil +func (api *ConsensusAPI) checkTerminalTotalDifficulty(head common.Hash) error { + // shortcut if we entered PoS already + if api.merger().PoSFinalized() { + return nil + } + // make sure the parent has enough terminal total difficulty + newHeadBlock := api.eth.BlockChain().GetBlockByHash(head) + if newHeadBlock == nil { + return &GenericServerError + } + td := api.eth.BlockChain().GetTd(newHeadBlock.Hash(), newHeadBlock.NumberU64()) + if td != nil && td.Cmp(api.eth.BlockChain().Config().TerminalTotalDifficulty) < 0 { + return &InvalidTB + } + return nil } -// SetHead is called to perform a force choice. -func (api *consensusAPI) SetHead(newHead common.Hash) (*genericResponse, error) { - return &genericResponse{true}, nil +// setHead is called to perform a force choice. +func (api *ConsensusAPI) setHead(newHead common.Hash) error { + log.Info("Setting head", "head", newHead) + if api.light { + headHeader := api.les.BlockChain().CurrentHeader() + if headHeader.Hash() == newHead { + return nil + } + newHeadHeader := api.les.BlockChain().GetHeaderByHash(newHead) + if newHeadHeader == nil { + return &GenericServerError + } + if err := api.les.BlockChain().SetChainHead(newHeadHeader); err != nil { + return err + } + // Trigger the transition if it's the first `NewHead` event. + merger := api.merger() + if !merger.PoSFinalized() { + merger.FinalizePoS() + } + return nil + } + headBlock := api.eth.BlockChain().CurrentBlock() + if headBlock.Hash() == newHead { + return nil + } + newHeadBlock := api.eth.BlockChain().GetBlockByHash(newHead) + if newHeadBlock == nil { + return &GenericServerError + } + if err := api.eth.BlockChain().SetChainHead(newHeadBlock); err != nil { + return err + } + // Trigger the transition if it's the first `NewHead` event. 
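Both transition points funnel into the `consensus.Merger` created in `eth/backend.go` above: the first valid payload marks the terminal total difficulty as reached, and the first post-merge head finalizes proof-of-stake. As a standalone sketch against an in-memory database:

```go
package example

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

// mergeTransitions walks the merger through both one-way transitions.
func mergeTransitions() (ttdReached, posFinalized bool) {
	merger := consensus.NewMerger(rawdb.NewMemoryDatabase())
	merger.ReachTTD()    // ExecutePayloadV1 does this on the first valid payload
	merger.FinalizePoS() // setHead does this on the first post-merge head
	return merger.TDDReached(), merger.PoSFinalized() // both true from here on
}
```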
+ if merger := api.merger(); !merger.PoSFinalized() { + merger.FinalizePoS() + } + // TODO (MariusVanDerWijden) are we really synced now? + api.eth.SetSynced() + return nil +} + +// Helper function, return the merger instance. +func (api *ConsensusAPI) merger() *consensus.Merger { + if api.light { + return api.les.Merger() + } + return api.eth.Merger() } diff --git a/eth/catalyst/api_test.go b/eth/catalyst/api_test.go index 110420863..6e52c4fea 100644 --- a/eth/catalyst/api_test.go +++ b/eth/catalyst/api_test.go @@ -19,7 +19,10 @@ package catalyst import ( "math/big" "testing" + "time" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -38,10 +41,10 @@ var ( // testAddr is the Ethereum address of the tester account. testAddr = crypto.PubkeyToAddress(testKey.PublicKey) - testBalance = big.NewInt(2e15) + testBalance = big.NewInt(2e18) ) -func generateTestChain() (*core.Genesis, []*types.Block) { +func generatePreMergeChain(n int) (*core.Genesis, []*types.Block) { db := rawdb.NewMemoryDatabase() config := params.AllEthashProtocolChanges genesis := &core.Genesis{ @@ -51,177 +54,280 @@ func generateTestChain() (*core.Genesis, []*types.Block) { Timestamp: 9000, BaseFee: big.NewInt(params.InitialBaseFee), } + testNonce := uint64(0) generate := func(i int, g *core.BlockGen) { g.OffsetTime(5) g.SetExtra([]byte("test")) - } - gblock := genesis.ToBlock(db) - engine := ethash.NewFaker() - blocks, _ := core.GenerateChain(config, gblock, engine, db, 10, generate) - blocks = append([]*types.Block{gblock}, blocks...) - return genesis, blocks -} - -// TODO (MariusVanDerWijden) reenable once engine api is updated to the latest spec -/* -func generateTestChainWithFork(n int, fork int) (*core.Genesis, []*types.Block, []*types.Block) { - if fork >= n { - fork = n - 1 - } - db := rawdb.NewMemoryDatabase() - config := ¶ms.ChainConfig{ - ChainID: big.NewInt(1337), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - TerminalTotalDifficulty: big.NewInt(0), - Ethash: new(params.EthashConfig), - } - genesis := &core.Genesis{ - Config: config, - Alloc: core.GenesisAlloc{testAddr: {Balance: testBalance}}, - ExtraData: []byte("test genesis"), - Timestamp: 9000, - BaseFee: big.NewInt(params.InitialBaseFee), - } - generate := func(i int, g *core.BlockGen) { - g.OffsetTime(5) - g.SetExtra([]byte("test")) - } - generateFork := func(i int, g *core.BlockGen) { - g.OffsetTime(5) - g.SetExtra([]byte("testF")) + tx, _ := types.SignTx(types.NewTransaction(testNonce, common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"), big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), types.LatestSigner(config), testKey) + g.AddTx(tx) + testNonce++ } gblock := genesis.ToBlock(db) engine := ethash.NewFaker() blocks, _ := core.GenerateChain(config, gblock, engine, db, n, generate) - blocks = append([]*types.Block{gblock}, blocks...) 
- forkedBlocks, _ := core.GenerateChain(config, blocks[fork], engine, db, n-fork, generateFork) - return genesis, blocks, forkedBlocks + totalDifficulty := big.NewInt(0) + for _, b := range blocks { + totalDifficulty.Add(totalDifficulty, b.Difficulty()) + } + config.TerminalTotalDifficulty = totalDifficulty + return genesis, blocks } -*/ func TestEth2AssembleBlock(t *testing.T) { - genesis, blocks := generateTestChain() - n, ethservice := startEthService(t, genesis, blocks[1:9]) + genesis, blocks := generatePreMergeChain(10) + n, ethservice := startEthService(t, genesis, blocks) defer n.Close() - api := newConsensusAPI(ethservice) + api := NewConsensusAPI(ethservice, nil) signer := types.NewEIP155Signer(ethservice.BlockChain().Config().ChainID) - tx, err := types.SignTx(types.NewTransaction(0, blocks[8].Coinbase(), big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey) + tx, err := types.SignTx(types.NewTransaction(uint64(10), blocks[9].Coinbase(), big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey) if err != nil { t.Fatalf("error signing transaction, err=%v", err) } ethservice.TxPool().AddLocal(tx) - blockParams := assembleBlockParams{ - ParentHash: blocks[8].ParentHash(), - Timestamp: blocks[8].Time(), + blockParams := PayloadAttributesV1{ + Timestamp: blocks[9].Time() + 5, } - execData, err := api.AssembleBlock(blockParams) - + execData, err := api.assembleBlock(blocks[9].Hash(), &blockParams) if err != nil { t.Fatalf("error producing block, err=%v", err) } - if len(execData.Transactions) != 1 { t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions)) } } func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) { - genesis, blocks := generateTestChain() - n, ethservice := startEthService(t, genesis, blocks[1:9]) + genesis, blocks := generatePreMergeChain(10) + n, ethservice := startEthService(t, genesis, blocks[:9]) defer n.Close() - api := newConsensusAPI(ethservice) + api := NewConsensusAPI(ethservice, nil) // Put the 10th block's tx in the pool and produce a new block - api.addBlockTxs(blocks[9]) - blockParams := assembleBlockParams{ - ParentHash: blocks[9].ParentHash(), - Timestamp: blocks[9].Time(), + api.insertTransactions(blocks[9].Transactions()) + blockParams := PayloadAttributesV1{ + Timestamp: blocks[8].Time() + 5, } - execData, err := api.AssembleBlock(blockParams) + execData, err := api.assembleBlock(blocks[8].Hash(), &blockParams) if err != nil { t.Fatalf("error producing block, err=%v", err) } - if len(execData.Transactions) != blocks[9].Transactions().Len() { t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions)) } } -// TODO (MariusVanDerWijden) reenable once engine api is updated to the latest spec -/* -func TestEth2NewBlock(t *testing.T) { - genesis, blocks, forkedBlocks := generateTestChainWithFork(10, 4) - n, ethservice := startEthService(t, genesis, blocks[1:5]) +func TestSetHeadBeforeTotalDifficulty(t *testing.T) { + genesis, blocks := generatePreMergeChain(10) + n, ethservice := startEthService(t, genesis, blocks) defer n.Close() - api := newConsensusAPI(ethservice) - for i := 5; i < 10; i++ { - p := executableData{ - ParentHash: ethservice.BlockChain().CurrentBlock().Hash(), - Miner: blocks[i].Coinbase(), - StateRoot: blocks[i].Root(), - GasLimit: blocks[i].GasLimit(), - GasUsed: blocks[i].GasUsed(), - Transactions: encodeTransactions(blocks[i].Transactions()), - ReceiptRoot: blocks[i].ReceiptHash(), - LogsBloom: blocks[i].Bloom().Bytes(), - 
BlockHash: blocks[i].Hash(), - Timestamp: blocks[i].Time(), - Number: uint64(i), - } - success, err := api.NewBlock(p) - if err != nil || !success.Valid { - t.Fatalf("Failed to insert block: %v", err) - } + api := NewConsensusAPI(ethservice, nil) + fcState := ForkchoiceStateV1{ + HeadBlockHash: blocks[5].Hash(), + SafeBlockHash: common.Hash{}, + FinalizedBlockHash: common.Hash{}, } - - exp := ethservice.BlockChain().CurrentBlock().Hash() - - // Introduce the fork point. - lastBlockNum := blocks[4].Number() - lastBlock := blocks[4] - for i := 0; i < 4; i++ { - lastBlockNum.Add(lastBlockNum, big.NewInt(1)) - p := executableData{ - ParentHash: lastBlock.Hash(), - Miner: forkedBlocks[i].Coinbase(), - StateRoot: forkedBlocks[i].Root(), - Number: lastBlockNum.Uint64(), - GasLimit: forkedBlocks[i].GasLimit(), - GasUsed: forkedBlocks[i].GasUsed(), - Transactions: encodeTransactions(blocks[i].Transactions()), - ReceiptRoot: forkedBlocks[i].ReceiptHash(), - LogsBloom: forkedBlocks[i].Bloom().Bytes(), - BlockHash: forkedBlocks[i].Hash(), - Timestamp: forkedBlocks[i].Time(), - } - success, err := api.NewBlock(p) - if err != nil || !success.Valid { - t.Fatalf("Failed to insert forked block #%d: %v", i, err) - } - lastBlock, err = insertBlockParamsToBlock(ethservice.BlockChain().Config(), lastBlock.Header(), p) - if err != nil { - t.Fatal(err) - } - } - - if ethservice.BlockChain().CurrentBlock().Hash() != exp { - t.Fatalf("Wrong head after inserting fork %x != %x", exp, ethservice.BlockChain().CurrentBlock().Hash()) + if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err == nil { + t.Errorf("fork choice updated before total terminal difficulty should fail") } } -*/ + +func TestEth2PrepareAndGetPayload(t *testing.T) { + genesis, blocks := generatePreMergeChain(10) + // We need to properly set the terminal total difficulty + genesis.Config.TerminalTotalDifficulty.Sub(genesis.Config.TerminalTotalDifficulty, blocks[9].Difficulty()) + n, ethservice := startEthService(t, genesis, blocks[:9]) + defer n.Close() + + api := NewConsensusAPI(ethservice, nil) + + // Put the 10th block's tx in the pool and produce a new block + api.insertTransactions(blocks[9].Transactions()) + blockParams := PayloadAttributesV1{ + Timestamp: blocks[8].Time() + 5, + } + fcState := ForkchoiceStateV1{ + HeadBlockHash: blocks[8].Hash(), + SafeBlockHash: common.Hash{}, + FinalizedBlockHash: common.Hash{}, + } + _, err := api.ForkchoiceUpdatedV1(fcState, &blockParams) + if err != nil { + t.Fatalf("error preparing payload, err=%v", err) + } + payloadID := computePayloadId(fcState.HeadBlockHash, &blockParams) + execData, err := api.GetPayloadV1(hexutil.Bytes(payloadID)) + if err != nil { + t.Fatalf("error getting payload, err=%v", err) + } + if len(execData.Transactions) != blocks[9].Transactions().Len() { + t.Fatalf("invalid number of transactions %d != 1", len(execData.Transactions)) + } +} + +func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan core.RemovedLogsEvent, wantNew, wantRemoved int) { + t.Helper() + + if len(logsCh) != wantNew { + t.Fatalf("wrong number of log events: got %d, want %d", len(logsCh), wantNew) + } + if len(rmLogsCh) != wantRemoved { + t.Fatalf("wrong number of removed log events: got %d, want %d", len(rmLogsCh), wantRemoved) + } + // Drain events. 
+ for i := 0; i < len(logsCh); i++ { + <-logsCh + } + for i := 0; i < len(rmLogsCh); i++ { + <-rmLogsCh + } +} + +func TestEth2NewBlock(t *testing.T) { + genesis, preMergeBlocks := generatePreMergeChain(10) + n, ethservice := startEthService(t, genesis, preMergeBlocks) + ethservice.Merger().ReachTTD() + defer n.Close() + + var ( + api = NewConsensusAPI(ethservice, nil) + parent = preMergeBlocks[len(preMergeBlocks)-1] + + // This EVM code generates a log when the contract is created. + logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") + ) + // The event channels. + newLogCh := make(chan []*types.Log, 10) + rmLogsCh := make(chan core.RemovedLogsEvent, 10) + ethservice.BlockChain().SubscribeLogsEvent(newLogCh) + ethservice.BlockChain().SubscribeRemovedLogsEvent(rmLogsCh) + + for i := 0; i < 10; i++ { + statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) + nonce := statedb.GetNonce(testAddr) + tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) + ethservice.TxPool().AddLocal(tx) + + execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{ + Timestamp: parent.Time() + 5, + }) + if err != nil { + t.Fatalf("Failed to create the executable data %v", err) + } + block, err := ExecutableDataToBlock(*execData) + if err != nil { + t.Fatalf("Failed to convert executable data to block %v", err) + } + newResp, err := api.ExecutePayloadV1(*execData) + if err != nil || newResp.Status != "VALID" { + t.Fatalf("Failed to insert block: %v", err) + } + if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64()-1 { + t.Fatalf("Chain head shouldn't be updated") + } + checkLogEvents(t, newLogCh, rmLogsCh, 0, 0) + fcState := ForkchoiceStateV1{ + HeadBlockHash: block.Hash(), + SafeBlockHash: block.Hash(), + FinalizedBlockHash: block.Hash(), + } + if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { + t.Fatalf("Failed to insert block: %v", err) + } + if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() { + t.Fatalf("Chain head should be updated") + } + checkLogEvents(t, newLogCh, rmLogsCh, 1, 0) + + parent = block + } + + // Introduce fork chain + var ( + head = ethservice.BlockChain().CurrentBlock().NumberU64() + ) + parent = preMergeBlocks[len(preMergeBlocks)-1] + for i := 0; i < 10; i++ { + execData, err := api.assembleBlock(parent.Hash(), &PayloadAttributesV1{ + Timestamp: parent.Time() + 6, + }) + if err != nil { + t.Fatalf("Failed to create the executable data %v", err) + } + block, err := ExecutableDataToBlock(*execData) + if err != nil { + t.Fatalf("Failed to convert executable data to block %v", err) + } + newResp, err := api.ExecutePayloadV1(*execData) + if err != nil || newResp.Status != "VALID" { + t.Fatalf("Failed to insert block: %v", err) + } + if ethservice.BlockChain().CurrentBlock().NumberU64() != head { + t.Fatalf("Chain head shouldn't be updated") + } + + fcState := ForkchoiceStateV1{ + HeadBlockHash: block.Hash(), + SafeBlockHash: block.Hash(), + FinalizedBlockHash: block.Hash(), + } + if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { + t.Fatalf("Failed to insert block: %v", err) + } + if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() { + t.Fatalf("Chain head should be updated") + } + parent, head = block, block.NumberU64() 
+ } +} + +func TestEth2DeepReorg(t *testing.T) { + // TODO (MariusVanDerWijden) TestEth2DeepReorg is currently broken, because it tries to reorg + // before the totalTerminalDifficulty threshold + /* + genesis, preMergeBlocks := generatePreMergeChain(core.TriesInMemory * 2) + n, ethservice := startEthService(t, genesis, preMergeBlocks) + defer n.Close() + + var ( + api = NewConsensusAPI(ethservice, nil) + parent = preMergeBlocks[len(preMergeBlocks)-core.TriesInMemory-1] + head = ethservice.BlockChain().CurrentBlock().NumberU64() + ) + if ethservice.BlockChain().HasBlockAndState(parent.Hash(), parent.NumberU64()) { + t.Errorf("Block %d not pruned", parent.NumberU64()) + } + for i := 0; i < 10; i++ { + execData, err := api.assembleBlock(AssembleBlockParams{ + ParentHash: parent.Hash(), + Timestamp: parent.Time() + 5, + }) + if err != nil { + t.Fatalf("Failed to create the executable data %v", err) + } + block, err := ExecutableDataToBlock(ethservice.BlockChain().Config(), parent.Header(), *execData) + if err != nil { + t.Fatalf("Failed to convert executable data to block %v", err) + } + newResp, err := api.ExecutePayload(*execData) + if err != nil || newResp.Status != "VALID" { + t.Fatalf("Failed to insert block: %v", err) + } + if ethservice.BlockChain().CurrentBlock().NumberU64() != head { + t.Fatalf("Chain head shouldn't be updated") + } + if err := api.setHead(block.Hash()); err != nil { + t.Fatalf("Failed to set head: %v", err) + } + if ethservice.BlockChain().CurrentBlock().NumberU64() != block.NumberU64() { + t.Fatalf("Chain head should be updated") + } + parent, head = block, block.NumberU64() + } + */ +} // startEthService creates a full node instance for testing. func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) (*node.Node, *eth.Ethereum) { @@ -232,7 +338,7 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) t.Fatal("can't create node:", err) } - ethcfg := ðconfig.Config{Genesis: genesis, Ethash: ethash.Config{PowMode: ethash.ModeFake}} + ethcfg := ðconfig.Config{Genesis: genesis, Ethash: ethash.Config{PowMode: ethash.ModeFake}, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} ethservice, err := eth.New(n, ethcfg) if err != nil { t.Fatal("can't create eth service:", err) @@ -245,6 +351,69 @@ func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block) t.Fatal("can't import test blocks:", err) } ethservice.SetEtherbase(testAddr) + ethservice.SetSynced() return n, ethservice } + +func TestFullAPI(t *testing.T) { + genesis, preMergeBlocks := generatePreMergeChain(10) + n, ethservice := startEthService(t, genesis, preMergeBlocks) + ethservice.Merger().ReachTTD() + defer n.Close() + var ( + api = NewConsensusAPI(ethservice, nil) + parent = ethservice.BlockChain().CurrentBlock() + // This EVM code generates a log when the contract is created. 
+ logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") + ) + for i := 0; i < 10; i++ { + statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) + nonce := statedb.GetNonce(testAddr) + tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) + ethservice.TxPool().AddLocal(tx) + + params := PayloadAttributesV1{ + Timestamp: parent.Time() + 1, + Random: crypto.Keccak256Hash([]byte{byte(i)}), + SuggestedFeeRecipient: parent.Coinbase(), + } + fcState := ForkchoiceStateV1{ + HeadBlockHash: parent.Hash(), + SafeBlockHash: common.Hash{}, + FinalizedBlockHash: common.Hash{}, + } + resp, err := api.ForkchoiceUpdatedV1(fcState, ¶ms) + if err != nil { + t.Fatalf("error preparing payload, err=%v", err) + } + if resp.Status != SUCCESS.Status { + t.Fatalf("error preparing payload, invalid status: %v", resp.Status) + } + payloadID := computePayloadId(parent.Hash(), ¶ms) + payload, err := api.GetPayloadV1(hexutil.Bytes(payloadID)) + if err != nil { + t.Fatalf("can't get payload: %v", err) + } + execResp, err := api.ExecutePayloadV1(*payload) + if err != nil { + t.Fatalf("can't execute payload: %v", err) + } + if execResp.Status != VALID.Status { + t.Fatalf("invalid status: %v", execResp.Status) + } + fcState = ForkchoiceStateV1{ + HeadBlockHash: payload.BlockHash, + SafeBlockHash: payload.ParentHash, + FinalizedBlockHash: payload.ParentHash, + } + if _, err := api.ForkchoiceUpdatedV1(fcState, nil); err != nil { + t.Fatalf("Failed to insert block: %v", err) + } + if ethservice.BlockChain().CurrentBlock().NumberU64() != payload.Number { + t.Fatalf("Chain head should be updated") + } + parent = ethservice.BlockChain().CurrentBlock() + + } +} diff --git a/eth/catalyst/api_types.go b/eth/catalyst/api_types.go index d5d351a99..1f6703030 100644 --- a/eth/catalyst/api_types.go +++ b/eth/catalyst/api_types.go @@ -17,54 +17,98 @@ package catalyst import ( + "math/big" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ) -//go:generate go run github.com/fjl/gencodec -type assembleBlockParams -field-override assembleBlockParamsMarshaling -out gen_blockparams.go +//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go -// Structure described at https://hackmd.io/T9x2mMA4S7us8tJwEB3FDQ -type assembleBlockParams struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - Timestamp uint64 `json:"timestamp" gencodec:"required"` +// Structure described at https://github.com/ethereum/execution-apis/pull/74 +type PayloadAttributesV1 struct { + Timestamp uint64 `json:"timestamp" gencodec:"required"` + Random common.Hash `json:"random" gencodec:"required"` + SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` } -// JSON type overrides for assembleBlockParams. -type assembleBlockParamsMarshaling struct { +// JSON type overrides for PayloadAttributesV1. 
+type payloadAttributesMarshaling struct { Timestamp hexutil.Uint64 } -//go:generate go run github.com/fjl/gencodec -type executableData -field-override executableDataMarshaling -out gen_ed.go +//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go -// Structure described at https://notes.ethereum.org/@n0ble/rayonism-the-merge-spec#Parameters1 -type executableData struct { - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - Miner common.Address `json:"miner" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - Number uint64 `json:"number" gencodec:"required"` - GasLimit uint64 `json:"gasLimit" gencodec:"required"` - GasUsed uint64 `json:"gasUsed" gencodec:"required"` - Timestamp uint64 `json:"timestamp" gencodec:"required"` - ReceiptRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom []byte `json:"logsBloom" gencodec:"required"` - Transactions [][]byte `json:"transactions" gencodec:"required"` +// Structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md +type ExecutableDataV1 struct { + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom []byte `json:"logsBloom" gencodec:"required"` + Random common.Hash `json:"random" gencodec:"required"` + Number uint64 `json:"blockNumber" gencodec:"required"` + GasLimit uint64 `json:"gasLimit" gencodec:"required"` + GasUsed uint64 `json:"gasUsed" gencodec:"required"` + Timestamp uint64 `json:"timestamp" gencodec:"required"` + ExtraData []byte `json:"extraData" gencodec:"required"` + BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions [][]byte `json:"transactions" gencodec:"required"` } // JSON type overrides for executableData. type executableDataMarshaling struct { - Number hexutil.Uint64 - GasLimit hexutil.Uint64 - GasUsed hexutil.Uint64 - Timestamp hexutil.Uint64 - LogsBloom hexutil.Bytes - Transactions []hexutil.Bytes + Number hexutil.Uint64 + GasLimit hexutil.Uint64 + GasUsed hexutil.Uint64 + Timestamp hexutil.Uint64 + BaseFeePerGas *hexutil.Big + ExtraData hexutil.Bytes + LogsBloom hexutil.Bytes + Transactions []hexutil.Bytes } -type newBlockResponse struct { +//go:generate go run github.com/fjl/gencodec -type PayloadResponse -field-override payloadResponseMarshaling -out gen_payload.go + +type PayloadResponse struct { + PayloadID uint64 `json:"payloadId"` +} + +// JSON type overrides for payloadResponse. 
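The gencodec-generated marshalers that follow encode every numeric field as a hex quantity, matching the engine-API wire format. For the payload attributes this comes out as below (field values illustrative):

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/catalyst"
)

func showAttributes() {
	attrs := catalyst.PayloadAttributesV1{
		Timestamp:             5,
		Random:                common.HexToHash("0x01"),
		SuggestedFeeRecipient: common.HexToAddress("0x02"),
	}
	out, _ := json.Marshal(attrs)
	// Timestamp is rendered as the hex quantity "0x5", not decimal 5.
	fmt.Println(string(out))
}
```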
+type payloadResponseMarshaling struct { + PayloadID hexutil.Uint64 +} + +type NewBlockResponse struct { Valid bool `json:"valid"` } -type genericResponse struct { +type GenericResponse struct { Success bool `json:"success"` } + +type GenericStringResponse struct { + Status string `json:"status"` +} + +type ExecutePayloadResponse struct { + Status string `json:"status"` + LatestValidHash common.Hash `json:"latestValidHash"` +} + +type ConsensusValidatedParams struct { + BlockHash common.Hash `json:"blockHash"` + Status string `json:"status"` +} + +type ForkChoiceResponse struct { + Status string `json:"status"` + PayloadID *hexutil.Bytes `json:"payloadId"` +} + +type ForkchoiceStateV1 struct { + HeadBlockHash common.Hash `json:"headBlockHash"` + SafeBlockHash common.Hash `json:"safeBlockHash"` + FinalizedBlockHash common.Hash `json:"finalizedBlockHash"` +} diff --git a/eth/catalyst/gen_blockparams.go b/eth/catalyst/gen_blockparams.go index a9a08ec3a..ccf5c327f 100644 --- a/eth/catalyst/gen_blockparams.go +++ b/eth/catalyst/gen_blockparams.go @@ -10,37 +10,44 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" ) -var _ = (*assembleBlockParamsMarshaling)(nil) +var _ = (*payloadAttributesMarshaling)(nil) // MarshalJSON marshals as JSON. -func (a assembleBlockParams) MarshalJSON() ([]byte, error) { - type assembleBlockParams struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` +func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) { + type PayloadAttributesV1 struct { + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Random common.Hash `json:"random" gencodec:"required"` + SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` } - var enc assembleBlockParams - enc.ParentHash = a.ParentHash - enc.Timestamp = hexutil.Uint64(a.Timestamp) + var enc PayloadAttributesV1 + enc.Timestamp = hexutil.Uint64(p.Timestamp) + enc.Random = p.Random + enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
-func (a *assembleBlockParams) UnmarshalJSON(input []byte) error { - type assembleBlockParams struct { - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` +func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error { + type PayloadAttributesV1 struct { + Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Random *common.Hash `json:"random" gencodec:"required"` + SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"` } - var dec assembleBlockParams + var dec PayloadAttributesV1 if err := json.Unmarshal(input, &dec); err != nil { return err } - if dec.ParentHash == nil { - return errors.New("missing required field 'parentHash' for assembleBlockParams") - } - a.ParentHash = *dec.ParentHash if dec.Timestamp == nil { - return errors.New("missing required field 'timestamp' for assembleBlockParams") + return errors.New("missing required field 'timestamp' for PayloadAttributesV1") } - a.Timestamp = uint64(*dec.Timestamp) + p.Timestamp = uint64(*dec.Timestamp) + if dec.Random == nil { + return errors.New("missing required field 'random' for PayloadAttributesV1") + } + p.Random = *dec.Random + if dec.SuggestedFeeRecipient == nil { + return errors.New("missing required field 'suggestedFeeRecipient' for PayloadAttributesV1") + } + p.SuggestedFeeRecipient = *dec.SuggestedFeeRecipient return nil } diff --git a/eth/catalyst/gen_ed.go b/eth/catalyst/gen_ed.go index 4c2e4c8ea..46eb45808 100644 --- a/eth/catalyst/gen_ed.go +++ b/eth/catalyst/gen_ed.go @@ -5,6 +5,7 @@ package catalyst import ( "encoding/json" "errors" + "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -13,31 +14,37 @@ import ( var _ = (*executableDataMarshaling)(nil) // MarshalJSON marshals as JSON. 
-func (e executableData) MarshalJSON() ([]byte, error) { - type executableData struct { - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - Miner common.Address `json:"miner" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - Number hexutil.Uint64 `json:"number" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ReceiptRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` +func (e ExecutableDataV1) MarshalJSON() ([]byte, error) { + type ExecutableDataV1 struct { + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random common.Hash `json:"random" gencodec:"required"` + Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` } - var enc executableData - enc.BlockHash = e.BlockHash + var enc ExecutableDataV1 enc.ParentHash = e.ParentHash - enc.Miner = e.Miner + enc.FeeRecipient = e.FeeRecipient enc.StateRoot = e.StateRoot + enc.ReceiptsRoot = e.ReceiptsRoot + enc.LogsBloom = e.LogsBloom + enc.Random = e.Random enc.Number = hexutil.Uint64(e.Number) enc.GasLimit = hexutil.Uint64(e.GasLimit) enc.GasUsed = hexutil.Uint64(e.GasUsed) enc.Timestamp = hexutil.Uint64(e.Timestamp) - enc.ReceiptRoot = e.ReceiptRoot - enc.LogsBloom = e.LogsBloom + enc.ExtraData = e.ExtraData + enc.BaseFeePerGas = (*hexutil.Big)(e.BaseFeePerGas) + enc.BlockHash = e.BlockHash if e.Transactions != nil { enc.Transactions = make([]hexutil.Bytes, len(e.Transactions)) for k, v := range e.Transactions { @@ -48,66 +55,81 @@ func (e executableData) MarshalJSON() ([]byte, error) { } // UnmarshalJSON unmarshals from JSON. 
-func (e *executableData) UnmarshalJSON(input []byte) error { - type executableData struct { - BlockHash *common.Hash `json:"blockHash" gencodec:"required"` - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - Miner *common.Address `json:"miner" gencodec:"required"` - StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` - Number *hexutil.Uint64 `json:"number" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ReceiptRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` +func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error { + type ExecutableDataV1 struct { + ParentHash *common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random *common.Hash `json:"random" gencodec:"required"` + Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash *common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` } - var dec executableData + var dec ExecutableDataV1 if err := json.Unmarshal(input, &dec); err != nil { return err } - if dec.BlockHash == nil { - return errors.New("missing required field 'blockHash' for executableData") - } - e.BlockHash = *dec.BlockHash if dec.ParentHash == nil { - return errors.New("missing required field 'parentHash' for executableData") + return errors.New("missing required field 'parentHash' for ExecutableDataV1") } e.ParentHash = *dec.ParentHash - if dec.Miner == nil { - return errors.New("missing required field 'miner' for executableData") + if dec.FeeRecipient == nil { + return errors.New("missing required field 'feeRecipient' for ExecutableDataV1") } - e.Miner = *dec.Miner + e.FeeRecipient = *dec.FeeRecipient if dec.StateRoot == nil { - return errors.New("missing required field 'stateRoot' for executableData") + return errors.New("missing required field 'stateRoot' for ExecutableDataV1") } e.StateRoot = *dec.StateRoot + if dec.ReceiptsRoot == nil { + return errors.New("missing required field 'receiptsRoot' for ExecutableDataV1") + } + e.ReceiptsRoot = *dec.ReceiptsRoot + if dec.LogsBloom == nil { + return errors.New("missing required field 'logsBloom' for ExecutableDataV1") + } + e.LogsBloom = *dec.LogsBloom + if dec.Random == nil { + return errors.New("missing required field 'random' for ExecutableDataV1") + } + e.Random = *dec.Random if dec.Number == nil { - return errors.New("missing required field 'number' for executableData") + return errors.New("missing required field 'blockNumber' for ExecutableDataV1") } e.Number = uint64(*dec.Number) if dec.GasLimit == nil { - return errors.New("missing required field 'gasLimit' for executableData") + return 
errors.New("missing required field 'gasLimit' for ExecutableDataV1") } e.GasLimit = uint64(*dec.GasLimit) if dec.GasUsed == nil { - return errors.New("missing required field 'gasUsed' for executableData") + return errors.New("missing required field 'gasUsed' for ExecutableDataV1") } e.GasUsed = uint64(*dec.GasUsed) if dec.Timestamp == nil { - return errors.New("missing required field 'timestamp' for executableData") + return errors.New("missing required field 'timestamp' for ExecutableDataV1") } e.Timestamp = uint64(*dec.Timestamp) - if dec.ReceiptRoot == nil { - return errors.New("missing required field 'receiptsRoot' for executableData") + if dec.ExtraData == nil { + return errors.New("missing required field 'extraData' for ExecutableDataV1") } - e.ReceiptRoot = *dec.ReceiptRoot - if dec.LogsBloom == nil { - return errors.New("missing required field 'logsBloom' for executableData") + e.ExtraData = *dec.ExtraData + if dec.BaseFeePerGas == nil { + return errors.New("missing required field 'baseFeePerGas' for ExecutableDataV1") } - e.LogsBloom = *dec.LogsBloom + e.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas) + if dec.BlockHash == nil { + return errors.New("missing required field 'blockHash' for ExecutableDataV1") + } + e.BlockHash = *dec.BlockHash if dec.Transactions == nil { - return errors.New("missing required field 'transactions' for executableData") + return errors.New("missing required field 'transactions' for ExecutableDataV1") } e.Transactions = make([][]byte, len(dec.Transactions)) for k, v := range dec.Transactions { diff --git a/eth/catalyst/gen_payload.go b/eth/catalyst/gen_payload.go new file mode 100644 index 000000000..a0b00fcfd --- /dev/null +++ b/eth/catalyst/gen_payload.go @@ -0,0 +1,36 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package catalyst + +import ( + "encoding/json" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +var _ = (*payloadResponseMarshaling)(nil) + +// MarshalJSON marshals as JSON. +func (p PayloadResponse) MarshalJSON() ([]byte, error) { + type PayloadResponse struct { + PayloadID hexutil.Uint64 `json:"payloadId"` + } + var enc PayloadResponse + enc.PayloadID = hexutil.Uint64(p.PayloadID) + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. 
+func (p *PayloadResponse) UnmarshalJSON(input []byte) error { + type PayloadResponse struct { + PayloadID *hexutil.Uint64 `json:"payloadId"` + } + var dec PayloadResponse + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.PayloadID != nil { + p.PayloadID = uint64(*dec.PayloadID) + } + return nil +} diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 4ca1b55bb..28ad18b81 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -35,9 +35,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" ) var ( @@ -45,7 +43,6 @@ var ( MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request - MaxStateFetch = 384 // Amount of node state values to allow fetching per request maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) maxHeadersProcess = 2048 // Number of header download results to import at once into the chain @@ -56,11 +53,11 @@ var ( reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs - fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync + fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during snap sync fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download - fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync + fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in snap sync ) var ( @@ -80,27 +77,34 @@ var ( errCancelStateFetch = errors.New("state data download canceled (requested)") errCancelContentProcessing = errors.New("content processing canceled (requested)") errCanceled = errors.New("syncing canceled (requested)") - errNoSyncActive = errors.New("no sync active") errTooOld = errors.New("peer's protocol version too old") errNoAncestorFound = errors.New("no common ancestor found") ) +// peerDropFn is a callback type for dropping a peer detected as malicious. +type peerDropFn func(id string) + +// headerTask is a set of downloaded headers to queue along with their precomputed +// hashes to avoid constant rehashing. +type headerTask struct { + headers []*types.Header + hashes []common.Hash +} + type Downloader struct { mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode mux *event.TypeMux // Event multiplexer to announce sync operation events - checkpoint uint64 // Checkpoint block number to enforce head against (e.g. fast sync) + checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync) genesis uint64 // Genesis block number to limit sync to (e.g. 
light client CHT) queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed - stateDB ethdb.Database // Database to state sync into (and deduplicate via) - stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks + stateDB ethdb.Database // Database to state sync into (and deduplicate via) // Statistics - syncStatsChainOrigin uint64 // Origin block number where syncing started at - syncStatsChainHeight uint64 // Highest block number known when syncing started - syncStatsState stateSyncStats + syncStatsChainOrigin uint64 // Origin block number where syncing started at + syncStatsChainHeight uint64 // Highest block number known when syncing started syncStatsLock sync.RWMutex // Lock protecting the sync stats fields lightchain LightChain @@ -117,12 +121,7 @@ type Downloader struct { ancientLimit uint64 // The maximum block number which can be regarded as ancient data. // Channels - headerCh chan dataPack // Channel receiving inbound block headers - bodyCh chan dataPack // Channel receiving inbound block bodies - receiptCh chan dataPack // Channel receiving inbound receipts - bodyWakeCh chan bool // Channel to signal the block body fetcher of new tasks - receiptWakeCh chan bool // Channel to signal the receipt fetcher of new tasks - headerProcCh chan []*types.Header // Channel to feed the header processor new tasks + headerProcCh chan *headerTask // Channel to feed the header processor new tasks // State sync pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root @@ -131,8 +130,6 @@ type Downloader struct { snapSync bool // Whether to run state sync over the snap protocol SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now stateSyncStart chan *stateSync - trackStateReq chan *stateReq - stateCh chan dataPack // Channel receiving inbound node state data // Cancellation and termination cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) @@ -171,14 +168,14 @@ type LightChain interface { SetHead(uint64) error } -// BlockChain encapsulates functions required to sync a (full or fast) blockchain. +// BlockChain encapsulates functions required to sync a (full or snap) blockchain. type BlockChain interface { LightChain // HasBlock verifies a block's presence in the local chain. HasBlock(common.Hash, uint64) bool - // HasFastBlock verifies a fast block's presence in the local chain. + // HasFastBlock verifies a snap block's presence in the local chain. HasFastBlock(common.Hash, uint64) bool // GetBlockByHash retrieves a block from the local chain. @@ -187,11 +184,11 @@ type BlockChain interface { // CurrentBlock retrieves the head block from the local chain. CurrentBlock() *types.Block - // CurrentFastBlock retrieves the head fast block from the local chain. + // CurrentFastBlock retrieves the head snap block from the local chain. CurrentFastBlock() *types.Block - // FastSyncCommitHead directly commits the head block to a certain entity. - FastSyncCommitHead(common.Hash) error + // SnapSyncCommitHead directly commits the head block to a certain entity. + SnapSyncCommitHead(common.Hash) error // InsertChain inserts a batch of blocks into the local chain. InsertChain(types.Blocks) (int, error) @@ -204,13 +201,12 @@ type BlockChain interface { } // New creates a new downloader to fetch hashes and blocks from remote peers. 
-func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { +func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { if lightchain == nil { lightchain = chain } dl := &Downloader{ stateDB: stateDb, - stateBloom: stateBloom, mux: mux, checkpoint: checkpoint, queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), @@ -218,20 +214,10 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, blockchain: chain, lightchain: lightchain, dropPeer: dropPeer, - headerCh: make(chan dataPack, 1), - bodyCh: make(chan dataPack, 1), - receiptCh: make(chan dataPack, 1), - bodyWakeCh: make(chan bool, 1), - receiptWakeCh: make(chan bool, 1), - headerProcCh: make(chan []*types.Header, 1), + headerProcCh: make(chan *headerTask, 1), quitCh: make(chan struct{}), - stateCh: make(chan dataPack), SnapSyncer: snap.NewSyncer(stateDb), stateSyncStart: make(chan *stateSync), - syncStatsState: stateSyncStats{ - processed: rawdb.ReadFastTrieProgress(stateDb), - }, - trackStateReq: make(chan *stateReq), } go dl.stateFetcher() return dl @@ -241,7 +227,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, // block where synchronisation started at (may have failed/suspended); the block // or header sync is currently at; and the latest known block which the sync targets. // -// In addition, during the state download phase of fast synchronisation the number +// In addition, during the state download phase of snap synchronisation the number // of processed and the total number of known states are also returned. Otherwise // these are zero. 
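As the hunk below shows, `ethereum.SyncProgress` is extended in this change with per-kind snap sync counters in place of the old `PulledStates`/`KnownStates` pair. A rough consumer-side sketch (the helper name is ours; the fields are the ones introduced here):

```go
// logSyncHealth prints a coarse health read-out from a Progress snapshot.
func logSyncHealth(p ethereum.SyncProgress) {
	if p.HighestBlock >= p.CurrentBlock {
		fmt.Printf("blocks: %d behind head\n", p.HighestBlock-p.CurrentBlock)
	}
	fmt.Printf("state:  %d accounts, %d storage slots synced\n",
		p.SyncedAccounts, p.SyncedStorage)
	// Pending heal items are the clearest "almost done" signal for snap sync.
	fmt.Printf("heal:   %d trienodes, %d bytecodes pending\n",
		p.HealingTrienodes, p.HealingBytecode)
}
```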
func (d *Downloader) Progress() ethereum.SyncProgress { @@ -254,19 +240,31 @@ func (d *Downloader) Progress() ethereum.SyncProgress { switch { case d.blockchain != nil && mode == FullSync: current = d.blockchain.CurrentBlock().NumberU64() - case d.blockchain != nil && mode == FastSync: + case d.blockchain != nil && mode == SnapSync: current = d.blockchain.CurrentFastBlock().NumberU64() case d.lightchain != nil: current = d.lightchain.CurrentHeader().Number.Uint64() default: log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) } + progress, pending := d.SnapSyncer.Progress() + return ethereum.SyncProgress{ - StartingBlock: d.syncStatsChainOrigin, - CurrentBlock: current, - HighestBlock: d.syncStatsChainHeight, - PulledStates: d.syncStatsState.processed, - KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, + StartingBlock: d.syncStatsChainOrigin, + CurrentBlock: current, + HighestBlock: d.syncStatsChainHeight, + SyncedAccounts: progress.AccountSynced, + SyncedAccountBytes: uint64(progress.AccountBytes), + SyncedBytecodes: progress.BytecodeSynced, + SyncedBytecodeBytes: uint64(progress.BytecodeBytes), + SyncedStorage: progress.StorageSynced, + SyncedStorageBytes: uint64(progress.StorageBytes), + HealedTrienodes: progress.TrienodeHealSynced, + HealedTrienodeBytes: uint64(progress.TrienodeHealBytes), + HealedBytecodes: progress.BytecodeHealSynced, + HealedBytecodeBytes: uint64(progress.BytecodeHealBytes), + HealingTrienodes: pending.TrienodeHeal, + HealingBytecode: pending.BytecodeHeal, } } @@ -364,47 +362,27 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { log.Info("Block synchronisation started") } - // If we are already full syncing, but have a fast-sync bloom filter laying - // around, make sure it doesn't use memory any more. This is a special case - // when the user attempts to fast sync a new empty network. - if mode == FullSync && d.stateBloom != nil { - d.stateBloom.Close() - } - // If snap sync was requested, create the snap scheduler and switch to fast - // sync mode. Long term we could drop fast sync or merge the two together, + // If snap sync was requested, create the snap scheduler and switch to snap + // sync mode. Long term we could drop snap sync or merge the two together, // but until snap becomes prevalent, we should support both. TODO(karalabe). if mode == SnapSync { - if !d.snapSync { - // Snap sync uses the snapshot namespace to store potentially flakey data until - // sync completely heals and finishes. Pause snapshot maintenance in the mean - // time to prevent access. - if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests - snapshots.Disable() - } - log.Warn("Enabling snapshot sync prototype") - d.snapSync = true + // Snap sync uses the snapshot namespace to store potentially flakey data until + // sync completely heals and finishes. Pause snapshot maintenance in the mean- + // time to prevent access. 
+ if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests + snapshots.Disable() } - mode = FastSync } // Reset the queue, peer set and wake channels to clean any internal leftover state d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems) d.peers.Reset() - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case <-ch: default: } } - for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} { - for empty := false; !empty; { - select { - case <-ch: - default: - empty = true - } - } - } for empty := false; !empty; { select { case <-d.headerProcCh: @@ -463,9 +441,9 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I if err != nil { return err } - if mode == FastSync && pivot == nil { + if mode == SnapSync && pivot == nil { // If no pivot block was returned, the head is below the min full block - // threshold (i.e. new chain). In that case we won't really fast sync + // threshold (i.e. new chain). In that case we won't really snap sync // anyway, but still need a valid pivot block to avoid some code hitting // nil panics on an access. pivot = d.blockchain.CurrentBlock().Header() @@ -483,8 +461,8 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I d.syncStatsChainHeight = height d.syncStatsLock.Unlock() - // Ensure our origin point is below any fast sync pivot point - if mode == FastSync { + // Ensure our origin point is below any snap sync pivot point + if mode == SnapSync { if height <= uint64(fsMinFullBlocks) { origin = 0 } else { @@ -493,17 +471,17 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I origin = pivotNumber - 1 } // Write out the pivot into the database so a rollback beyond it will - // reenable fast sync + // reenable snap sync rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) } } d.committed = 1 - if mode == FastSync && pivot.Number.Uint64() != 0 { + if mode == SnapSync && pivot.Number.Uint64() != 0 { d.committed = 0 } - if mode == FastSync { + if mode == SnapSync { // Set the ancient data limitation. - // If we are running fast sync, all block data older than ancientLimit will be + // If we are running snap sync, all block data older than ancientLimit will be // written to the ancient store. More recent data will be written to the active // database and will wait for the freezer to migrate. 
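The pivot bookkeeping in this hunk reduces to a little arithmetic. A condensed restatement (the helper name is ours; `fsMinFullBlocks = 64` per the constants earlier in this file):

```go
// clampOrigin mirrors the origin/pivot rules above: the newest 64 blocks
// are always executed in full, and the sync origin is forced strictly
// below the pivot so the pivot block itself gets re-fetched.
func clampOrigin(origin, height, pivotNumber uint64) uint64 {
	if height <= 64 { // fsMinFullBlocks: chain too short, no pivot at all
		return 0
	}
	if pivotNumber <= origin {
		return pivotNumber - 1
	}
	return origin
}
```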
// @@ -546,17 +524,17 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I d.syncInitHook(origin, height) } fetchers := []func() error{ - func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved - func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync - func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync + func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) }, // Headers are always retrieved + func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and snap sync + func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during snap sync func() error { return d.processHeaders(origin+1, td) }, } - if mode == FastSync { + if mode == SnapSync { d.pivotLock.Lock() d.pivotHeader = pivot d.pivotLock.Unlock() - fetchers = append(fetchers, func() error { return d.processFastSyncContent() }) + fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) } else if mode == FullSync { fetchers = append(fetchers, d.processFullSyncContent) } @@ -625,9 +603,6 @@ func (d *Downloader) Terminate() { default: close(d.quitCh) } - if d.stateBloom != nil { - d.stateBloom.Close() - } d.quitLock.Unlock() // Cancel any pending download requests @@ -643,60 +618,38 @@ func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *ty // Request the advertised remote head block and wait for the response latest, _ := p.peer.Head() fetch := 1 - if mode == FastSync { + if mode == SnapSync { fetch = 2 // head + pivot headers } - go p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true) - - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - for { - select { - case <-d.cancelCh: - return nil, nil, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer gave us at least one and at most the requested headers - headers := packet.(*headerPack).headers - if len(headers) == 0 || len(headers) > fetch { - return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) - } - // The first header needs to be the head, validate against the checkpoint - // and request. If only 1 header was returned, make sure there's no pivot - // or there was not one requested. - head := headers[0] - if (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint { - return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint) - } - if len(headers) == 1 { - if mode == FastSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) - } - p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash()) - return head, nil, nil - } - // At this point we have 2 headers in total and the first is the - // validated head of the chain. 
Check the pivot number and return, - pivot := headers[1] - if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { - return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) - } - return head, pivot, nil - - case <-timeout: - p.log.Debug("Waiting for head header timed out", "elapsed", ttl) - return nil, nil, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } + headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) + if err != nil { + return nil, nil, err } + // Make sure the peer gave us at least one and at most the requested headers + if len(headers) == 0 || len(headers) > fetch { + return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) + } + // The first header needs to be the head, validate against the checkpoint + // and request. If only 1 header was returned, make sure there's no pivot + // or there was not one requested. + head = headers[0] + if (mode == SnapSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint { + return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint) + } + if len(headers) == 1 { + if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { + return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) + } + p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) + return head, nil, nil + } + // At this point we have 2 headers in total and the first is the + // validated head of the chain. Check the pivot number and return, + pivot = headers[1] + if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { + return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) + } + return head, pivot, nil } // calculateRequestSpan calculates what headers to request from a peer when trying to determine the @@ -767,7 +720,7 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) switch mode { case FullSync: localHeight = d.blockchain.CurrentBlock().NumberU64() - case FastSync: + case SnapSync: localHeight = d.blockchain.CurrentFastBlock().NumberU64() default: localHeight = d.lightchain.CurrentHeader().Number.Uint64() @@ -822,76 +775,52 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) return ancestor, nil } -func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) { +func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) - go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false) - + headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) + if err != nil { + return 0, err + } // Wait for the remote response to the head fetch number, hash := uint64(0), common.Hash{} - ttl := d.peers.rates.TargetTimeout() - timeout := time.After(ttl) + // Make sure the peer actually gave something valid + if len(headers) == 0 { + p.log.Warn("Empty head header set") + return 0, 
errEmptyHeaderSet + } + // Make sure the peer's reply conforms to the request + for i, header := range headers { + expectNumber := from + int64(i)*int64(skip+1) + if number := header.Number.Int64(); number != expectNumber { + p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) + return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) + } + } + // Check if a common ancestor was found + for i := len(headers) - 1; i >= 0; i-- { + // Skip any headers that underflow/overflow our requested set + if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { + continue + } + // Otherwise check if we already know the header or not + h := hashes[i] + n := headers[i].Number.Uint64() - for finished := false; !finished; { - select { - case <-d.cancelCh: - return 0, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) == 0 { - p.log.Warn("Empty head header set") - return 0, errEmptyHeaderSet - } - // Make sure the peer's reply conforms to the request - for i, header := range headers { - expectNumber := from + int64(i)*int64(skip+1) - if number := header.Number.Int64(); number != expectNumber { - p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) - return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) - } - } - // Check if a common ancestor was found - finished = true - for i := len(headers) - 1; i >= 0; i-- { - // Skip any headers that underflow/overflow our requested set - if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { - continue - } - // Otherwise check if we already know the header or not - h := headers[i].Hash() - n := headers[i].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case FastSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if known { - number, hash = n, h - break - } - } - - case <-timeout: - p.log.Debug("Waiting for head header timed out", "elapsed", ttl) - return 0, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore + var known bool + switch mode { + case FullSync: + known = d.blockchain.HasBlock(h, n) + case SnapSync: + known = d.blockchain.HasFastBlock(h, n) + default: + known = d.lightchain.HasHeader(h, n) + } + if known { + number, hash = n, h + break } } // If the head fetch already found an ancestor, return @@ -906,7 +835,7 @@ func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, re return 0, errNoAncestorFound } -func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) { +func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { hash := common.Hash{} // Ancestor not found, we need to binary search over our chain @@ -920,65 +849,39 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, // Split our chain interval in two, and request the hash to cross check check := (start + end) / 2 - ttl := 
d.peers.rates.TargetTimeout() - timeout := time.After(ttl) - - go p.peer.RequestHeadersByNumber(check, 1, 0, false) - - // Wait until a reply arrives to this request - for arrived := false; !arrived; { - select { - case <-d.cancelCh: - return 0, errCanceled - - case packet := <-d.headerCh: - // Discard anything not from the origin peer - if packet.PeerId() != p.id { - log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) - break - } - // Make sure the peer actually gave something valid - headers := packet.(*headerPack).headers - if len(headers) != 1 { - p.log.Warn("Multiple headers for single request", "headers", len(headers)) - return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) - } - arrived = true - - // Modify the search interval based on the response - h := headers[0].Hash() - n := headers[0].Number.Uint64() - - var known bool - switch mode { - case FullSync: - known = d.blockchain.HasBlock(h, n) - case FastSync: - known = d.blockchain.HasFastBlock(h, n) - default: - known = d.lightchain.HasHeader(h, n) - } - if !known { - end = check - break - } - header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists - if header.Number.Uint64() != check { - p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) - return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) - } - start = check - hash = h - - case <-timeout: - p.log.Debug("Waiting for search header timed out", "elapsed", ttl) - return 0, errTimeout - - case <-d.bodyCh: - case <-d.receiptCh: - // Out of bounds delivery, ignore - } + headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) + if err != nil { + return 0, err } + // Make sure the peer actually gave something valid + if len(headers) != 1 { + p.log.Warn("Multiple headers for single request", "headers", len(headers)) + return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) + } + // Modify the search interval based on the response + h := hashes[0] + n := headers[0].Number.Uint64() + + var known bool + switch mode { + case FullSync: + known = d.blockchain.HasBlock(h, n) + case SnapSync: + known = d.blockchain.HasFastBlock(h, n) + default: + known = d.lightchain.HasHeader(h, n) + } + if !known { + end = check + continue + } + header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists + if header.Number.Uint64() != check { + p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) + return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) + } + start = check + hash = h } // Ensure valid ancestry and return if int64(start) <= floor { @@ -997,217 +900,59 @@ func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, // other peers are only accepted if they map cleanly to the skeleton. If no one // can fill in the skeleton - not even the origin peer - it's assumed invalid and // the origin is dropped. 
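The "skeleton" in the comment above is a sparse request: one joint header every `MaxHeaderFetch` blocks, fetched from the master peer, with the gaps between joints filled concurrently by any peer and validated by hash-linking. A sketch of the joints a single request pins down (the helper name is ours; constants are from this file):

```go
// With MaxHeaderFetch = 192 and MaxSkeletonSize = 128, one skeleton
// response anchors up to 128*192 = 24576 headers: joints sit at from+191,
// from+383, ... and each 192-header gap is filled independently.
func skeletonJoints(from uint64) []uint64 {
	joints := make([]uint64, 0, 128)
	for i := uint64(1); i <= 128; i++ {
		joints = append(joints, from+i*192-1)
	}
	return joints
}
```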
-func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { +func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { p.log.Debug("Directing header downloads", "origin", from) defer p.log.Debug("Header download terminated") - // Create a timeout timer, and the associated header fetcher - skeleton := true // Skeleton assembly phase or finishing up - pivoting := false // Whether the next request is pivot verification - request := time.Now() // time of the last skeleton fetch request - timeout := time.NewTimer(0) // timer to dump a non-responsive active peer - <-timeout.C // timeout channel should be initially empty - defer timeout.Stop() - - var ttl time.Duration - getHeaders := func(from uint64) { - request = time.Now() - - ttl = d.peers.rates.TargetTimeout() - timeout.Reset(ttl) - - if skeleton { - p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) - go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - } else { - p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) - go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false) - } - } - getNextPivot := func() { - pivoting = true - request = time.Now() - - ttl = d.peers.rates.TargetTimeout() - timeout.Reset(ttl) - - d.pivotLock.RLock() - pivot := d.pivotHeader.Number.Uint64() - d.pivotLock.RUnlock() - - p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) - go p.peer.RequestHeadersByNumber(pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - } // Start pulling the header chain skeleton until all is done - ancestor := from - getHeaders(from) - - mode := d.getMode() + var ( + skeleton = true // Skeleton assembly phase or finishing up + pivoting = false // Whether the next request is pivot verification + ancestor = from + mode = d.getMode() + ) for { - select { - case <-d.cancelCh: - return errCanceled - - case packet := <-d.headerCh: - // Make sure the active peer is giving us the skeleton headers - if packet.PeerId() != p.id { - log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId()) - break - } - headerReqTimer.UpdateSince(request) - timeout.Stop() - - // If the pivot is being checked, move if it became stale and run the real retrieval - var pivot uint64 - + // Pull the next batch of headers, it either: + // - Pivot check to see if the chain moved too far + // - Skeleton retrieval to permit concurrent header fetches + // - Full header retrieval if we're near the chain head + var ( + headers []*types.Header + hashes []common.Hash + err error + ) + switch { + case pivoting: d.pivotLock.RLock() - if d.pivotHeader != nil { - pivot = d.pivotHeader.Number.Uint64() - } + pivot := d.pivotHeader.Number.Uint64() d.pivotLock.RUnlock() - if pivoting { - if packet.Items() == 2 { - // Retrieve the headers and do some sanity checks, just in case - headers := packet.(*headerPack).headers + p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) + headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep - if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { - log.Warn("Peer sent invalid next pivot", "have", have, "want", want) - return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) - } - if have, want := 
headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { - log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) - return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) - } - log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) - pivot = headers[0].Number.Uint64() + case skeleton: + p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) + headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) - d.pivotLock.Lock() - d.pivotHeader = headers[0] - d.pivotLock.Unlock() + default: + p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) + headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) + } + switch err { + case nil: + // Headers retrieved, continue with processing - // Write out the pivot into the database so a rollback beyond - // it will reenable fast sync and update the state root that - // the state syncer will be downloading. - rawdb.WriteLastPivotNumber(d.stateDB, pivot) - } - pivoting = false - getHeaders(from) - continue - } - // If the skeleton's finished, pull any remaining head headers directly from the origin - if skeleton && packet.Items() == 0 { - skeleton = false - getHeaders(from) - continue - } - // If no more headers are inbound, notify the content fetchers and return - if packet.Items() == 0 { - // Don't abort header fetches while the pivot is downloading - if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { - p.log.Debug("No headers, waiting for pivot commit") - select { - case <-time.After(fsHeaderContCheck): - getHeaders(from) - continue - case <-d.cancelCh: - return errCanceled - } - } - // Pivot done (or not in fast sync) and no more headers, terminate the process - p.log.Debug("No more headers available") - select { - case d.headerProcCh <- nil: - return nil - case <-d.cancelCh: - return errCanceled - } - } - headers := packet.(*headerPack).headers + case errCanceled: + // Sync cancelled, no issue, propagate up + return err - // If we received a skeleton batch, resolve internals concurrently - if skeleton { - filled, proced, err := d.fillHeaderSkeleton(from, headers) - if err != nil { - p.log.Debug("Skeleton chain invalid", "err", err) - return fmt.Errorf("%w: %v", errInvalidChain, err) - } - headers = filled[proced:] - from += uint64(proced) - } else { - // If we're closing in on the chain head, but haven't yet reached it, delay - // the last few headers so mini reorgs on the head don't cause invalid hash - // chain errors. - if n := len(headers); n > 0 { - // Retrieve the current head we're at - var head uint64 - if mode == LightSync { - head = d.lightchain.CurrentHeader().Number.Uint64() - } else { - head = d.blockchain.CurrentFastBlock().NumberU64() - if full := d.blockchain.CurrentBlock().NumberU64(); head < full { - head = full - } - } - // If the head is below the common ancestor, we're actually deduplicating - // already existing chain segments, so use the ancestor as the fake head. - // Otherwise we might end up delaying header deliveries pointlessly. 
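The reorg-protection trim deleted here reappears, hash-aware, further down in the rewritten loop; its rule is compact enough to restate. A sketch using this file's constants (`reorgProtThreshold = 48`, `reorgProtHeaderDelay = 2`; the helper name is ours):

```go
import "github.com/ethereum/go-ethereum/core/types"

// trimReorgProne holds back the last two headers of a batch that runs well
// past the local head, so a mini reorg at the tip cannot wedge the sync
// with an invalid hash chain; the held headers are re-requested next round.
func trimReorgProne(headers []*types.Header, localHead uint64) []*types.Header {
	n := len(headers)
	if n == 0 || localHead+48 >= headers[n-1].Number.Uint64() {
		return headers // batch still close to our head: keep everything
	}
	delay := 2
	if delay > n {
		delay = n
	}
	return headers[:n-delay]
}
```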
- if head < ancestor { - head = ancestor - } - // If the head is way older than this batch, delay the last few headers - if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { - delay := reorgProtHeaderDelay - if delay > n { - delay = n - } - headers = headers[:n-delay] - } - } - } - // Insert all the new headers and fetch the next batch - if len(headers) > 0 { - p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) - select { - case d.headerProcCh <- headers: - case <-d.cancelCh: - return errCanceled - } - from += uint64(len(headers)) - - // If we're still skeleton filling fast sync, check pivot staleness - // before continuing to the next skeleton filling - if skeleton && pivot > 0 { - getNextPivot() - } else { - getHeaders(from) - } - } else { - // No headers delivered, or all of them being delayed, sleep a bit and retry - p.log.Trace("All headers delayed, waiting") - select { - case <-time.After(fsHeaderContCheck): - getHeaders(from) - continue - case <-d.cancelCh: - return errCanceled - } - } - - case <-timeout.C: - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored - p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id) - break - } - // Header retrieval timed out, consider the peer bad and drop - p.log.Debug("Header request timed out", "elapsed", ttl) - headerTimeoutMeter.Mark(1) + default: + // Header retrieval either timed out, or the peer failed in some strange way + // (e.g. disconnect). Consider the master peer bad and drop d.dropPeer(p.id) // Finish the sync gracefully instead of dumping the gathered data though - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case ch <- false: case <-d.cancelCh: @@ -1217,7 +962,154 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { case d.headerProcCh <- nil: case <-d.cancelCh: } - return fmt.Errorf("%w: header request timed out", errBadPeer) + return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) + } + // If the pivot is being checked, move if it became stale and run the real retrieval + var pivot uint64 + + d.pivotLock.RLock() + if d.pivotHeader != nil { + pivot = d.pivotHeader.Number.Uint64() + } + d.pivotLock.RUnlock() + + if pivoting { + if len(headers) == 2 { + if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { + log.Warn("Peer sent invalid next pivot", "have", have, "want", want) + return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) + } + if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { + log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) + return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) + } + log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) + pivot = headers[0].Number.Uint64() + + d.pivotLock.Lock() + d.pivotHeader = headers[0] + d.pivotLock.Unlock() + + // Write out the pivot into the database so a rollback beyond + // it will reenable snap sync and update the state root that + // the state syncer will be downloading. 
+ rawdb.WriteLastPivotNumber(d.stateDB, pivot) + } + // Disable the pivot check and fetch the next batch of headers + pivoting = false + continue + } + // If the skeleton's finished, pull any remaining head headers directly from the origin + if skeleton && len(headers) == 0 { + // A malicious node might withhold advertised headers indefinitely + if from+uint64(MaxHeaderFetch)-1 <= head { + p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) + return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) + } + p.log.Debug("No skeleton, fetching headers directly") + skeleton = false + continue + } + // If no more headers are inbound, notify the content fetchers and return + if len(headers) == 0 { + // Don't abort header fetches while the pivot is downloading + if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { + p.log.Debug("No headers, waiting for pivot commit") + select { + case <-time.After(fsHeaderContCheck): + continue + case <-d.cancelCh: + return errCanceled + } + } + // Pivot done (or not in snap sync) and no more headers, terminate the process + p.log.Debug("No more headers available") + select { + case d.headerProcCh <- nil: + return nil + case <-d.cancelCh: + return errCanceled + } + } + // If we received a skeleton batch, resolve internals concurrently + var progressed bool + if skeleton { + filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) + if err != nil { + p.log.Debug("Skeleton chain invalid", "err", err) + return fmt.Errorf("%w: %v", errInvalidChain, err) + } + headers = filled[proced:] + hashes = hashset[proced:] + + progressed = proced > 0 + from += uint64(proced) + } else { + // A malicious node might withhold advertised headers indefinitely + if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head { + p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64()) + return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64()) + } + // If we're closing in on the chain head, but haven't yet reached it, delay + // the last few headers so mini reorgs on the head don't cause invalid hash + // chain errors. + if n := len(headers); n > 0 { + // Retrieve the current head we're at + var head uint64 + if mode == LightSync { + head = d.lightchain.CurrentHeader().Number.Uint64() + } else { + head = d.blockchain.CurrentFastBlock().NumberU64() + if full := d.blockchain.CurrentBlock().NumberU64(); head < full { + head = full + } + } + // If the head is below the common ancestor, we're actually deduplicating + // already existing chain segments, so use the ancestor as the fake head. + // Otherwise, we might end up delaying header deliveries pointlessly. + if head < ancestor { + head = ancestor + } + // If the head is way older than this batch, delay the last few headers + if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { + delay := reorgProtHeaderDelay + if delay > n { + delay = n + } + headers = headers[:n-delay] + hashes = hashes[:n-delay] + } + } + } + // If no headers have been delivered, or all of them have been delayed, + sleep a bit and retry.
Take care with headers already consumed during + // skeleton filling + if len(headers) == 0 && !progressed { + p.log.Trace("All headers delayed, waiting") + select { + case <-time.After(fsHeaderContCheck): + continue + case <-d.cancelCh: + return errCanceled + } + } + // Insert any remaining new headers and fetch the next batch + if len(headers) > 0 { + p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) + select { + case d.headerProcCh <- &headerTask{ + headers: headers, + hashes: hashes, + }: + case <-d.cancelCh: + return errCanceled + } + from += uint64(len(headers)) + } + // If we're still skeleton filling snap sync, check pivot staleness + // before continuing to the next skeleton filling + if skeleton && pivot > 0 { + pivoting = true } } } @@ -1231,33 +1123,19 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error { // // The method returns the entire filled skeleton and also the number of headers // already forwarded for processing. -func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { +func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { log.Debug("Filling up skeleton", "from", from) d.queue.ScheduleSkeleton(from, skeleton) - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*headerPack) - return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh) - } - expire = func() map[string]int { return d.queue.ExpireHeaders(d.peers.rates.TargetTimeout()) } - reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) { - return d.queue.ReserveHeaders(p, count), false, false - } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } - capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { - p.SetHeadersIdle(accepted, deliveryTime) - } - ) - err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire, - d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve, - nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") - - log.Debug("Skeleton fill terminated", "err", err) - - filled, proced := d.queue.RetrieveHeaders() - return filled, proced, err + err := d.concurrentFetch((*headerQueue)(d)) + if err != nil { + log.Debug("Skeleton fill failed", "err", err) + } + filled, hashes, proced := d.queue.RetrieveHeaders() + if err == nil { + log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) + } + return filled, hashes, proced, err } // fetchBodies iteratively downloads the scheduled block bodies, taking any @@ -1265,20 +1143,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ( // and also periodically checking for timeouts. 
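The point of threading `[]common.Hash` alongside `[]*types.Header` through `fillHeaderSkeleton` and `headerProcCh` is to hash each downloaded header exactly once. A sketch of building such a task (the helper name is ours; `headerTask` is as declared earlier in this diff):

```go
// newHeaderTask hashes every header once at delivery time; downstream
// consumers reuse the cached hashes instead of re-deriving RLP hashes.
func newHeaderTask(headers []*types.Header) *headerTask {
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash()
	}
	return &headerTask{headers: headers, hashes: hashes}
}
```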
func (d *Downloader) fetchBodies(from uint64) error { log.Debug("Downloading block bodies", "origin", from) - - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*bodyPack) - return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles) - } - expire = func() map[string]int { return d.queue.ExpireBodies(d.peers.rates.TargetTimeout()) } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } - capacity = func(p *peerConnection) int { return p.BlockCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) } - ) - err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire, - d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies, - d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") + err := d.concurrentFetch((*bodyQueue)(d)) log.Debug("Block body download terminated", "err", err) return err @@ -1288,225 +1153,13 @@ func (d *Downloader) fetchBodies(from uint64) error { // available peers, reserving a chunk of receipts for each, waiting for delivery // and also periodically checking for timeouts. func (d *Downloader) fetchReceipts(from uint64) error { - log.Debug("Downloading transaction receipts", "origin", from) + log.Debug("Downloading receipts", "origin", from) + err := d.concurrentFetch((*receiptQueue)(d)) - var ( - deliver = func(packet dataPack) (int, error) { - pack := packet.(*receiptPack) - return d.queue.DeliverReceipts(pack.peerID, pack.receipts) - } - expire = func() map[string]int { return d.queue.ExpireReceipts(d.peers.rates.TargetTimeout()) } - fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } - capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.peers.rates.TargetRoundTrip()) } - setIdle = func(p *peerConnection, accepted int, deliveryTime time.Time) { - p.SetReceiptsIdle(accepted, deliveryTime) - } - ) - err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire, - d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts, - d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") - - log.Debug("Transaction receipt download terminated", "err", err) + log.Debug("Receipt download terminated", "err", err) return err } -// fetchParts iteratively downloads scheduled block parts, taking any available -// peers, reserving a chunk of fetch requests for each, waiting for delivery and -// also periodically checking for timeouts. -// -// As the scheduling/timeout logic mostly is the same for all downloaded data -// types, this method is used by each for data gathering and is instrumented with -// various callbacks to handle the slight differences between processing them. 
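The `(*bodyQueue)(d)` and `(*receiptQueue)(d)` conversions feeding `concurrentFetch` rely on adapter types declared in companion files of this change set (not shown here): each is a named conversion of `Downloader` exposing one data type's scheduling hooks, which is what retires the callback-heavy `fetchParts` below. A minimal sketch of the idiom, with an assumed interface shape:

```go
// Adapter views: the same struct in memory as *Downloader, but each with
// its own method set specialised for one kind of payload.
type bodyQueue Downloader
type receiptQueue Downloader

// waker exposes the per-type wake channel (fields moved onto queue in this
// change); the real shared interface has more methods than shown here.
func (q *bodyQueue) waker() chan bool    { return q.queue.blockWakeCh }
func (q *receiptQueue) waker() chan bool { return q.queue.receiptWakeCh }

// One generic loop then serves bodies and receipts alike:
//   err := d.concurrentFetch((*bodyQueue)(d))
```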
-// -// The instrumentation parameters: -// - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) -// - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) -// - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) -// - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) -// - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) -// - pending: task callback for the number of requests still needing download (detect completion/non-completability) -// - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) -// - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) -// - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) -// - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) -// - fetch: network callback to actually send a particular download request to a physical remote peer -// - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) -// - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) -// - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks -// - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) -// - kind: textual label of the type being downloaded to display in log messages -func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, - expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool), - fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int, - idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error { - - // Create a ticker to detect expired retrieval tasks - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - update := make(chan struct{}, 1) - - // Prepare the queue and fetch block parts until the block header fetcher's done - finished := false - for { - select { - case <-d.cancelCh: - return errCanceled - - case packet := <-deliveryCh: - deliveryTime := time.Now() - // If the peer was previously banned and failed to deliver its pack - // in a reasonable time frame, ignore its message. - if peer := d.peers.Peer(packet.PeerId()); peer != nil { - // Deliver the received chunk of data and check chain validity - accepted, err := deliver(packet) - if errors.Is(err, errInvalidChain) { - return err - } - // Unless a peer delivered something completely else than requested (usually - // caused by a timed out request which came through in the end), set it to - // idle. If the delivery's stale, the peer should have already been idled. 
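The loop being removed here multiplexes three event sources (deliveries, wake flags, and a ticker) into a single one-slot `update` channel, so any burst of events triggers exactly one re-evaluation of progress. A minimal, runnable sketch of that coalescing idiom, independent of the downloader types:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// One-slot buffer: the first pending signal is kept and later ones
	// are dropped, so N events collapse into a single wake-up.
	update := make(chan struct{}, 1)

	signal := func() {
		select {
		case update <- struct{}{}: // queue a wake-up if none is pending
		default: // a wake-up is already queued, nothing to do
		}
	}
	// Simulate a delivery, a wake flag and a ticker tick arriving together.
	for i := 0; i < 3; i++ {
		signal()
	}
	wakeups := 0
	for {
		select {
		case <-update:
			wakeups++
		case <-time.After(50 * time.Millisecond):
			fmt.Println("re-evaluated", wakeups, "time(s)") // prints 1
			return
		}
	}
}
```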
- if !errors.Is(err, errStaleDelivery) { - setIdle(peer, accepted, deliveryTime) - } - // Issue a log to the user to see what's going on - switch { - case err == nil && packet.Items() == 0: - peer.log.Trace("Requested data not delivered", "type", kind) - case err == nil: - peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) - default: - peer.log.Debug("Failed to deliver retrieved data", "type", kind, "err", err) - } - } - // Blocks assembled, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case cont := <-wakeCh: - // The header fetcher sent a continuation flag, check if it's done - if !cont { - finished = true - } - // Headers arrive, try to update the progress - select { - case update <- struct{}{}: - default: - } - - case <-ticker.C: - // Sanity check update the progress - select { - case update <- struct{}{}: - default: - } - - case <-update: - // Short circuit if we lost all our peers - if d.peers.Len() == 0 { - return errNoPeers - } - // Check for fetch request timeouts and demote the responsible peers - for pid, fails := range expire() { - if peer := d.peers.Peer(pid); peer != nil { - // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps - // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times - // out that sync wise we need to get rid of the peer. - // - // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth - // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing - // how response times reacts, to it always requests one more than the minimum (i.e. min 2). - if fails > 2 { - peer.log.Trace("Data delivery timed out", "type", kind) - setIdle(peer, 0, time.Now()) - } else { - peer.log.Debug("Stalling delivery, dropping", "type", kind) - - if d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored - peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid) - } else { - d.dropPeer(pid) - - // If this peer was the master peer, abort sync immediately - d.cancelLock.RLock() - master := pid == d.cancelPeer - d.cancelLock.RUnlock() - - if master { - d.cancel() - return errTimeout - } - } - } - } - } - // If there's nothing more to fetch, wait or terminate - if pending() == 0 { - if !inFlight() && finished { - log.Debug("Data fetching completed", "type", kind) - return nil - } - break - } - // Send a download request to all idle peers, until throttled - progressed, throttled, running := false, false, inFlight() - idles, total := idle() - pendCount := pending() - for _, peer := range idles { - // Short circuit if throttling activated - if throttled { - break - } - // Short circuit if there is no more available task. - if pendCount = pending(); pendCount == 0 { - break - } - // Reserve a chunk of fetches for a peer. A nil can mean either that - // no more headers are available, or that the peer is known not to - // have them. 
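The expiry handling deleted above encodes a two-tier penalty: because capacity estimation deliberately requests one item more than the proven minimum, a peer timing out on more than two items may simply have been overestimated and is only reset to idle, while a peer stalling at the already-minimal rate is dropped, and dropping the master peer aborts the whole sync. A hedged sketch of that decision table, with hypothetical names rather than the removed code's signatures:

```go
package main

import "fmt"

// penalty mirrors the removed policy: expired counts above two mean "we
// probably overestimated the peer's capacity, throttle it", while one or
// two expired items at the minimal request rate mean the peer is genuinely
// stalling. Dropping the master peer aborts the sync cycle entirely.
func penalty(expired int, isMaster bool) string {
	if expired > 2 {
		return "reset to idle at minimal throughput"
	}
	if isMaster {
		return "drop peer and abort sync (errTimeout)"
	}
	return "drop peer"
}

func main() {
	fmt.Println(penalty(5, false)) // overestimated capacity
	fmt.Println(penalty(1, false)) // stalling peer
	fmt.Println(penalty(2, true))  // stalling master peer
}
```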
- request, progress, throttle := reserve(peer, capacity(peer)) - if progress { - progressed = true - } - if throttle { - throttled = true - throttleCounter.Inc(1) - } - if request == nil { - continue - } - if request.From > 0 { - peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From) - } else { - peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) - } - // Fetch the chunk and make sure any errors return the hashes to the queue - if fetchHook != nil { - fetchHook(request.Headers) - } - if err := fetch(peer, request); err != nil { - // Although we could try and make an attempt to fix this, this error really - // means that we've double allocated a fetch task to a peer. If that is the - // case, the internal state of the downloader and the queue is very wrong so - // better hard crash and note the error instead of silently accumulating into - // a much bigger issue. - panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) - } - running = true - } - // Make sure that we have peers available for fetching. If all peers have been tried - // and all failed throw an error - if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 { - return errPeersUnavailable - } - } - } -} - // processHeaders takes batches of retrieved headers from an input channel and // keeps processing and scheduling them into the header chain and downloader's // queue until the stream ends or a failure occurs. @@ -1535,7 +1188,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { } log.Warn("Rolled back chain segment", "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), - "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), + "snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) } }() @@ -1548,11 +1201,11 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { rollbackErr = errCanceled return errCanceled - case headers := <-d.headerProcCh: + case task := <-d.headerProcCh: // Terminate header processing if we synced up - if len(headers) == 0 { + if task == nil || len(task.headers) == 0 { // Notify everyone that headers are fully processed - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case ch <- false: case <-d.cancelCh: @@ -1576,14 +1229,14 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { return errStallingPeer } } - // If fast or light syncing, ensure promised headers are indeed delivered. This is + // If snap or light syncing, ensure promised headers are indeed delivered. This is // needed to detect scenarios where an attacker feeds a bad pivot and then bails out // of delivering the post-pivot blocks that would flag the invalid content. // // This check cannot be executed "as is" for full imports, since blocks may still be // queued for processing when the header download completes. However, as long as the // peer gave us something useful, we're already happy/progressed (above check). 
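The bad-pivot defence described in the comment above boils down to a total-difficulty comparison: if the peer advertised a TD that the local header chain still has not reached once header delivery stops, the peer withheld the content it promised and is treated as stalling. A minimal sketch of the comparison, using `math/big` like the real code but with illustrative inputs:

```go
package main

import (
	"errors"
	"fmt"
	"math/big"
)

var errStallingPeer = errors.New("peer is stalling")

// checkPromisedTd sketches the post-download sanity check: the peer claimed
// total difficulty promisedTd, so after header processing ends the local
// head TD must have caught up, otherwise post-pivot content was withheld.
func checkPromisedTd(promisedTd, localHeadTd *big.Int) error {
	if promisedTd.Cmp(localHeadTd) > 0 {
		return errStallingPeer
	}
	return nil
}

func main() {
	promised := big.NewInt(1_000_000)
	fmt.Println(checkPromisedTd(promised, big.NewInt(999_000)))   // stalling
	fmt.Println(checkPromisedTd(promised, big.NewInt(1_000_000))) // <nil>
}
```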
- if mode == FastSync || mode == LightSync { + if mode == SnapSync || mode == LightSync { head := d.lightchain.CurrentHeader() if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { return errStallingPeer } } @@ -1594,6 +1247,8 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { return nil } // Otherwise split the chunk of headers into batches and process them + headers, hashes := task.headers, task.hashes + gotHeaders = true for len(headers) > 0 { // Terminate if something failed in between processing chunks @@ -1608,10 +1263,11 @@ if limit > len(headers) { limit = len(headers) } - chunk := headers[:limit] + chunkHeaders := headers[:limit] + chunkHashes := hashes[:limit] // In case of header only syncing, validate the chunk immediately - if mode == FastSync || mode == LightSync { + if mode == SnapSync || mode == LightSync { // If we're importing pure headers, verify based on their recentness var pivot uint64 @@ -1622,22 +1278,22 @@ d.pivotLock.RUnlock() frequency := fsHeaderCheckFrequency - if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { + if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { frequency = 1 } - if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { + if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil { rollbackErr = err // If some headers were inserted, track them as uncertain - if (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 { - rollback = chunk[0].Number.Uint64() + if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 { + rollback = chunkHeaders[0].Number.Uint64() } - log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err) + log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) return fmt.Errorf("%w: %v", errInvalidChain, err) } // All verifications passed, track all headers within the allotted limits - if mode == FastSync { - head := chunk[len(chunk)-1].Number.Uint64() + if mode == SnapSync { + head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() if head-rollback > uint64(fsHeaderSafetyNet) { rollback = head - uint64(fsHeaderSafetyNet) } else { @@ -1646,9 +1302,9 @@ } } // Unless we're doing light chains, schedule the headers for associated content retrieval - if mode == FullSync || mode == FastSync { + if mode == FullSync || mode == SnapSync { // If we've reached the allowed number of pending headers, stall a bit - for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { + for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { select { case <-d.cancelCh: rollbackErr = errCanceled @@ -1657,13 +1313,14 @@ } } // Otherwise insert the headers for content retrieval - inserts := d.queue.Schedule(chunk, origin) - if len(inserts) != len(chunk) { - rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk)) + inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) + if len(inserts) != len(chunkHeaders) { + rollbackErr
= fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders)) return fmt.Errorf("%w: stale headers", errBadPeer) } } headers = headers[limit:] + hashes = hashes[limit:] origin += uint64(limit) } // Update the highest block number we know if a higher one is found. @@ -1674,7 +1331,7 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error { d.syncStatsLock.Unlock() // Signal the content downloaders of the availablility of new tasks - for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { + for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { select { case ch <- true: default: @@ -1720,6 +1377,9 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { for i, result := range results { blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) } + // Downloaded blocks are always regarded as trusted after the + // transition. Because the downloaded chain is guided by the + // consensus-layer. if index, err := d.blockchain.InsertChain(blocks); err != nil { if index < len(results) { log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) @@ -1735,9 +1395,9 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { return nil } -// processFastSyncContent takes fetch results from the queue and writes them to the +// processSnapSyncContent takes fetch results from the queue and writes them to the // database. It also controls the synchronisation of state nodes of the pivot block. -func (d *Downloader) processFastSyncContent() error { +func (d *Downloader) processSnapSyncContent() error { // Start syncing state of the reported head block. This should get us most of // the state of the pivot block. d.pivotLock.RLock() @@ -1800,7 +1460,7 @@ func (d *Downloader) processFastSyncContent() error { } else { results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 
} - // Split around the pivot block and process the two sides via fast/full sync + // Split around the pivot block and process the two sides via snap/full sync if atomic.LoadInt32(&d.committed) == 0 { latest := results[len(results)-1].Header // If the height is above the pivot block by 2 sets, it means the pivot @@ -1819,12 +1479,12 @@ func (d *Downloader) processFastSyncContent() error { d.pivotLock.Unlock() // Write out the pivot into the database so a rollback beyond it will - // reenable fast sync + // re-enable snap sync rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64()) } } P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results) - if err := d.commitFastSyncData(beforeP, sync); err != nil { + if err := d.commitSnapSyncData(beforeP, sync); err != nil { return err } if P != nil { @@ -1882,7 +1542,7 @@ func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, bef return p, before, after } -func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error { +func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error { // Check for any early termination requests if len(results) == 0 { return nil @@ -1898,7 +1558,7 @@ func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *state } // Retrieve a batch of results to import first, last := results[0].Header, results[len(results)-1].Header - log.Debug("Inserting fast-sync blocks", "items", len(results), + log.Debug("Inserting snap-sync blocks", "items", len(results), "firstnum", first.Number, "firsthash", first.Hash(), "lastnum", last.Number, "lasthash", last.Hash(), ) @@ -1917,49 +1577,19 @@ func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *state func (d *Downloader) commitPivotBlock(result *fetchResult) error { block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) - log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash()) + log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash()) // Commit the pivot block as the new head, will require full sync from here on if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil { return err } - if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil { + if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil { return err } atomic.StoreInt32(&d.committed, 1) - - // If we had a bloom filter for the state sync, deallocate it now. Note, we only - // deallocate internally, but keep the empty wrapper. This ensures that if we do - // a rollback after committing the pivot and restarting fast sync, we don't end - // up using a nil bloom. Empty bloom is fine, it just returns that it does not - // have the info we need, so reach down to the database instead. - if d.stateBloom != nil { - d.stateBloom.Close() - } return nil } -// DeliverHeaders injects a new batch of block headers received from a remote -// node into the download schedule. -func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error { - return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) -} - -// DeliverBodies injects a new batch of block bodies received from a remote node.
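`splitAroundPivot`, retained above, partitions a batch of results into the pre-pivot side, the pivot itself, and the post-pivot side; everything before the pivot is committed with receipts only, while everything after is fully executed. A self-contained sketch of the same three-way partitioning over plain block numbers:

```go
package main

import "fmt"

// splitAroundPivot partitions block numbers the way the downloader splits
// fetch results: before the pivot (receipt-only import), the pivot itself
// (committed as the new head), and after it (full execution).
func splitAroundPivot(pivot uint64, numbers []uint64) (p *uint64, before, after []uint64) {
	for i, n := range numbers {
		switch {
		case n < pivot:
			before = append(before, n)
		case n == pivot:
			p = &numbers[i]
		default:
			after = append(after, n)
		}
	}
	return p, before, after
}

func main() {
	p, before, after := splitAroundPivot(100, []uint64{98, 99, 100, 101, 102})
	fmt.Println(*p, before, after) // 100 [98 99] [101 102]
}
```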
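Both `importBlockResults` and `commitPivotBlock` reassemble full blocks from independently fetched parts before insertion, using `types.NewBlockWithHeader(...).WithBody(...)`. Those are real go-ethereum calls of this era (later releases changed `WithBody` to take a `types.Body`); a small usage sketch, assuming a go-ethereum module of matching vintage on the module path:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// A fetch result carries a header plus the separately downloaded
	// transactions and uncles; a full block is rebuilt by pairing them.
	header := &types.Header{Number: big.NewInt(42)}
	var (
		txs    []*types.Transaction // downloaded body: transactions
		uncles []*types.Header      // downloaded body: uncles
	)
	block := types.NewBlockWithHeader(header).WithBody(txs, uncles)
	fmt.Println("reassembled block number:", block.Number())
}
```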
-func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error { - return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter) -} - -// DeliverReceipts injects a new batch of receipts received from a remote node. -func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error { - return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) -} - -// DeliverNodeData injects a new batch of node state data received from a remote node. -func (d *Downloader) DeliverNodeData(id string, data [][]byte) error { - return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) -} - // DeliverSnapPacket is invoked from a peer's message handler when it transmits a // data packet for the local node to consume. func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error { @@ -1985,27 +1615,3 @@ func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) erro return fmt.Errorf("unexpected snap packet type: %T", packet) } } - -// deliver injects a new batch of data received from a remote node. -func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { - // Update the delivery metrics for both good and failed deliveries - inMeter.Mark(int64(packet.Items())) - defer func() { - if err != nil { - dropMeter.Mark(int64(packet.Items())) - } - }() - // Deliver or abort if the sync is canceled while queuing - d.cancelLock.RLock() - cancel := d.cancelCh - d.cancelLock.RUnlock() - if cancel == nil { - return errNoSyncActive - } - select { - case destCh <- packet: - return nil - case <-cancel: - return errNoSyncActive - } -} diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 17cd3630c..70c6a5121 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -19,7 +19,9 @@ package downloader import ( "errors" "fmt" + "io/ioutil" "math/big" + "os" "strings" "sync" "sync/atomic" @@ -28,68 +30,52 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) -// Reduce some of the parameters to make the tester faster. -func init() { - fullMaxForkAncestry = 10000 - lightMaxForkAncestry = 10000 - blockCacheMaxItems = 1024 - fsHeaderContCheck = 500 * time.Millisecond -} - // downloadTester is a test simulator for mocking out local block chain. 
type downloadTester struct { + freezer string + chain *core.BlockChain downloader *Downloader - genesis *types.Block // Genesis blocks used by the tester and peers - stateDb ethdb.Database // Database used by the tester for syncing from peers - peerDb ethdb.Database // Database of the peers containing all data - peers map[string]*downloadTesterPeer - - ownHashes []common.Hash // Hash chain belonging to the tester - ownHeaders map[common.Hash]*types.Header // Headers belonging to the tester - ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester - ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester - ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain - - ancientHeaders map[common.Hash]*types.Header // Ancient headers belonging to the tester - ancientBlocks map[common.Hash]*types.Block // Ancient blocks belonging to the tester - ancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester - ancientChainTd map[common.Hash]*big.Int // Ancient total difficulties of the blocks in the local chain - - lock sync.RWMutex + peers map[string]*downloadTesterPeer + lock sync.RWMutex } // newTester creates a new downloader test mocker. func newTester() *downloadTester { - tester := &downloadTester{ - genesis: testGenesis, - peerDb: testDB, - peers: make(map[string]*downloadTesterPeer), - ownHashes: []common.Hash{testGenesis.Hash()}, - ownHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()}, - ownBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis}, - ownReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil}, - ownChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()}, - - // Initialize ancient store with test genesis block - ancientHeaders: map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()}, - ancientBlocks: map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis}, - ancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil}, - ancientChainTd: map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()}, + freezer, err := ioutil.TempDir("", "") + if err != nil { + panic(err) } - tester.stateDb = rawdb.NewMemoryDatabase() - tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00}) + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) + if err != nil { + panic(err) + } + core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000)) - tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer) + chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + if err != nil { + panic(err) + } + tester := &downloadTester{ + freezer: freezer, + chain: chain, + peers: make(map[string]*downloadTesterPeer), + } + tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer) return tester } @@ -97,20 +83,20 @@ func newTester() *downloadTester { // held resources. func (dl *downloadTester) terminate() { dl.downloader.Terminate() + dl.chain.Stop() + + os.RemoveAll(dl.freezer) } // sync starts synchronizing with a remote peer, blocking until it completes. 
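The rewritten tester backs its in-memory database with a throwaway freezer directory and deletes it again in `terminate`. A hedged sketch of the same fixture shape using only the standard library (the diff uses `ioutil.TempDir`, which `os.MkdirTemp` superseded in Go 1.16):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// fixture pairs a temporary on-disk area with a cleanup method, mirroring
// the create-in-newTester / delete-in-terminate shape of the tester.
type fixture struct {
	dir string
}

func newFixture() (*fixture, error) {
	dir, err := os.MkdirTemp("", "freezer-test-")
	if err != nil {
		return nil, err
	}
	return &fixture{dir: dir}, nil
}

// terminate removes the scratch directory, like terminate() removing the
// tester's freezer after stopping the chain.
func (f *fixture) terminate() {
	os.RemoveAll(f.dir)
}

func main() {
	f, err := newFixture()
	if err != nil {
		panic(err)
	}
	defer f.terminate()

	// Use the directory as an ancient-store stand-in.
	path := filepath.Join(f.dir, "ancients")
	if err := os.WriteFile(path, []byte("frozen"), 0o600); err != nil {
		panic(err)
	}
	fmt.Println("freezer scratch space at", f.dir)
}
```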
func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - dl.lock.RLock() - hash := dl.peers[id].chain.headBlock().Hash() - // If no particular TD was requested, load from the peer's blockchain + head := dl.peers[id].chain.CurrentBlock() if td == nil { - td = dl.peers[id].chain.td(hash) + // If no particular TD was requested, load from the peer's blockchain + td = dl.peers[id].chain.GetTd(head.Hash(), head.NumberU64()) } - dl.lock.RUnlock() - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, hash, td, mode) + err := dl.downloader.synchronise(id, head.Hash(), td, mode) select { case <-dl.downloader.cancelCh: // Ok, downloader fully cancelled after sync cycle @@ -121,284 +107,26 @@ func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { return err } -// HasHeader checks if a header is present in the testers canonical chain. -func (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool { - return dl.GetHeaderByHash(hash) != nil -} - -// HasBlock checks if a block is present in the testers canonical chain. -func (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool { - return dl.GetBlockByHash(hash) != nil -} - -// HasFastBlock checks if a block is present in the testers canonical chain. -func (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool { - dl.lock.RLock() - defer dl.lock.RUnlock() - - if _, ok := dl.ancientReceipts[hash]; ok { - return true - } - _, ok := dl.ownReceipts[hash] - return ok -} - -// GetHeader retrieves a header from the testers canonical chain. -func (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() - return dl.getHeaderByHash(hash) -} - -// getHeaderByHash returns the header if found either within ancients or own blocks) -// This method assumes that the caller holds at least the read-lock (dl.lock) -func (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header { - header := dl.ancientHeaders[hash] - if header != nil { - return header - } - return dl.ownHeaders[hash] -} - -// GetBlock retrieves a block from the testers canonical chain. -func (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - block := dl.ancientBlocks[hash] - if block != nil { - return block - } - return dl.ownBlocks[hash] -} - -// CurrentHeader retrieves the current head header from the canonical chain. -func (dl *downloadTester) CurrentHeader() *types.Header { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil { - return header - } - if header := dl.ownHeaders[dl.ownHashes[i]]; header != nil { - return header - } - } - return dl.genesis.Header() -} - -// CurrentBlock retrieves the current head block from the canonical chain. -func (dl *downloadTester) CurrentBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil { - if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { - return block - } - return block - } - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - if _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil { - return block - } - } - } - return dl.genesis -} - -// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain. 
-func (dl *downloadTester) CurrentFastBlock() *types.Block { - dl.lock.RLock() - defer dl.lock.RUnlock() - - for i := len(dl.ownHashes) - 1; i >= 0; i-- { - if block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil { - return block - } - if block := dl.ownBlocks[dl.ownHashes[i]]; block != nil { - return block - } - } - return dl.genesis -} - -// FastSyncCommitHead manually sets the head block to a given hash. -func (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error { - // For now only check that the state trie is correct - if block := dl.GetBlockByHash(hash); block != nil { - _, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb)) - return err - } - return fmt.Errorf("non existent block: %x", hash[:4]) -} - -// GetTd retrieves the block's total difficulty from the canonical chain. -func (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.getTd(hash) -} - -// getTd retrieves the block's total difficulty if found either within -// ancients or own blocks). -// This method assumes that the caller holds at least the read-lock (dl.lock) -func (dl *downloadTester) getTd(hash common.Hash) *big.Int { - if td := dl.ancientChainTd[hash]; td != nil { - return td - } - return dl.ownChainTd[hash] -} - -// InsertHeaderChain injects a new batch of headers into the simulated chain. -func (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - // Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors - if dl.getHeaderByHash(headers[0].ParentHash) == nil { - return 0, fmt.Errorf("InsertHeaderChain: unknown parent at first position, parent of number %d", headers[0].Number) - } - var hashes []common.Hash - for i := 1; i < len(headers); i++ { - hash := headers[i-1].Hash() - if headers[i].ParentHash != headers[i-1].Hash() { - return i, fmt.Errorf("non-contiguous import at position %d", i) - } - hashes = append(hashes, hash) - } - hashes = append(hashes, headers[len(headers)-1].Hash()) - // Do a full insert if pre-checks passed - for i, header := range headers { - hash := hashes[i] - if dl.getHeaderByHash(hash) != nil { - continue - } - if dl.getHeaderByHash(header.ParentHash) == nil { - // This _should_ be impossible, due to precheck and induction - return i, fmt.Errorf("InsertHeaderChain: unknown parent at position %d", i) - } - dl.ownHashes = append(dl.ownHashes, hash) - dl.ownHeaders[hash] = header - - td := dl.getTd(header.ParentHash) - dl.ownChainTd[hash] = new(big.Int).Add(td, header.Difficulty) - } - return len(headers), nil -} - -// InsertChain injects a new batch of blocks into the simulated chain. 
-func (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - for i, block := range blocks { - if parent, ok := dl.ownBlocks[block.ParentHash()]; !ok { - return i, fmt.Errorf("InsertChain: unknown parent at position %d / %d", i, len(blocks)) - } else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil { - return i, fmt.Errorf("InsertChain: unknown parent state %x: %v", parent.Root(), err) - } - if hdr := dl.getHeaderByHash(block.Hash()); hdr == nil { - dl.ownHashes = append(dl.ownHashes, block.Hash()) - dl.ownHeaders[block.Hash()] = block.Header() - } - dl.ownBlocks[block.Hash()] = block - dl.ownReceipts[block.Hash()] = make(types.Receipts, 0) - dl.stateDb.Put(block.Root().Bytes(), []byte{0x00}) - td := dl.getTd(block.ParentHash()) - dl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty()) - } - return len(blocks), nil -} - -// InsertReceiptChain injects a new batch of receipts into the simulated chain. -func (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) { - dl.lock.Lock() - defer dl.lock.Unlock() - - for i := 0; i < len(blocks) && i < len(receipts); i++ { - if _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok { - return i, errors.New("unknown owner") - } - if _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok { - if _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok { - return i, errors.New("InsertReceiptChain: unknown parent") - } - } - if blocks[i].NumberU64() <= ancientLimit { - dl.ancientBlocks[blocks[i].Hash()] = blocks[i] - dl.ancientReceipts[blocks[i].Hash()] = receipts[i] - - // Migrate from active db to ancient db - dl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header() - dl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty()) - delete(dl.ownHeaders, blocks[i].Hash()) - delete(dl.ownChainTd, blocks[i].Hash()) - } else { - dl.ownBlocks[blocks[i].Hash()] = blocks[i] - dl.ownReceipts[blocks[i].Hash()] = receipts[i] - } - } - return len(blocks), nil -} - -// SetHead rewinds the local chain to a new head. -func (dl *downloadTester) SetHead(head uint64) error { - dl.lock.Lock() - defer dl.lock.Unlock() - - // Find the hash of the head to reset to - var hash common.Hash - for h, header := range dl.ownHeaders { - if header.Number.Uint64() == head { - hash = h - } - } - for h, header := range dl.ancientHeaders { - if header.Number.Uint64() == head { - hash = h - } - } - if hash == (common.Hash{}) { - return fmt.Errorf("unknown head to set: %d", head) - } - // Find the offset in the header chain - var offset int - for o, h := range dl.ownHashes { - if h == hash { - offset = o - break - } - } - // Remove all the hashes and associated data afterwards - for i := offset + 1; i < len(dl.ownHashes); i++ { - delete(dl.ownChainTd, dl.ownHashes[i]) - delete(dl.ownHeaders, dl.ownHashes[i]) - delete(dl.ownReceipts, dl.ownHashes[i]) - delete(dl.ownBlocks, dl.ownHashes[i]) - - delete(dl.ancientChainTd, dl.ownHashes[i]) - delete(dl.ancientHeaders, dl.ownHashes[i]) - delete(dl.ancientReceipts, dl.ownHashes[i]) - delete(dl.ancientBlocks, dl.ownHashes[i]) - } - dl.ownHashes = dl.ownHashes[:offset+1] - return nil -} - -// Rollback removes some recently added elements from the chain. -func (dl *downloadTester) Rollback(hashes []common.Hash) { -} - // newPeer registers a new block download source into the downloader. 
-func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error { +func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer { dl.lock.Lock() defer dl.lock.Unlock() - peer := &downloadTesterPeer{dl: dl, id: id, chain: chain} + peer := &downloadTesterPeer{ + dl: dl, + id: id, + chain: newTestBlockchain(blocks), + withholdHeaders: make(map[common.Hash]struct{}), + } dl.peers[id] = peer - return dl.downloader.RegisterPeer(id, version, peer) + + if err := dl.downloader.RegisterPeer(id, version, peer); err != nil { + panic(err) + } + if err := dl.downloader.SnapSyncer.Register(peer); err != nil { + panic(err) + } + return peer } // dropPeer simulates a hard peer removal from the connection pool. @@ -407,154 +135,332 @@ func (dl *downloadTester) dropPeer(id string) { defer dl.lock.Unlock() delete(dl.peers, id) + dl.downloader.SnapSyncer.Unregister(id) dl.downloader.UnregisterPeer(id) } -// Snapshots implements the BlockChain interface for the downloader, but is a noop. -func (dl *downloadTester) Snapshots() *snapshot.Tree { - return nil -} - type downloadTesterPeer struct { - dl *downloadTester - id string - chain *testChain - missingStates map[common.Hash]bool // State entries that fast sync should not return + dl *downloadTester + id string + chain *core.BlockChain + + withholdHeaders map[common.Hash]struct{} } // Head constructs a function to retrieve a peer's current head hash // and total difficulty. func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { - b := dlp.chain.headBlock() - return b.Hash(), dlp.chain.td(b.Hash()) + head := dlp.chain.CurrentBlock() + return head.Hash(), dlp.chain.GetTd(head.Hash(), head.NumberU64()) +} + +func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { + var headers = make([]*types.Header, len(rlpdata)) + for i, data := range rlpdata { + var h types.Header + if err := rlp.DecodeBytes(data, &h); err != nil { + panic(err) + } + headers[i] = &h + } + return headers } // RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed // origin; associated with a particular peer in the download tester. The returned // function can be used to retrieve batches of headers from the particular peer. -func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { - result := dlp.chain.headersByHash(origin, amount, skip, reverse) - go dlp.dl.downloader.DeliverHeaders(dlp.id, result) - return nil +func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + // Service the header query via the live handler code + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: origin, + }, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + }, nil) + headers := unmarshalRlpHeaders(rlpHeaders) + // If a malicious peer is simulated withholding headers, delete them + for hash := range dlp.withholdHeaders { + for i, header := range headers { + if header.Hash() == hash { + headers = append(headers[:i], headers[i+1:]...) 
+ break + } + } + } + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = header.Hash() + } + // Deliver the headers to the downloader + req := ð.Request{ + Peer: dlp.id, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockHeadersPacket)(&headers), + Meta: hashes, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil } // RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered // origin; associated with a particular peer in the download tester. The returned // function can be used to retrieve batches of headers from the particular peer. -func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { - result := dlp.chain.headersByNumber(origin, amount, skip, reverse) - go dlp.dl.downloader.DeliverHeaders(dlp.id, result) - return nil +func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + // Service the header query via the live handler code + rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Number: origin, + }, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + }, nil) + headers := unmarshalRlpHeaders(rlpHeaders) + // If a malicious peer is simulated withholding headers, delete them + for hash := range dlp.withholdHeaders { + for i, header := range headers { + if header.Hash() == hash { + headers = append(headers[:i], headers[i+1:]...) + break + } + } + } + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = header.Hash() + } + // Deliver the headers to the downloader + req := ð.Request{ + Peer: dlp.id, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockHeadersPacket)(&headers), + Meta: hashes, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil } // RequestBodies constructs a getBlockBodies method associated with a particular // peer in the download tester. The returned function can be used to retrieve // batches of block bodies from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error { - txs, uncles := dlp.chain.bodies(hashes) - go dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles) - return nil +func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { + blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes) + + bodies := make([]*eth.BlockBody, len(blobs)) + for i, blob := range blobs { + bodies[i] = new(eth.BlockBody) + rlp.DecodeBytes(blob, bodies[i]) + } + var ( + txsHashes = make([]common.Hash, len(bodies)) + uncleHashes = make([]common.Hash, len(bodies)) + ) + hasher := trie.NewStackTrie(nil) + for i, body := range bodies { + txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) + uncleHashes[i] = types.CalcUncleHash(body.Uncles) + } + req := ð.Request{ + Peer: dlp.id, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockBodiesPacket)(&bodies), + Meta: [][]common.Hash{txsHashes, uncleHashes}, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil } // RequestReceipts constructs a getReceipts method associated with a particular // peer in the download tester. 
The returned function can be used to retrieve // batches of block receipts from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error { - receipts := dlp.chain.receipts(hashes) - go dlp.dl.downloader.DeliverReceipts(dlp.id, receipts) +func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { + blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes) + + receipts := make([][]*types.Receipt, len(blobs)) + for i, blob := range blobs { + rlp.DecodeBytes(blob, &receipts[i]) + } + hasher := trie.NewStackTrie(nil) + hashes = make([]common.Hash, len(receipts)) + for i, receipt := range receipts { + hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) + } + req := &eth.Request{ + Peer: dlp.id, + } + res := &eth.Response{ + Req: req, + Res: (*eth.ReceiptsPacket)(&receipts), + Meta: hashes, + Time: 1, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil +} + +// ID retrieves the peer's unique identifier. +func (dlp *downloadTesterPeer) ID() string { + return dlp.id +} + +// RequestAccountRange fetches a batch of accounts rooted in a specific account +// trie, starting with the origin. +func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { + // Create the request and service it + req := &snap.GetAccountRangePacket{ + ID: id, + Root: root, + Origin: origin, + Limit: limit, + Bytes: bytes, + } + slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req) + + // We need to convert to non-slim format, delegate to the packet code + res := &snap.AccountRangePacket{ + ID: id, + Accounts: slimaccs, + Proof: proofs, + } + hashes, accounts, _ := res.Unpack() + + go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs) return nil } -// RequestNodeData constructs a getNodeData method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of node state data from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error { - dlp.dl.lock.RLock() - defer dlp.dl.lock.RUnlock() - - results := make([][]byte, 0, len(hashes)) - for _, hash := range hashes { - if data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil { - if !dlp.missingStates[hash] { - results = append(results, data) - } - } +// RequestStorageRanges fetches a batch of storage slots belonging to one or +// more accounts. If slots from only one account are requested, an origin marker +// may also be used to retrieve from there. +func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { + // Create the request and service it + req := &snap.GetStorageRangesPacket{ + ID: id, + Accounts: accounts, + Root: root, + Origin: origin, + Limit: limit, + Bytes: bytes, } - go dlp.dl.downloader.DeliverNodeData(dlp.id, results) + storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req) + + // We need to demultiplex the response, delegate to the packet code + res := &snap.StorageRangesPacket{ + ID: id, + Slots: storage, + Proof: proofs, + } + hashes, slots := res.Unpack() + + go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs) return nil } +// RequestByteCodes fetches a batch of bytecodes by hash.
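The test peers now answer by running the live `eth.ServiceGet...Query` handlers and then pushing a synthetic response into the requester's sink channel from a goroutine, keeping the "network" reply asynchronous. A minimal sketch of that request/response plumbing with illustrative stand-in types, not the real `eth.Request`/`eth.Response`:

```go
package main

import "fmt"

// request and response sketch the shape of the eth request/response pair:
// a response points back at the request it answers and carries a done
// channel on which the consumer reports validation status.
type request struct {
	peer string
}

type response struct {
	req  *request
	res  []string
	done chan error
}

// serve mimics the tester peers: service the query locally, then hand the
// response to the requester through its sink channel from a goroutine.
func serve(peer string, query []string, sink chan *response) *request {
	req := &request{peer: peer}
	res := &response{
		req:  req,
		res:  query, // an echo service stands in for ServiceGetXQuery
		done: make(chan error, 1),
	}
	go func() { sink <- res }()
	return req
}

func main() {
	sink := make(chan *response)
	req := serve("peer-1", []string{"header-1", "header-2"}, sink)

	res := <-sink
	fmt.Println("reply for", res.req.peer, "items:", len(res.res), req == res.req)
	res.done <- nil // report successful validation
}
```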
+func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { + req := &snap.GetByteCodesPacket{ + ID: id, + Hashes: hashes, + Bytes: bytes, + } + codes := snap.ServiceGetByteCodesQuery(dlp.chain, req) + go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes) + return nil +} + +// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in +// a specific state trie. +func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error { + req := &snap.GetTrieNodesPacket{ + ID: id, + Root: root, + Paths: paths, + Bytes: bytes, + } + nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now()) + go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes) + return nil +} + +// Log retrieves the peer's own contextual logger. +func (dlp *downloadTesterPeer) Log() log.Logger { + return log.New("peer", dlp.id) +} + // assertOwnChain checks if the local chain contains the correct number of items // of the various chain components. func assertOwnChain(t *testing.T, tester *downloadTester, length int) { // Mark this method as a helper to report errors at callsite, not in here t.Helper() - assertOwnForkedChain(t, tester, 1, []int{length}) -} - -// assertOwnForkedChain checks if the local forked chain contains the correct -// number of items of the various chain components. -func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) { - // Mark this method as a helper to report errors at callsite, not in here - t.Helper() - - // Initialize the counters for the first fork - headers, blocks, receipts := lengths[0], lengths[0], lengths[0] - - // Update the counters for each subsequent fork - for _, length := range lengths[1:] { - headers += length - common - blocks += length - common - receipts += length - common - } + headers, blocks, receipts := length, length, length if tester.downloader.getMode() == LightSync { blocks, receipts = 1, 1 } - if hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers { + if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers { t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) } - if bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks { + if bs := int(tester.chain.CurrentBlock().NumberU64()) + 1; bs != blocks { t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) } - if rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts { + if rs := int(tester.chain.CurrentFastBlock().NumberU64()) + 1; rs != receipts { t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) } } func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, eth.ETH66, FullSync) } -func TestCanonicalSynchronisation66Fast(t *testing.T) { testCanonSync(t, eth.ETH66, FastSync) } +func TestCanonicalSynchronisation66Snap(t *testing.T) { testCanonSync(t, eth.ETH66, SnapSync) } func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, eth.ETH66, LightSync) } func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() // Create a small enough block chain to download chain := testChainBase.shorten(blockCacheMaxItems - 15) - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) // Synchronise with the peer and make sure all relevant data was retrieved if err := 
tester.sync("peer", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. func TestThrottling66Full(t *testing.T) { testThrottling(t, eth.ETH66, FullSync) } -func TestThrottling66Fast(t *testing.T) { testThrottling(t, eth.ETH66, FastSync) } +func TestThrottling66Snap(t *testing.T) { testThrottling(t, eth.ETH66, SnapSync) } func testThrottling(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() tester := newTester() + defer tester.terminate() // Create a long block chain to download and the tester - targetBlocks := testChainBase.len() - 1 - tester.newPeer("peer", protocol, testChainBase) + targetBlocks := len(testChainBase.blocks) - 1 + tester.newPeer("peer", protocol, testChainBase.blocks[1:]) // Wrap the importer to allow stepping blocked, proceed := uint32(0), make(chan struct{}) @@ -571,7 +477,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { for { // Check the retrieval count synchronously (! reason for this ugly block) tester.lock.RLock() - retrieved := len(tester.ownBlocks) + retrieved := int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 tester.lock.RUnlock() if retrieved >= targetBlocks+1 { break @@ -587,7 +493,7 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { { cached = tester.downloader.queue.resultCache.countCompleted() frozen = int(atomic.LoadUint32(&blocked)) - retrieved = len(tester.ownBlocks) + retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 } tester.downloader.queue.resultCache.lock.Unlock() tester.downloader.queue.lock.Unlock() @@ -603,12 +509,11 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { // Make sure we filled up the cache, then exhaust it time.Sleep(25 * time.Millisecond) // give it a chance to screw up tester.lock.RLock() - retrieved = len(tester.ownBlocks) + retrieved = int(tester.chain.CurrentFastBlock().Number().Uint64()) + 1 tester.lock.RUnlock() if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay { t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1) } - // Permit the blocked blocks to import if atomic.LoadUint32(&blocked) > 0 { atomic.StoreUint32(&blocked, uint32(0)) @@ -620,93 +525,85 @@ func testThrottling(t *testing.T, protocol uint, mode SyncMode) { if err := <-errc; err != nil { t.Fatalf("block synchronization failed: %v", err) } - tester.terminate() - } // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. 
func TestForkedSync66Full(t *testing.T) { testForkedSync(t, eth.ETH66, FullSync) } -func TestForkedSync66Fast(t *testing.T) { testForkedSync(t, eth.ETH66, FastSync) } +func TestForkedSync66Snap(t *testing.T) { testForkedSync(t, eth.ETH66, SnapSync) } func TestForkedSync66Light(t *testing.T) { testForkedSync(t, eth.ETH66, LightSync) } func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + 80) - chainB := testChainForkLightB.shorten(testChainBase.len() + 80) - tester.newPeer("fork A", protocol, chainA) - tester.newPeer("fork B", protocol, chainB) + chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) + chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81) + tester.newPeer("fork A", protocol, chainA.blocks[1:]) + tester.newPeer("fork B", protocol, chainB.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("fork A", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) // Synchronise with the second peer and make sure that fork is pulled too if err := tester.sync("fork B", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()}) + assertOwnChain(t, tester, len(chainB.blocks)) } // Tests that synchronising against a much shorter but much heavier fork works // correctly and is not dropped. func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestHeavyForkedSync66Fast(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, FastSync) } +func TestHeavyForkedSync66Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, SnapSync) } func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH66, LightSync) } func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + 80) - chainB := testChainForkHeavy.shorten(testChainBase.len() + 80) - tester.newPeer("light", protocol, chainA) - tester.newPeer("heavy", protocol, chainB) + chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) + chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79) + tester.newPeer("light", protocol, chainA.blocks[1:]) + tester.newPeer("heavy", protocol, chainB.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("light", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) // Synchronise with the second peer and make sure that fork is pulled too if err := tester.sync("heavy", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()}) + assertOwnChain(t, tester, len(chainB.blocks)) } // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains.
func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedForkedSync66Fast(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, FastSync) } +func TestBoundedForkedSync66Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, SnapSync) } func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH66, LightSync) } func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chainA := testChainForkLightA chainB := testChainForkLightB - tester.newPeer("original", protocol, chainA) - tester.newPeer("rewriter", protocol, chainB) + tester.newPeer("original", protocol, chainA.blocks[1:]) + tester.newPeer("rewriter", protocol, chainB.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("original", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) // Synchronise with the second peer and ensure that the fork is rejected for being too old if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor { @@ -720,69 +617,46 @@ func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { func TestBoundedHeavyForkedSync66Full(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH66, FullSync) } -func TestBoundedHeavyForkedSync66Fast(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH66, FastSync) +func TestBoundedHeavyForkedSync66Snap(t *testing.T) { + testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) } func TestBoundedHeavyForkedSync66Light(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) } func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() tester := newTester() + defer tester.terminate() // Create a long enough forked chain chainA := testChainForkLightA chainB := testChainForkHeavy - tester.newPeer("original", protocol, chainA) + tester.newPeer("original", protocol, chainA.blocks[1:]) // Synchronise with the peer and make sure all blocks were retrieved if err := tester.sync("original", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chainA.len()) + assertOwnChain(t, tester, len(chainA.blocks)) - tester.newPeer("heavy-rewriter", protocol, chainB) + tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:]) // Synchronise with the second peer and ensure that the fork is rejected for being too old if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor { t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) } - tester.terminate() -} - -// Tests that an inactive downloader will not accept incoming block headers, -// bodies and receipts.
-func TestInactiveDownloader63(t *testing.T) { - t.Parallel() - - tester := newTester() - defer tester.terminate() - - // Check that neither block headers nor bodies are accepted - if err := tester.downloader.DeliverHeaders("bad peer", []*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } - if err := tester.downloader.DeliverBodies("bad peer", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } - if err := tester.downloader.DeliverReceipts("bad peer", [][]*types.Receipt{}); err != errNoSyncActive { - t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive) - } } // Tests that a canceled download wipes all previously accumulated state. func TestCancel66Full(t *testing.T) { testCancel(t, eth.ETH66, FullSync) } -func TestCancel66Fast(t *testing.T) { testCancel(t, eth.ETH66, FastSync) } +func TestCancel66Snap(t *testing.T) { testCancel(t, eth.ETH66, SnapSync) } func TestCancel66Light(t *testing.T) { testCancel(t, eth.ETH66, LightSync) } func testCancel(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chain := testChainBase.shorten(MaxHeaderFetch) - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) // Make sure canceling works with a pristine downloader tester.downloader.Cancel() @@ -801,12 +675,10 @@ func testCancel(t *testing.T, protocol uint, mode SyncMode) { // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FullSync) } -func TestMultiSynchronisation66Fast(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, FastSync) } +func TestMultiSynchronisation66Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, SnapSync) } func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH66, LightSync) } func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() @@ -816,23 +688,21 @@ func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { for i := 0; i < targetPeers; i++ { id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1))) + tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:]) } if err := tester.sync("peer #0", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. 
func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FullSync) } -func TestMultiProtoSynchronisation66Fast(t *testing.T) { testMultiProtoSync(t, eth.ETH66, FastSync) } +func TestMultiProtoSynchronisation66Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH66, SnapSync) } func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, eth.ETH66, LightSync) } func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() @@ -840,14 +710,14 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 66", eth.ETH66, chain) - //tester.newPeer("peer 65", eth.ETH67, chain) + tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) + //tester.newPeer("peer 65", eth.ETH67, chain.blocks[1:]) // Synchronise with the requested peer and make sure all blocks were retrieved if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) // Check that no peers have been dropped off for _, version := range []int{66} { @@ -861,18 +731,16 @@ // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FullSync) } -func TestEmptyShortCircuit66Fast(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, FastSync) } +func TestEmptyShortCircuit66Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, SnapSync) } func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH66, LightSync) } func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() // Create a block chain to download chain := testChainBase - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) // Instrument the downloader to signal body requests bodiesHave, receiptsHave := int32(0), int32(0) @@ -886,17 +754,17 @@ func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { if err := tester.sync("peer", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) // Validate the number of block bodies that should have been requested bodiesNeeded, receiptsNeeded := 0, 0 - for _, block := range chain.blockm { - if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { + for _, block := range chain.blocks[1:] { + if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { bodiesNeeded++ } } - for _, receipt := range chain.receiptm { - if mode == FastSync && len(receipt) > 0 { + for _, block := range chain.blocks[1:] { + if mode == SnapSync && len(block.Transactions()) > 0 { receiptsNeeded++ } } @@ -911,72 +779,64 @@ // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains.
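The body/receipt accounting in `testEmptyShortCircuit` reduces to two predicates: a body is only fetched for blocks that carry transactions or uncles, and receipts are only fetched in snap sync for blocks with transactions. A self-contained sketch of those rules (hypothetical `block` type, not `types.Block`):

```go
package main

import "fmt"

// Toy block: only the fields the short-circuit cares about. Hypothetical
// stand-in for types.Block.
type block struct {
	txs    int
	uncles int
}

// needsBody mirrors the test's counting rule: an empty block (no transactions,
// no uncles) can be reconstructed from its header alone, so no body request.
func needsBody(b block, lightSync bool) bool {
	return !lightSync && (b.txs > 0 || b.uncles > 0)
}

// needsReceipts: only snap sync fetches receipts, and only blocks with
// transactions can have any.
func needsReceipts(b block, snapSync bool) bool {
	return snapSync && b.txs > 0
}

func main() {
	chain := []block{{0, 0}, {2, 0}, {0, 1}, {3, 1}}
	bodies, receipts := 0, 0
	for _, b := range chain {
		if needsBody(b, false) {
			bodies++
		}
		if needsReceipts(b, true) {
			receipts++
		}
	}
	fmt.Println(bodies, receipts) // 3 2
}
```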
func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FullSync) } -func TestMissingHeaderAttack66Fast(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, FastSync) } +func TestMissingHeaderAttack66Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, SnapSync) } func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH66, LightSync) } func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chain := testChainBase.shorten(blockCacheMaxItems - 15) - brokenChain := chain.shorten(chain.len()) - delete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2]) - tester.newPeer("attack", protocol, brokenChain) + + attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) + attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{} if err := tester.sync("attack", nil, mode); err == nil { t.Fatalf("succeeded attacker synchronisation") } // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FullSync) } -func TestShiftedHeaderAttack66Fast(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, FastSync) } +func TestShiftedHeaderAttack66Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, SnapSync) } func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH66, LightSync) } func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() chain := testChainBase.shorten(blockCacheMaxItems - 15) // Attempt a full sync with an attacker feeding shifted headers - brokenChain := chain.shorten(chain.len()) - delete(brokenChain.headerm, brokenChain.chain[1]) - delete(brokenChain.blockm, brokenChain.chain[1]) - delete(brokenChain.receiptm, brokenChain.chain[1]) - tester.newPeer("attack", protocol, brokenChain) + attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) + attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{} + if err := tester.sync("attack", nil, mode); err == nil { t.Fatalf("succeeded attacker synchronisation") } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that upon detecting an invalid header, the recent ones are rolled back // for various failure scenarios. Afterwards a full sync is attempted to make // sure no state was corrupted. 
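Both attack tests above rely on the tester's new `withholdHeaders` set: the peer serves its chain normally but silently drops the listed hashes, punching a verifiable gap that the queue must detect. A toy model of that mechanism (string hashes instead of `common.Hash`, not the downloader's actual test peer):

```go
package main

import "fmt"

// testPeer serves headers in chain order, except for hashes placed in the
// withhold set, which are silently skipped.
type testPeer struct {
	headers  []string            // header "hashes" in chain order
	withhold map[string]struct{} // hashes to silently drop
}

func (p *testPeer) serve(from, count int) []string {
	var out []string
	for i := from; i < from+count && i < len(p.headers); i++ {
		if _, skip := p.withhold[p.headers[i]]; skip {
			continue // gap: the queue must detect the broken numbering
		}
		out = append(out, p.headers[i])
	}
	return out
}

func main() {
	p := &testPeer{
		headers:  []string{"h1", "h2", "h3", "h4", "h5"},
		withhold: map[string]struct{}{"h3": {}},
	}
	fmt.Println(p.serve(0, 5)) // [h1 h2 h4 h5] -- h3 missing, sync must fail
}
```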
-func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, FastSync) } +func TestInvalidHeaderRollback66Snap(t *testing.T) { testInvalidHeaderRollback(t, eth.ETH66, SnapSync) } func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() + defer tester.terminate() // Create a small enough block chain to download targetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks @@ -985,78 +845,67 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { // Attempt to sync with an attacker that feeds junk during the fast sync phase. // This should result in the last fsHeaderSafetyNet headers being rolled back. missing := fsHeaderSafetyNet + MaxHeaderFetch + 1 - fastAttackChain := chain.shorten(chain.len()) - delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) - tester.newPeer("fast-attack", protocol, fastAttackChain) + + fastAttacker := tester.newPeer("fast-attack", protocol, chain.blocks[1:]) + fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} if err := tester.sync("fast-attack", nil, mode); err == nil { t.Fatalf("succeeded fast attacker synchronisation") } - if head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { + if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch { t.Errorf("rollback head mismatch: have %v, want at most %v", head, MaxHeaderFetch) } - // Attempt to sync with an attacker that feeds junk during the block import phase. // This should result in both the last fsHeaderSafetyNet number of headers being // rolled back, and also the pivot point being reverted to a non-block status. missing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1 - blockAttackChain := chain.shorten(chain.len()) - delete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in - delete(blockAttackChain.headerm, blockAttackChain.chain[missing]) - tester.newPeer("block-attack", protocol, blockAttackChain) + + blockAttacker := tester.newPeer("block-attack", protocol, chain.blocks[1:]) + fastAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} // Make sure the fast-attacker doesn't fill in + blockAttacker.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} if err := tester.sync("block-attack", nil, mode); err == nil { t.Fatalf("succeeded block attacker synchronisation") } - if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { + if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) } - if mode == FastSync { - if head := tester.CurrentBlock().NumberU64(); head != 0 { + if mode == SnapSync { + if head := tester.chain.CurrentBlock().NumberU64(); head != 0 { t.Errorf("fast sync pivot block #%d not rolled back", head) } } - // Attempt to sync with an attacker that withholds promised blocks after the // fast sync pivot point. This could be a trial to leave the node with a bad // but already imported pivot block. 
- withholdAttackChain := chain.shorten(chain.len()) - tester.newPeer("withhold-attack", protocol, withholdAttackChain) + withholdAttacker := tester.newPeer("withhold-attack", protocol, chain.blocks[1:]) + tester.downloader.syncInitHook = func(uint64, uint64) { - for i := missing; i < withholdAttackChain.len(); i++ { - delete(withholdAttackChain.headerm, withholdAttackChain.chain[i]) + for i := missing; i < len(chain.blocks); i++ { + withholdAttacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} } tester.downloader.syncInitHook = nil } if err := tester.sync("withhold-attack", nil, mode); err == nil { t.Fatalf("succeeded withholding attacker synchronisation") } - if head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { + if head := tester.chain.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch { t.Errorf("rollback head mismatch: have %v, want at most %v", head, 2*fsHeaderSafetyNet+MaxHeaderFetch) } - if mode == FastSync { - if head := tester.CurrentBlock().NumberU64(); head != 0 { + if mode == SnapSync { + if head := tester.chain.CurrentBlock().NumberU64(); head != 0 { t.Errorf("fast sync pivot block #%d not rolled back", head) } } - - // synchronise with the valid peer and make sure sync succeeds. Since the last rollback + // Synchronise with the valid peer and make sure sync succeeds. Since the last rollback // should also disable fast syncing for this process, verify that we did a fresh full // sync. Note, we can't assert anything about the receipts since we won't purge the // database of them. - tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) if err := tester.sync("valid", nil, mode); err != nil { t.Fatalf("failed to synchronise blocks: %v", err) } - if hs := len(tester.ownHeaders); hs != chain.len() { - t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, chain.len()) - } - if mode != LightSync { - if bs := len(tester.ownBlocks); bs != chain.len() { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, chain.len()) - } - } - tester.terminate() + assertOwnChain(t, tester, len(chain.blocks)) } // Tests that a peer advertising a high TD doesn't get to stall the downloader @@ -1064,32 +913,28 @@ func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { func TestHighTDStarvationAttack66Full(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH66, FullSync) } -func TestHighTDStarvationAttack66Fast(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH66, FastSync) +func TestHighTDStarvationAttack66Snap(t *testing.T) { + testHighTDStarvationAttack(t, eth.ETH66, SnapSync) } func TestHighTDStarvationAttack66Light(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH66, LightSync) } func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() + defer tester.terminate() chain := testChainBase.shorten(1) - tester.newPeer("attack", protocol, chain) + tester.newPeer("attack", protocol, chain.blocks[1:]) if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer { t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer) } - tester.terminate() } // Tests that misbehaving peers are disconnected, whilst behaving ones are not.
func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH66) } func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { - t.Parallel() - // Define the disconnection requirement for individual hash fetch errors tests := []struct { result error @@ -1119,16 +964,14 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { for i, tt := range tests { // Register a new peer and ensure its presence id := fmt.Sprintf("test %d", i) - if err := tester.newPeer(id, protocol, chain); err != nil { - t.Fatalf("test %d: failed to register new peer: %v", i, err) - } + tester.newPeer(id, protocol, chain.blocks[1:]) if _, ok := tester.peers[id]; !ok { t.Fatalf("test %d: registered peer not found", i) } // Simulate a synchronisation and check the required result tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync) + tester.downloader.Synchronise(id, tester.chain.Genesis().Hash(), big.NewInt(1000), FullSync) if _, ok := tester.peers[id]; !ok != tt.drop { t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) } @@ -1138,14 +981,13 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, eth.ETH66, FullSync) } -func TestSyncProgress66Fast(t *testing.T) { testSyncProgress(t, eth.ETH66, FastSync) } +func TestSyncProgress66Snap(t *testing.T) { testSyncProgress(t, eth.ETH66, SnapSync) } func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, eth.ETH66, LightSync) } func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() + chain := testChainBase.shorten(blockCacheMaxItems - 15) // Set a sync init hook to catch progress changes @@ -1159,7 +1001,7 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, chain.shorten(chain.len()/2)) + tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:]) pending := new(sync.WaitGroup) pending.Add(1) @@ -1171,13 +1013,13 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(chain.len()/2 - 1), + HighestBlock: uint64(len(chain.blocks)/2 - 1), }) progress <- struct{}{} pending.Wait() // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, chain) + tester.newPeer("peer-full", protocol, chain.blocks[1:]) pending.Add(1) go func() { defer pending.Done() @@ -1187,18 +1029,18 @@ func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - StartingBlock: uint64(chain.len()/2 - 1), - CurrentBlock: uint64(chain.len()/2 - 1), - HighestBlock: uint64(chain.len() - 1), + StartingBlock: uint64(len(chain.blocks)/2 - 1), + CurrentBlock: uint64(len(chain.blocks)/2 - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) // Check final progress after successful sync progress <- struct{}{} pending.Wait() 
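The dropping test just above is table driven: each downloader error maps to a keep/drop verdict for the offending peer. A minimal sketch of the same policy shape, with made-up error values in place of the downloader's:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical errors standing in for the downloader's error taxonomy.
var (
	errBusy     = errors.New("busy")          // benign: keep the peer
	errStalling = errors.New("stalling peer") // malicious or broken: drop
)

// dropPolicy maps a sync outcome to a disconnection requirement, mirroring
// the test table's {result, drop} rows.
var dropPolicy = []struct {
	err  error
	drop bool
}{
	{nil, false},
	{errBusy, false},
	{errStalling, true},
}

func shouldDrop(err error) bool {
	for _, rule := range dropPolicy {
		if errors.Is(err, rule.err) {
			return rule.drop
		}
	}
	return true // unknown errors are treated as protocol violations
}

func main() {
	fmt.Println(shouldDrop(errBusy), shouldDrop(errStalling)) // false true
}
```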
checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(chain.len()/2 - 1), - CurrentBlock: uint64(chain.len() - 1), - HighestBlock: uint64(chain.len() - 1), + StartingBlock: uint64(len(chain.blocks)/2 - 1), + CurrentBlock: uint64(len(chain.blocks) - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) } @@ -1207,9 +1049,7 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync t.Helper() p := d.Progress() - p.KnownStates, p.PulledStates = 0, 0 - want.KnownStates, want.PulledStates = 0, 0 - if p != want { + if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock { t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want) } } @@ -1218,16 +1058,15 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // number) is tracked and updated correctly in case of a fork (or manual head // reversal). func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FullSync) } -func TestForkedSyncProgress66Fast(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, FastSync) } +func TestForkedSyncProgress66Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, SnapSync) } func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH66, LightSync) } func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch) + + chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) + chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) // Set a sync init hook to catch progress changes starting := make(chan struct{}) @@ -1240,7 +1079,7 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA) + tester.newPeer("fork A", protocol, chainA.blocks[1:]) pending := new(sync.WaitGroup) pending.Add(1) go func() { @@ -1252,7 +1091,7 @@ <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(chainA.len() - 1), + HighestBlock: uint64(len(chainA.blocks) - 1), }) progress <- struct{}{} pending.Wait() @@ -1261,7 +1100,7 @@ tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, chainB) + tester.newPeer("fork B", protocol, chainB.blocks[1:]) pending.Add(1) go func() { defer pending.Done() @@ -1271,18 +1110,18 @@ }() <-starting checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ - StartingBlock: uint64(testChainBase.len()) - 1, - CurrentBlock: uint64(chainA.len() - 1), - HighestBlock: uint64(chainB.len() - 1), + StartingBlock: uint64(len(testChainBase.blocks)) - 1, + CurrentBlock: uint64(len(chainA.blocks) - 1), + HighestBlock: uint64(len(chainB.blocks) - 1), }) // Check final progress after successful sync progress <- struct{}{} pending.Wait()
checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(testChainBase.len()) - 1, - CurrentBlock: uint64(chainB.len() - 1), - HighestBlock: uint64(chainB.len() - 1), + StartingBlock: uint64(len(testChainBase.blocks)) - 1, + CurrentBlock: uint64(len(chainB.blocks) - 1), + HighestBlock: uint64(len(chainB.blocks) - 1), }) } @@ -1290,14 +1129,13 @@ func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FullSync) } -func TestFailedSyncProgress66Fast(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, FastSync) } +func TestFailedSyncProgress66Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, SnapSync) } func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH66, LightSync) } func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() + chain := testChainBase.shorten(blockCacheMaxItems - 15) // Set a sync init hook to catch progress changes @@ -1311,12 +1149,10 @@ func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Attempt a full sync with a faulty peer - brokenChain := chain.shorten(chain.len()) - missing := brokenChain.len() / 2 - delete(brokenChain.headerm, brokenChain.chain[missing]) - delete(brokenChain.blockm, brokenChain.chain[missing]) - delete(brokenChain.receiptm, brokenChain.chain[missing]) - tester.newPeer("faulty", protocol, brokenChain) + missing := len(chain.blocks)/2 - 1 + + faulter := tester.newPeer("faulty", protocol, chain.blocks[1:]) + faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} pending := new(sync.WaitGroup) pending.Add(1) @@ -1328,7 +1164,7 @@ }() <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(brokenChain.len() - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) progress <- struct{}{} pending.Wait() @@ -1336,7 +1172,7 @@ // Synchronise with a good peer and check that the progress origin remains the same // after a failure - tester.newPeer("valid", protocol, chain) + tester.newPeer("valid", protocol, chain.blocks[1:]) pending.Add(1) go func() { defer pending.Done() @@ -1351,22 +1187,21 @@ progress <- struct{}{} pending.Wait() checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(chain.len() - 1), - HighestBlock: uint64(chain.len() - 1), + CurrentBlock: uint64(len(chain.blocks) - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) } // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation.
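Taken together, the progress tests pin down two invariants: a failed cycle must not reset `StartingBlock` (the retry is a continuation, not a fresh sync), and a faked height must shrink `HighestBlock` once an honest peer is synced against. A hypothetical mini-tracker illustrating both rules (not the downloader's actual bookkeeping):

```go
package main

import "fmt"

// progress holds the three fields checkProgress compares.
type progress struct {
	StartingBlock, CurrentBlock, HighestBlock uint64
}

// beginCycle: only a pristine tracker sets a new origin; the advertised
// height is trusted until the peer proves otherwise.
func (p *progress) beginCycle(current, advertised uint64) {
	if p.HighestBlock == 0 {
		p.StartingBlock = current
	}
	p.CurrentBlock = current
	p.HighestBlock = advertised
}

func main() {
	var p progress
	p.beginCycle(0, 100)   // attacker promises 100 blocks
	p.CurrentBlock = 40    // ...but stalls the sync at 40
	p.beginCycle(40, 95)   // honest peer advertises the true height
	fmt.Printf("%+v\n", p) // StartingBlock stays 0, HighestBlock drops to 95
}
```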
func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FullSync) } -func TestFakedSyncProgress66Fast(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, FastSync) } +func TestFakedSyncProgress66Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, SnapSync) } func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH66, LightSync) } func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - tester := newTester() defer tester.terminate() + chain := testChainBase.shorten(blockCacheMaxItems - 15) // Set a sync init hook to catch progress changes @@ -1379,13 +1214,11 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) // Create and sync with an attacker that promises a higher chain than available. - brokenChain := chain.shorten(chain.len()) + attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) numMissing := 5 - for i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- { - delete(brokenChain.headerm, brokenChain.chain[i]) + for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- { + attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} } - tester.newPeer("attack", protocol, brokenChain) - pending := new(sync.WaitGroup) pending.Add(1) go func() { @@ -1396,7 +1229,7 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { }() <-starting checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(brokenChain.len() - 1), + HighestBlock: uint64(len(chain.blocks) - 1), }) progress <- struct{}{} pending.Wait() @@ -1404,8 +1237,8 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { // Synchronise with a good peer and check that the progress height has been reduced to // the true value. - validChain := chain.shorten(chain.len() - numMissing) - tester.newPeer("valid", protocol, validChain) + validChain := chain.shorten(len(chain.blocks) - numMissing) + tester.newPeer("valid", protocol, validChain.blocks[1:]) pending.Add(1) go func() { @@ -1417,100 +1250,17 @@ func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { <-starting checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(validChain.len() - 1), + HighestBlock: uint64(len(validChain.blocks) - 1), }) - // Check final progress after successful sync. progress <- struct{}{} pending.Wait() checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(validChain.len() - 1), - HighestBlock: uint64(validChain.len() - 1), + CurrentBlock: uint64(len(validChain.blocks) - 1), + HighestBlock: uint64(len(validChain.blocks) - 1), }) } -// This test reproduces an issue where unexpected deliveries would -// block indefinitely if they arrived at the right time. 
-func TestDeliverHeadersHang66Full(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FullSync) } -func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, FastSync) } -func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, eth.ETH66, LightSync) } - -func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - - master := newTester() - defer master.terminate() - chain := testChainBase.shorten(15) - - for i := 0; i < 200; i++ { - tester := newTester() - tester.peerDb = master.peerDb - tester.newPeer("peer", protocol, chain) - - // Whenever the downloader requests headers, flood it with - // a lot of unrequested header deliveries. - tester.downloader.peers.peers["peer"].peer = &floodingTestPeer{ - peer: tester.downloader.peers.peers["peer"].peer, - tester: tester, - } - if err := tester.sync("peer", nil, mode); err != nil { - t.Errorf("test %d: sync failed: %v", i, err) - } - tester.terminate() - } -} - -type floodingTestPeer struct { - peer Peer - tester *downloadTester -} - -func (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() } -func (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error { - return ftp.peer.RequestHeadersByHash(hash, count, skip, reverse) -} -func (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error { - return ftp.peer.RequestBodies(hashes) -} -func (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error { - return ftp.peer.RequestReceipts(hashes) -} -func (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error { - return ftp.peer.RequestNodeData(hashes) -} - -func (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error { - deliveriesDone := make(chan struct{}, 500) - for i := 0; i < cap(deliveriesDone)-1; i++ { - peer := fmt.Sprintf("fake-peer%d", i) - go func() { - ftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}}) - deliveriesDone <- struct{}{} - }() - } - - // None of the extra deliveries should block. - timeout := time.After(60 * time.Second) - launched := false - for i := 0; i < cap(deliveriesDone); i++ { - select { - case <-deliveriesDone: - if !launched { - // Start delivering the requested headers - // after one of the flooding responses has arrived. - go func() { - ftp.peer.RequestHeadersByNumber(from, count, skip, reverse) - deliveriesDone <- struct{}{} - }() - launched = true - } - case <-timeout: - panic("blocked") - } - } - return nil -} - func TestRemoteHeaderRequestSpan(t *testing.T) { testCases := []struct { remoteHeight uint64 @@ -1589,14 +1339,12 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. 
func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FullSync) } -func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, FastSync) } +func TestCheckpointEnforcement66Snap(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, SnapSync) } func TestCheckpointEnforcement66Light(t *testing.T) { testCheckpointEnforcement(t, eth.ETH66, LightSync) } func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) { - t.Parallel() - // Create a new tester with a particular hard coded checkpoint block tester := newTester() defer tester.terminate() @@ -1605,18 +1353,18 @@ func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) { chain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1) // Attempt to sync with the peer and validate the result - tester.newPeer("peer", protocol, chain) + tester.newPeer("peer", protocol, chain.blocks[1:]) var expect error - if mode == FastSync || mode == LightSync { + if mode == SnapSync || mode == LightSync { expect = errUnsyncedPeer } if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) { t.Fatalf("block sync error mismatch: have %v, want %v", err, expect) } - if mode == FastSync || mode == LightSync { + if mode == SnapSync || mode == LightSync { assertOwnChain(t, tester, 1) } else { - assertOwnChain(t, tester, chain.len()) + assertOwnChain(t, tester, len(chain.blocks)) } } diff --git a/eth/downloader/fetchers.go b/eth/downloader/fetchers.go new file mode 100644 index 000000000..021e8c4f9 --- /dev/null +++ b/eth/downloader/fetchers.go @@ -0,0 +1,115 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package downloader + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/eth" +) + +// fetchHeadersByHash is a blocking version of Peer.RequestHeadersByHash which +// handles all the cancellation, interruption and timeout mechanisms of a data +// retrieval to allow blocking API calls.
+func (d *Downloader) fetchHeadersByHash(p *peerConnection, hash common.Hash, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { + // Create the response sink and send the network request + start := time.Now() + resCh := make(chan *eth.Response) + + req, err := p.peer.RequestHeadersByHash(hash, amount, skip, reverse, resCh) + if err != nil { + return nil, nil, err + } + defer req.Close() + + // Wait until the response arrives, the request is cancelled or times out + ttl := d.peers.rates.TargetTimeout() + + timeoutTimer := time.NewTimer(ttl) + defer timeoutTimer.Stop() + + select { + case <-d.cancelCh: + return nil, nil, errCanceled + + case <-timeoutTimer.C: + // Header retrieval timed out, update the metrics + p.log.Debug("Header request timed out", "elapsed", ttl) + headerTimeoutMeter.Mark(1) + + return nil, nil, errTimeout + + case res := <-resCh: + // Headers successfully retrieved, update the metrics + headerReqTimer.Update(time.Since(start)) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + + // Don't reject the packet even if it turns out to be bad, downloader will + // disconnect the peer on its own terms. Simply deliver the headers to + // be processed by the caller + res.Done <- nil + + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + } +} + +// fetchHeadersByNumber is a blocking version of Peer.RequestHeadersByNumber which +// handles all the cancellation, interruption and timeout mechanisms of a data +// retrieval to allow blocking API calls. +func (d *Downloader) fetchHeadersByNumber(p *peerConnection, number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { + // Create the response sink and send the network request + start := time.Now() + resCh := make(chan *eth.Response) + + req, err := p.peer.RequestHeadersByNumber(number, amount, skip, reverse, resCh) + if err != nil { + return nil, nil, err + } + defer req.Close() + + // Wait until the response arrives, the request is cancelled or times out + ttl := d.peers.rates.TargetTimeout() + + timeoutTimer := time.NewTimer(ttl) + defer timeoutTimer.Stop() + + select { + case <-d.cancelCh: + return nil, nil, errCanceled + + case <-timeoutTimer.C: + // Header retrieval timed out, update the metrics + p.log.Debug("Header request timed out", "elapsed", ttl) + headerTimeoutMeter.Mark(1) + + return nil, nil, errTimeout + + case res := <-resCh: + // Headers successfully retrieved, update the metrics + headerReqTimer.Update(time.Since(start)) + headerInMeter.Mark(int64(len(*res.Res.(*eth.BlockHeadersPacket)))) + + // Don't reject the packet even if it turns out to be bad, downloader will + // disconnect the peer on its own terms. Simply deliver the headers to + // be processed by the caller + res.Done <- nil + + return *res.Res.(*eth.BlockHeadersPacket), res.Meta.([]common.Hash), nil + } +} diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go new file mode 100644 index 000000000..4bade2b4c --- /dev/null +++ b/eth/downloader/fetchers_concurrent.go @@ -0,0 +1,381 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version.
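Both fetchers share one shape: fire an asynchronous request, then block on exactly one of three outcomes (response, cancellation, timeout). A self-contained sketch of that select pattern, with a generic payload and a made-up `send` callback where the real code threads `eth.Response` and rate metrics:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var (
	errCanceled = errors.New("canceled")
	errTimeout  = errors.New("timeout")
)

// blockingFetch wraps an async request into a blocking call: it returns the
// response, or fails on cancellation or deadline, whichever happens first.
func blockingFetch(send func(chan<- string) error, cancel <-chan struct{}, ttl time.Duration) (string, error) {
	resCh := make(chan string, 1)
	if err := send(resCh); err != nil {
		return "", err
	}
	timer := time.NewTimer(ttl)
	defer timer.Stop()

	select {
	case <-cancel:
		return "", errCanceled
	case <-timer.C:
		return "", errTimeout
	case res := <-resCh:
		return res, nil
	}
}

func main() {
	send := func(res chan<- string) error {
		go func() { time.Sleep(10 * time.Millisecond); res <- "headers" }()
		return nil
	}
	out, err := blockingFetch(send, make(chan struct{}), time.Second)
	fmt.Println(out, err) // headers <nil>
}
```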
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package downloader + +import ( + "errors" + "sort" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/log" +) + +// timeoutGracePeriod is the amount of time to allow for a peer to deliver a +// response to a locally already timed out request. Timeouts are not penalized +// as a peer might be temporarily overloaded, however, they still must reply +// to each request. Failing to do so is considered a protocol violation. +var timeoutGracePeriod = 2 * time.Minute + +// typedQueue is an interface defining the adaptor needed to translate the type +// specific downloader/queue schedulers into the type-agnostic general concurrent +// fetcher algorithm calls. +type typedQueue interface { + // waker returns a notification channel that gets pinged in case more fetches + // have been queued up, so the fetcher might assign it to idle peers. + waker() chan bool + + // pending returns the number of wrapped items that are currently queued for + // fetching by the concurrent downloader. + pending() int + + // capacity is responsible for calculating how many items of the abstracted + // type a particular peer is estimated to be able to retrieve within the + // allotted round trip time. + capacity(peer *peerConnection, rtt time.Duration) int + + // updateCapacity is responsible for updating how many items of the abstracted + // type a particular peer is estimated to be able to retrieve in a unit time. + updateCapacity(peer *peerConnection, items int, elapsed time.Duration) + + // reserve is responsible for allocating a requested number of pending items + // from the download queue to the specified peer. + reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) + + // unreserve is responsible for removing the current retrieval allocation + // assigned to a specific peer and placing it back into the pool to allow + // reassigning to some other peer. + unreserve(peer string) int + + // request is responsible for converting a generic fetch request into a typed + // one and sending it to the remote peer for fulfillment. + request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) + + // deliver is responsible for taking a generic response packet from the + // concurrent fetcher, unpacking the type specific data and delivering + // it to the downloader's queue. + deliver(peer *peerConnection, packet *eth.Response) (int, error) +} + +// concurrentFetch iteratively downloads scheduled block parts, taking available +// peers, reserving a chunk of fetch requests for each and waiting for delivery +// or timeouts. +func (d *Downloader) concurrentFetch(queue typedQueue) error { + // Create a delivery channel to accept responses from all peers + responses := make(chan *eth.Response) + + // Track the currently active requests and their timeout order + pending := make(map[string]*eth.Request) + defer func() { + // Abort all requests on sync cycle cancellation.
The requests may still + // be fulfilled by the remote side, but the dispatcher will not wait to + // deliver them since nobody's going to be listening. + for _, req := range pending { + req.Close() + } + }() + ordering := make(map[*eth.Request]int) + timeouts := prque.New(func(data interface{}, index int) { + ordering[data.(*eth.Request)] = index + }) + + timeout := time.NewTimer(0) + if !timeout.Stop() { + <-timeout.C + } + defer timeout.Stop() + + // Track the timed-out but not-yet-answered requests separately. We want to + // keep tracking which peers are busy (potentially overloaded), so removing + // all trace of a timed out request is not good. We also can't just cancel + // the pending request altogether as that would prevent a late response from + // being delivered, thus never unblocking the peer. + stales := make(map[string]*eth.Request) + defer func() { + // Abort all requests on sync cycle cancellation. The requests may still + // be fulfilled by the remote side, but the dispatcher will not wait to + // deliver them since nobody's going to be listening. + for _, req := range stales { + req.Close() + } + }() + // Subscribe to peer lifecycle events to schedule tasks to new joiners and + // reschedule tasks upon disconnections. We don't care which event happened + // for simplicity, so just use a single channel. + peering := make(chan *peeringEvent, 64) // arbitrary buffer, just some burst protection + + peeringSub := d.peers.SubscribeEvents(peering) + defer peeringSub.Unsubscribe() + + // Prepare the queue and fetch block parts until the block header fetcher's done + finished := false + for { + // Short circuit if we lost all our peers + if d.peers.Len() == 0 { + return errNoPeers + } + // If there's nothing more to fetch, wait or terminate + if queue.pending() == 0 { + if len(pending) == 0 && finished { + return nil + } + } else { + // Send a download request to all idle peers, until throttled + var ( + idles []*peerConnection + caps []int + ) + for _, peer := range d.peers.AllPeers() { + pending, stale := pending[peer.id], stales[peer.id] + if pending == nil && stale == nil { + idles = append(idles, peer) + caps = append(caps, queue.capacity(peer, time.Second)) + } else if stale != nil { + if waited := time.Since(stale.Sent); waited > timeoutGracePeriod { + // Request has been in flight longer than the grace period + // permitted it, consider the peer malicious attempting to + // stall the sync. + peer.log.Warn("Peer stalling, dropping", "waited", common.PrettyDuration(waited)) + d.dropPeer(peer.id) + } + } + } + sort.Sort(&peerCapacitySort{idles, caps}) + + var ( + progressed bool + throttled bool + queued = queue.pending() + ) + for _, peer := range idles { + // Short circuit if throttling activated or there are no more + // queued tasks to be retrieved + if throttled { + break + } + if queued = queue.pending(); queued == 0 { + break + } + // Reserve a chunk of fetches for a peer. A nil can mean either that + // no more headers are available, or that the peer is known not to + // have them. 
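The timeout bookkeeping above keys the priority queue on the negated expiry time (`-UnixNano`), so the soonest deadline always sits at the head and a single timer is only ever armed for that head. A sketch of the same trick using the stdlib `container/heap` in place of geth's `prque`:

```go
package main

import (
	"container/heap"
	"fmt"
	"time"
)

// item tracks one in-flight request: exp is -UnixNano of the deadline, so a
// sooner deadline has a larger exp and floats to the heap's root.
type item struct {
	id  string
	exp int64
}

type expiries []item

func (e expiries) Len() int            { return len(e) }
func (e expiries) Less(i, j int) bool  { return e[i].exp > e[j].exp } // max-heap on -UnixNano
func (e expiries) Swap(i, j int)       { e[i], e[j] = e[j], e[i] }
func (e *expiries) Push(x interface{}) { *e = append(*e, x.(item)) }
func (e *expiries) Pop() interface{} {
	old := *e
	it := old[len(old)-1]
	*e = old[:len(old)-1]
	return it
}

func main() {
	q := &expiries{}
	heap.Init(q)
	now := time.Now()
	heap.Push(q, item{"slow", -now.Add(300 * time.Millisecond).UnixNano()})
	heap.Push(q, item{"fast", -now.Add(100 * time.Millisecond).UnixNano()})

	// Arm the single timer for the head (soonest) deadline only.
	head := (*q)[0]
	timer := time.NewTimer(time.Until(time.Unix(0, -head.exp)))
	defer timer.Stop()
	<-timer.C
	fmt.Println("timed out first:", heap.Pop(q).(item).id) // fast
}
```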
+ request, progress, throttle := queue.reserve(peer, queue.capacity(peer, d.peers.rates.TargetRoundTrip())) + if progress { + progressed = true + } + if throttle { + throttled = true + throttleCounter.Inc(1) + } + if request == nil { + continue + } + // Fetch the chunk and make sure any errors return the hashes to the queue + req, err := queue.request(peer, request, responses) + if err != nil { + // Sending the request failed, which generally means the peer + // was disconnected in between assignment and network send. + // Although all peer removal operations return allocated tasks + // to the queue, that is async, and we can do better here by + // immediately pushing the unfulfilled requests. + queue.unreserve(peer.id) // TODO(karalabe): This needs a non-expiration method + continue + } + pending[peer.id] = req + + ttl := d.peers.rates.TargetTimeout() + ordering[req] = timeouts.Size() + + timeouts.Push(req, -time.Now().Add(ttl).UnixNano()) + if timeouts.Size() == 1 { + timeout.Reset(ttl) + } + } + // Make sure that we have peers available for fetching. If all peers have been tried + // and all failed throw an error + if !progressed && !throttled && len(pending) == 0 && len(idles) == d.peers.Len() && queued > 0 { + return errPeersUnavailable + } + } + // Wait for something to happen + select { + case <-d.cancelCh: + // If sync was cancelled, tear down the parallel retriever. Pending + // requests will be cancelled locally, and the remote responses will + // be dropped when they arrive + return errCanceled + + case event := <-peering: + // A peer joined or left, the tasks queue and allocations need to be + // checked for potential assignment or reassignment + peerid := event.peer.id + + if event.join { + // Sanity check the internal state; this can be dropped later + if _, ok := pending[peerid]; ok { + event.peer.log.Error("Pending request exists for joining peer") + } + if _, ok := stales[peerid]; ok { + event.peer.log.Error("Stale request exists for joining peer") + } + // Loop back to the entry point for task assignment + continue + } + // A peer left, any existing requests need to be untracked, pending + // tasks returned and possible reassignment checked + if req, ok := pending[peerid]; ok { + queue.unreserve(peerid) // TODO(karalabe): This needs a non-expiration method + delete(pending, peerid) + req.Close() + + if index, live := ordering[req]; live { + timeouts.Remove(index) + if index == 0 { + if !timeout.Stop() { + <-timeout.C + } + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } + } + delete(ordering, req) + } + } + if req, ok := stales[peerid]; ok { + delete(stales, peerid) + req.Close() + } + + case <-timeout.C: + // Retrieve the next request which should have timed out. The check + // below is purely to catch programming errors, given the correct + // code, there's no possible order of events that should result in a + // timeout firing for a non-existent event. + item, exp := timeouts.Peek() + if now, at := time.Now(), time.Unix(0, -exp); now.Before(at) { + log.Error("Timeout triggered but not reached", "left", at.Sub(now)) + timeout.Reset(at.Sub(now)) + continue + } + req := item.(*eth.Request) + + // Stop tracking the timed out request from a timing perspective, + // cancel it, so it's not considered in-flight anymore, but keep + // the peer marked busy to prevent assigning a second request and + // overloading it further.
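A timed-out request is moved from `pending` to `stales` rather than forgotten: the peer stays marked busy, a late reply can still unblock it, and only silence past the grace period counts as a protocol violation. A toy version of that state transition (hypothetical types, not the downloader's):

```go
package main

import (
	"fmt"
	"time"
)

type request struct{ sent time.Time }

type tracker struct {
	pending map[string]*request
	stales  map[string]*request
}

// timeout demotes a request: the peer stays busy, awaiting a late reply.
func (t *tracker) timeout(peer string) {
	t.stales[peer] = t.pending[peer]
	delete(t.pending, peer)
}

// shouldDrop reports whether a stale request has outlived the grace period,
// which the fetcher treats as stalling and punishes with a disconnect.
func (t *tracker) shouldDrop(peer string, grace time.Duration) bool {
	req, ok := t.stales[peer]
	return ok && time.Since(req.sent) > grace
}

func main() {
	t := &tracker{
		pending: map[string]*request{"p1": {sent: time.Now().Add(-3 * time.Minute)}},
		stales:  map[string]*request{},
	}
	t.timeout("p1")
	fmt.Println(t.shouldDrop("p1", 2*time.Minute)) // true: stalled past grace
}
```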
+ delete(pending, req.Peer) + stales[req.Peer] = req + delete(ordering, req) + + timeouts.Pop() + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } + // New timeout potentially set if there are more requests pending, + // reschedule the failed one to a free peer + fails := queue.unreserve(req.Peer) + + // Finally, update the peer's retrieval capacity, or if it's already + // below the minimum allowance, drop the peer. If a lot of retrieval + // elements expired, we might have overestimated the remote peer or + // perhaps ourselves. Only reset to minimal throughput but don't drop + // just yet. + // + // The reason the minimum threshold is 2 is that the downloader tries + // to estimate the bandwidth and latency of a peer separately, which + // requires pushing the measured capacity a bit and seeing how response + // times react, so it always requests one more than the minimum (i.e. + // min 2). + peer := d.peers.Peer(req.Peer) + if peer == nil { + // If the peer got disconnected in between, we should really have + // short-circuited it already. Just in case there's some strange + // codepath, leave this check in not to crash. + log.Error("Delivery timeout from unknown peer", "peer", req.Peer) + continue + } + if fails > 2 { + queue.updateCapacity(peer, 0, 0) + } else { + d.dropPeer(peer.id) + + // If this peer was the master peer, abort sync immediately + d.cancelLock.RLock() + master := peer.id == d.cancelPeer + d.cancelLock.RUnlock() + + if master { + d.cancel() + return errTimeout + } + } + + case res := <-responses: + // Response arrived, it may be for an existing or an already timed + // out request. If the former, update the timeout heap and perhaps + // reschedule the timeout timer. + index, live := ordering[res.Req] + if live { + timeouts.Remove(index) + if index == 0 { + if !timeout.Stop() { + <-timeout.C + } + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } + } + delete(ordering, res.Req) + } + // Delete the pending request (if it still exists) and mark the peer idle + delete(pending, res.Req.Peer) + delete(stales, res.Req.Peer) + + // Signal the dispatcher that the round trip is done. We'll drop the + // peer if the data turns out to be junk. + res.Done <- nil + res.Req.Close() + + // If the peer was previously banned and failed to deliver its pack + // in a reasonable time frame, ignore its message. + if peer := d.peers.Peer(res.Req.Peer); peer != nil { + // Deliver the received chunk of data and check chain validity + accepted, err := queue.deliver(peer, res) + if errors.Is(err, errInvalidChain) { + return err + } + // Unless a peer delivered something completely different than requested (usually + // caused by a timed out request which came through in the end), set it to + // idle. If the delivery's stale, the peer should have already been idled. + if !errors.Is(err, errStaleDelivery) { + queue.updateCapacity(peer, accepted, res.Time) + } + } + + case cont := <-queue.waker(): + // The header fetcher sent a continuation flag, check if it's done + if !cont { + finished = true + } + } + } +} diff --git a/eth/downloader/fetchers_concurrent_bodies.go b/eth/downloader/fetchers_concurrent_bodies.go new file mode 100644 index 000000000..a8de41032 --- /dev/null +++ b/eth/downloader/fetchers_concurrent_bodies.go @@ -0,0 +1,105 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library.
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package downloader + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/log" +) + +// bodyQueue implements typedQueue and is a type adapter between the generic +// concurrent fetcher and the downloader. +type bodyQueue Downloader + +// waker returns a notification channel that gets pinged in case more body +// fetches have been queued up, so the fetcher might assign it to idle peers. +func (q *bodyQueue) waker() chan bool { + return q.queue.blockWakeCh +} + +// pending returns the number of bodies that are currently queued for fetching +// by the concurrent downloader. +func (q *bodyQueue) pending() int { + return q.queue.PendingBodies() +} + +// capacity is responsible for calculating how many bodies a particular peer is +// estimated to be able to retrieve within the allotted round trip time. +func (q *bodyQueue) capacity(peer *peerConnection, rtt time.Duration) int { + return peer.BodyCapacity(rtt) +} + +// updateCapacity is responsible for updating how many bodies a particular peer +// is estimated to be able to retrieve in a unit time. +func (q *bodyQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) { + peer.UpdateBodyRate(items, span) +} + +// reserve is responsible for allocating a requested number of pending bodies +// from the download queue to the specified peer. +func (q *bodyQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) { + return q.queue.ReserveBodies(peer, items) +} + +// unreserve is responsible for removing the current body retrieval allocation +// assigned to a specific peer and placing it back into the pool to allow +// reassigning to some other peer. +func (q *bodyQueue) unreserve(peer string) int { + fails := q.queue.ExpireBodies(peer) + if fails > 2 { + log.Trace("Body delivery timed out", "peer", peer) + } else { + log.Debug("Body delivery stalling", "peer", peer) + } + return fails +} + +// request is responsible for converting a generic fetch request into a body +// one and sending it to the remote peer for fulfillment. +func (q *bodyQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) { + peer.log.Trace("Requesting new batch of bodies", "count", len(req.Headers), "from", req.Headers[0].Number) + if q.bodyFetchHook != nil { + q.bodyFetchHook(req.Headers) + } + + hashes := make([]common.Hash, 0, len(req.Headers)) + for _, header := range req.Headers { + hashes = append(hashes, header.Hash()) + } + return peer.peer.RequestBodies(hashes, resCh) +} + +// deliver is responsible for taking a generic response packet from the concurrent +// fetcher, unpacking the body data and delivering it to the downloader's queue.
+func (q *bodyQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { + txs, uncles := packet.Res.(*eth.BlockBodiesPacket).Unpack() + hashsets := packet.Meta.([][]common.Hash) // {txs hashes, uncle hashes} + + accepted, err := q.queue.DeliverBodies(peer.id, txs, hashsets[0], uncles, hashsets[1]) + switch { + case err == nil && len(txs) == 0: + peer.log.Trace("Requested bodies delivered") + case err == nil: + peer.log.Trace("Delivered new batch of bodies", "count", len(txs), "accepted", accepted) + default: + peer.log.Debug("Failed to deliver retrieved bodies", "err", err) + } + return accepted, err +} diff --git a/eth/downloader/fetchers_concurrent_headers.go b/eth/downloader/fetchers_concurrent_headers.go new file mode 100644 index 000000000..bd3bb3e00 --- /dev/null +++ b/eth/downloader/fetchers_concurrent_headers.go @@ -0,0 +1,97 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package downloader + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/log" +) + +// headerQueue implements typedQueue and is a type adapter between the generic +// concurrent fetcher and the downloader. +type headerQueue Downloader + +// waker returns a notification channel that gets pinged in case more header +// fetches have been queued up, so the fetcher might assign it to idle peers. +func (q *headerQueue) waker() chan bool { + return q.queue.headerContCh +} + +// pending returns the number of headers that are currently queued for fetching +// by the concurrent downloader. +func (q *headerQueue) pending() int { + return q.queue.PendingHeaders() +} + +// capacity is responsible for calculating how many headers a particular peer is +// estimated to be able to retrieve within the allotted round trip time. +func (q *headerQueue) capacity(peer *peerConnection, rtt time.Duration) int { + return peer.HeaderCapacity(rtt) +} + +// updateCapacity is responsible for updating how many headers a particular peer +// is estimated to be able to retrieve in a unit time. +func (q *headerQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) { + peer.UpdateHeaderRate(items, span) +} + +// reserve is responsible for allocating a requested number of pending headers +// from the download queue to the specified peer. +func (q *headerQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) { + return q.queue.ReserveHeaders(peer, items), false, false +} + +// unreserve is responsible for removing the current header retrieval allocation +// assigned to a specific peer and placing it back into the pool to allow +// reassigning to some other peer.
+func (q *headerQueue) unreserve(peer string) int {
+	fails := q.queue.ExpireHeaders(peer)
+	if fails > 2 {
+		log.Trace("Header delivery timed out", "peer", peer)
+	} else {
+		log.Debug("Header delivery stalling", "peer", peer)
+	}
+	return fails
+}
+
+// request is responsible for converting a generic fetch request into a header
+// one and sending it to the remote peer for fulfillment.
+func (q *headerQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) {
+	peer.log.Trace("Requesting new batch of headers", "from", req.From)
+	return peer.peer.RequestHeadersByNumber(req.From, MaxHeaderFetch, 0, false, resCh)
+}
+
+// deliver is responsible for taking a generic response packet from the concurrent
+// fetcher, unpacking the header data and delivering it to the downloader's queue.
+func (q *headerQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) {
+	headers := *packet.Res.(*eth.BlockHeadersPacket)
+	hashes := packet.Meta.([]common.Hash)
+
+	accepted, err := q.queue.DeliverHeaders(peer.id, headers, hashes, q.headerProcCh)
+	switch {
+	case err == nil && len(headers) == 0:
+		peer.log.Trace("Requested headers delivered")
+	case err == nil:
+		peer.log.Trace("Delivered new batch of headers", "count", len(headers), "accepted", accepted)
+	default:
+		peer.log.Debug("Failed to deliver retrieved headers", "err", err)
+	}
+	return accepted, err
+}
diff --git a/eth/downloader/fetchers_concurrent_receipts.go b/eth/downloader/fetchers_concurrent_receipts.go
new file mode 100644
index 000000000..fee2c3410
--- /dev/null
+++ b/eth/downloader/fetchers_concurrent_receipts.go
@@ -0,0 +1,104 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package downloader
+
+import (
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/eth/protocols/eth"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// receiptQueue implements typedQueue and is a type adapter between the generic
+// concurrent fetcher and the downloader.
+type receiptQueue Downloader
+
+// waker returns a notification channel that gets pinged in case more receipt
+// fetches have been queued up, so the fetcher might assign it to idle peers.
+func (q *receiptQueue) waker() chan bool {
+	return q.queue.receiptWakeCh
+}
+
+// pending returns the number of receipts that are currently queued for fetching
+// by the concurrent downloader.
+func (q *receiptQueue) pending() int {
+	return q.queue.PendingReceipts()
+}
+
+// capacity is responsible for calculating how many receipts a particular peer is
+// estimated to be able to retrieve within the allotted round trip time.
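+// The estimate comes from the peer's measured throughput and is capped at
+// MaxReceiptFetch inside the peer connection logic.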
+func (q *receiptQueue) capacity(peer *peerConnection, rtt time.Duration) int {
+	return peer.ReceiptCapacity(rtt)
+}
+
+// updateCapacity is responsible for updating how many receipts a particular peer
+// is estimated to be able to retrieve in a unit time.
+func (q *receiptQueue) updateCapacity(peer *peerConnection, items int, span time.Duration) {
+	peer.UpdateReceiptRate(items, span)
+}
+
+// reserve is responsible for allocating a requested number of pending receipts
+// from the download queue to the specified peer.
+func (q *receiptQueue) reserve(peer *peerConnection, items int) (*fetchRequest, bool, bool) {
+	return q.queue.ReserveReceipts(peer, items)
+}
+
+// unreserve is responsible for removing the current receipt retrieval allocation
+// assigned to a specific peer and placing it back into the pool to allow
+// reassigning to some other peer.
+func (q *receiptQueue) unreserve(peer string) int {
+	fails := q.queue.ExpireReceipts(peer)
+	if fails > 2 {
+		log.Trace("Receipt delivery timed out", "peer", peer)
+	} else {
+		log.Debug("Receipt delivery stalling", "peer", peer)
+	}
+	return fails
+}
+
+// request is responsible for converting a generic fetch request into a receipt
+// one and sending it to the remote peer for fulfillment.
+func (q *receiptQueue) request(peer *peerConnection, req *fetchRequest, resCh chan *eth.Response) (*eth.Request, error) {
+	peer.log.Trace("Requesting new batch of receipts", "count", len(req.Headers), "from", req.Headers[0].Number)
+	if q.receiptFetchHook != nil {
+		q.receiptFetchHook(req.Headers)
+	}
+	hashes := make([]common.Hash, 0, len(req.Headers))
+	for _, header := range req.Headers {
+		hashes = append(hashes, header.Hash())
+	}
+	return peer.peer.RequestReceipts(hashes, resCh)
+}
+
+// deliver is responsible for taking a generic response packet from the concurrent
+// fetcher, unpacking the receipt data and delivering it to the downloader's queue.
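+// As with bodies, the packet's Meta field carries the receipt hashes computed
+// for the response, so delivery can be validated without re-deriving the tries.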
+func (q *receiptQueue) deliver(peer *peerConnection, packet *eth.Response) (int, error) { + receipts := *packet.Res.(*eth.ReceiptsPacket) + hashes := packet.Meta.([]common.Hash) // {receipt hashes} + + accepted, err := q.queue.DeliverReceipts(peer.id, receipts, hashes) + switch { + case err == nil && len(receipts) == 0: + peer.log.Trace("Requested receipts delivered") + case err == nil: + peer.log.Trace("Delivered new batch of receipts", "count", len(receipts), "accepted", accepted) + default: + peer.log.Debug("Failed to deliver retrieved receipts", "err", err) + } + return accepted, err +} diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go index c38732043..23c033a8a 100644 --- a/eth/downloader/metrics.go +++ b/eth/downloader/metrics.go @@ -38,8 +38,5 @@ var ( receiptDropMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil) receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil) - stateInMeter = metrics.NewRegisteredMeter("eth/downloader/states/in", nil) - stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil) - throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil) ) diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index 3ea14d22d..d388b9ee4 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -24,7 +24,6 @@ type SyncMode uint32 const ( FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks - FastSync // Quickly download the headers, full sync only at the chain SnapSync // Download the chain and the state via compact snapshots LightSync // Download only the headers and terminate afterwards ) @@ -38,8 +37,6 @@ func (mode SyncMode) String() string { switch mode { case FullSync: return "full" - case FastSync: - return "fast" case SnapSync: return "snap" case LightSync: @@ -53,8 +50,6 @@ func (mode SyncMode) MarshalText() ([]byte, error) { switch mode { case FullSync: return []byte("full"), nil - case FastSync: - return []byte("fast"), nil case SnapSync: return []byte("snap"), nil case LightSync: @@ -68,14 +63,12 @@ func (mode *SyncMode) UnmarshalText(text []byte) error { switch string(text) { case "full": *mode = FullSync - case "fast": - *mode = FastSync case "snap": *mode = SnapSync case "light": *mode = LightSync default: - return fmt.Errorf(`unknown sync mode %q, want "full", "fast" or "light"`, text) + return fmt.Errorf(`unknown sync mode %q, want "full", "snap" or "light"`, text) } return nil } diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 863294832..324fdb9cd 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -22,9 +22,7 @@ package downloader import ( "errors" "math/big" - "sort" "sync" - "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" @@ -39,7 +37,6 @@ const ( ) var ( - errAlreadyFetching = errors.New("already fetching blocks from peer") errAlreadyRegistered = errors.New("peer is already registered") errNotRegistered = errors.New("peer is not registered") ) @@ -48,16 +45,6 @@ var ( type peerConnection struct { id string // Unique identifier of the peer - headerIdle int32 // Current header activity state of the peer (idle = 0, active = 1) - blockIdle int32 // Current block activity state of the peer (idle = 0, active = 1) - receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1) - stateIdle int32 // Current node data activity state of the peer (idle = 0, active = 1) - - headerStarted time.Time // Time instance when the last 
header fetch was started - blockStarted time.Time // Time instance when the last block (body) fetch was started - receiptStarted time.Time // Time instance when the last receipt fetch was started - stateStarted time.Time // Time instance when the last node data fetch was started - rates *msgrate.Tracker // Tracker to hone in on the number of items retrievable per second lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously) @@ -71,16 +58,15 @@ type peerConnection struct { // LightPeer encapsulates the methods required to synchronise with a remote light peer. type LightPeer interface { Head() (common.Hash, *big.Int) - RequestHeadersByHash(common.Hash, int, int, bool) error - RequestHeadersByNumber(uint64, int, int, bool) error + RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) + RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) } // Peer encapsulates the methods required to synchronise with a remote full peer. type Peer interface { LightPeer - RequestBodies([]common.Hash) error - RequestReceipts([]common.Hash) error - RequestNodeData([]common.Hash) error + RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) + RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) } // lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods. @@ -89,21 +75,18 @@ type lightPeerWrapper struct { } func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() } -func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error { - return w.peer.RequestHeadersByHash(h, amount, skip, reverse) +func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + return w.peer.RequestHeadersByHash(h, amount, skip, reverse, sink) } -func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error { - return w.peer.RequestHeadersByNumber(i, amount, skip, reverse) +func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { + return w.peer.RequestHeadersByNumber(i, amount, skip, reverse, sink) } -func (w *lightPeerWrapper) RequestBodies([]common.Hash) error { +func (w *lightPeerWrapper) RequestBodies([]common.Hash, chan *eth.Response) (*eth.Request, error) { panic("RequestBodies not supported in light client mode sync") } -func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error { +func (w *lightPeerWrapper) RequestReceipts([]common.Hash, chan *eth.Response) (*eth.Request, error) { panic("RequestReceipts not supported in light client mode sync") } -func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error { - panic("RequestNodeData not supported in light client mode sync") -} // newPeerConnection creates a new downloader peer. func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { @@ -121,114 +104,28 @@ func (p *peerConnection) Reset() { p.lock.Lock() defer p.lock.Unlock() - atomic.StoreInt32(&p.headerIdle, 0) - atomic.StoreInt32(&p.blockIdle, 0) - atomic.StoreInt32(&p.receiptIdle, 0) - atomic.StoreInt32(&p.stateIdle, 0) - p.lacking = make(map[common.Hash]struct{}) } -// FetchHeaders sends a header retrieval request to the remote peer. 
-func (p *peerConnection) FetchHeaders(from uint64, count int) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) { - return errAlreadyFetching - } - p.headerStarted = time.Now() - - // Issue the header retrieval request (absolute upwards without gaps) - go p.peer.RequestHeadersByNumber(from, count, 0, false) - - return nil +// UpdateHeaderRate updates the peer's estimated header retrieval throughput with +// the current measurement. +func (p *peerConnection) UpdateHeaderRate(delivered int, elapsed time.Duration) { + p.rates.Update(eth.BlockHeadersMsg, elapsed, delivered) } -// FetchBodies sends a block body retrieval request to the remote peer. -func (p *peerConnection) FetchBodies(request *fetchRequest) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) { - return errAlreadyFetching - } - p.blockStarted = time.Now() - - go func() { - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - p.peer.RequestBodies(hashes) - }() - - return nil +// UpdateBodyRate updates the peer's estimated body retrieval throughput with the +// current measurement. +func (p *peerConnection) UpdateBodyRate(delivered int, elapsed time.Duration) { + p.rates.Update(eth.BlockBodiesMsg, elapsed, delivered) } -// FetchReceipts sends a receipt retrieval request to the remote peer. -func (p *peerConnection) FetchReceipts(request *fetchRequest) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) { - return errAlreadyFetching - } - p.receiptStarted = time.Now() - - go func() { - // Convert the header set to a retrievable slice - hashes := make([]common.Hash, 0, len(request.Headers)) - for _, header := range request.Headers { - hashes = append(hashes, header.Hash()) - } - p.peer.RequestReceipts(hashes) - }() - - return nil +// UpdateReceiptRate updates the peer's estimated receipt retrieval throughput +// with the current measurement. +func (p *peerConnection) UpdateReceiptRate(delivered int, elapsed time.Duration) { + p.rates.Update(eth.ReceiptsMsg, elapsed, delivered) } -// FetchNodeData sends a node state data retrieval request to the remote peer. -func (p *peerConnection) FetchNodeData(hashes []common.Hash) error { - // Short circuit if the peer is already fetching - if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) { - return errAlreadyFetching - } - p.stateStarted = time.Now() - - go p.peer.RequestNodeData(hashes) - - return nil -} - -// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval -// requests. Its estimated header retrieval throughput is updated with that measured -// just now. -func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered) - atomic.StoreInt32(&p.headerIdle, 0) -} - -// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval -// requests. Its estimated body retrieval throughput is updated with that measured -// just now. 
-func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered) - atomic.StoreInt32(&p.blockIdle, 0) -} - -// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt -// retrieval requests. Its estimated receipt retrieval throughput is updated -// with that measured just now. -func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered) - atomic.StoreInt32(&p.receiptIdle, 0) -} - -// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie -// data retrieval requests. Its estimated state retrieval throughput is updated -// with that measured just now. -func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) { - p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered) - atomic.StoreInt32(&p.stateIdle, 0) -} - -// HeaderCapacity retrieves the peers header download allowance based on its +// HeaderCapacity retrieves the peer's header download allowance based on its // previously discovered throughput. func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT) @@ -238,9 +135,9 @@ func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int { return cap } -// BlockCapacity retrieves the peers block download allowance based on its +// BodyCapacity retrieves the peer's body download allowance based on its // previously discovered throughput. -func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int { +func (p *peerConnection) BodyCapacity(targetRTT time.Duration) int { cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT) if cap > MaxBlockFetch { cap = MaxBlockFetch @@ -258,16 +155,6 @@ func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int { return cap } -// NodeDataCapacity retrieves the peers state download allowance based on its -// previously discovered throughput. -func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int { - cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT) - if cap > MaxStateFetch { - cap = MaxStateFetch - } - return cap -} - // MarkLacking appends a new entity to the set of items (blocks, receipts, states) // that a peer is known not to have (i.e. have been requested before). If the // set reaches its maximum allowed capacity, items are randomly dropped off. @@ -294,14 +181,19 @@ func (p *peerConnection) Lacks(hash common.Hash) bool { return ok } +// peeringEvent is sent on the peer event feed when a remote peer connects or +// disconnects. +type peeringEvent struct { + peer *peerConnection + join bool +} + // peerSet represents the collection of active peer participating in the chain // download procedure. type peerSet struct { - peers map[string]*peerConnection - rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat - - newPeerFeed event.Feed - peerDropFeed event.Feed + peers map[string]*peerConnection + rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat + events event.Feed // Feed to publish peer lifecycle events on lock sync.RWMutex } @@ -314,14 +206,9 @@ func newPeerSet() *peerSet { } } -// SubscribeNewPeers subscribes to peer arrival events. 
-func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription { - return ps.newPeerFeed.Subscribe(ch) -} - -// SubscribePeerDrops subscribes to peer departure events. -func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription { - return ps.peerDropFeed.Subscribe(ch) +// SubscribeEvents subscribes to peer arrival and departure events. +func (ps *peerSet) SubscribeEvents(ch chan<- *peeringEvent) event.Subscription { + return ps.events.Subscribe(ch) } // Reset iterates over the current peer set, and resets each of the known peers @@ -355,7 +242,7 @@ func (ps *peerSet) Register(p *peerConnection) error { ps.peers[p.id] = p ps.lock.Unlock() - ps.newPeerFeed.Send(p) + ps.events.Send(&peeringEvent{peer: p, join: true}) return nil } @@ -372,7 +259,7 @@ func (ps *peerSet) Unregister(id string) error { ps.rates.Untrack(id) ps.lock.Unlock() - ps.peerDropFeed.Send(p) + ps.events.Send(&peeringEvent{peer: p, join: false}) return nil } @@ -404,82 +291,6 @@ func (ps *peerSet) AllPeers() []*peerConnection { return list } -// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers -// within the active peer set, ordered by their reputation. -func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.headerIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.BlockHeadersMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within -// the active peer set, ordered by their reputation. -func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.blockIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.BlockBodiesMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers -// within the active peer set, ordered by their reputation. -func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.receiptIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.ReceiptsMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle -// peers within the active peer set, ordered by their reputation. -func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { - idle := func(p *peerConnection) bool { - return atomic.LoadInt32(&p.stateIdle) == 0 - } - throughput := func(p *peerConnection) int { - return p.rates.Capacity(eth.NodeDataMsg, time.Second) - } - return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput) -} - -// idlePeers retrieves a flat list of all currently idle peers satisfying the -// protocol version constraints, using the provided function to check idleness. -// The resulting set of peers are sorted by their capacity. 
-func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) {
-	ps.lock.RLock()
-	defer ps.lock.RUnlock()
-
-	var (
-		total = 0
-		idle  = make([]*peerConnection, 0, len(ps.peers))
-		tps   = make([]int, 0, len(ps.peers))
-	)
-	for _, p := range ps.peers {
-		if p.version >= minProtocol && p.version <= maxProtocol {
-			if idleCheck(p) {
-				idle = append(idle, p)
-				tps = append(tps, capacity(p))
-			}
-			total++
-		}
-	}
-
-	// And sort them
-	sortPeers := &peerCapacitySort{idle, tps}
-	sort.Sort(sortPeers)
-	return sortPeers.p, total
-}
-
 // peerCapacitySort implements sort.Interface.
 // It sorts peer connections by capacity (descending).
 type peerCapacitySort struct {
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 04ec12cfa..ff34d932f 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -31,7 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
-	"github.com/ethereum/go-ethereum/trie"
 )
 
 const (
@@ -54,8 +53,8 @@ var (
 // fetchRequest is a currently running data retrieval operation.
 type fetchRequest struct {
 	Peer    *peerConnection // Peer to which the request was sent
-	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
-	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
+	From    uint64          // Requested chain element index (used for skeleton fills only)
+	Headers []*types.Header // Requested headers, sorted by request order
 	Time    time.Time       // Time when the request was made
 }
 
@@ -119,6 +118,7 @@ type queue struct {
 	headerPeerMiss map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
 	headerPendPool map[string]*fetchRequest       // Currently pending header retrieval operations
 	headerResults  []*types.Header                // Result cache accumulating the completed headers
+	headerHashes   []common.Hash                  // Result cache accumulating the completed header hashes
 	headerProced   int                            // Number of headers already processed from the results
 	headerOffset   uint64                         // Number of the first header in the result cache
 	headerContCh   chan bool                      // Channel to notify when header download finishes
@@ -127,10 +127,12 @@ type queue struct {
 	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
 	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
 	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations
+	blockWakeCh    chan bool                     // Channel to notify the block fetcher of new tasks
 
 	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
 	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
 	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations
+	receiptWakeCh    chan bool                     // Channel to notify the receipt fetcher of new tasks
 
 	resultCache *resultStore       // Downloaded but not yet delivered fetch results
 	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)
@@ -146,9 +148,11 @@ type queue struct {
 func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
 	lock := new(sync.RWMutex)
 	q := &queue{
-		headerContCh:     make(chan bool),
+		headerContCh:     make(chan bool, 1),
 		blockTaskQueue:   prque.New(nil),
+		blockWakeCh:      make(chan bool, 1),
 		receiptTaskQueue:
prque.New(nil), + receiptWakeCh: make(chan bool, 1), active: sync.NewCond(lock), lock: lock, } @@ -196,8 +200,8 @@ func (q *queue) PendingHeaders() int { return q.headerTaskQueue.Size() } -// PendingBlocks retrieves the number of block (body) requests pending for retrieval. -func (q *queue) PendingBlocks() int { +// PendingBodies retrieves the number of block body requests pending for retrieval. +func (q *queue) PendingBodies() int { q.lock.Lock() defer q.lock.Unlock() @@ -212,15 +216,6 @@ func (q *queue) PendingReceipts() int { return q.receiptTaskQueue.Size() } -// InFlightHeaders retrieves whether there are header fetch requests currently -// in flight. -func (q *queue) InFlightHeaders() bool { - q.lock.Lock() - defer q.lock.Unlock() - - return len(q.headerPendPool) > 0 -} - // InFlightBlocks retrieves whether there are block fetch requests currently in // flight. func (q *queue) InFlightBlocks() bool { @@ -265,6 +260,7 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { q.headerTaskQueue = prque.New(nil) q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch) + q.headerHashes = make([]common.Hash, len(skeleton)*MaxHeaderFetch) q.headerProced = 0 q.headerOffset = from q.headerContCh = make(chan bool, 1) @@ -279,27 +275,27 @@ func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) { // RetrieveHeaders retrieves the header chain assemble based on the scheduled // skeleton. -func (q *queue) RetrieveHeaders() ([]*types.Header, int) { +func (q *queue) RetrieveHeaders() ([]*types.Header, []common.Hash, int) { q.lock.Lock() defer q.lock.Unlock() - headers, proced := q.headerResults, q.headerProced - q.headerResults, q.headerProced = nil, 0 + headers, hashes, proced := q.headerResults, q.headerHashes, q.headerProced + q.headerResults, q.headerHashes, q.headerProced = nil, nil, 0 - return headers, proced + return headers, hashes, proced } // Schedule adds a set of headers for the download queue for scheduling, returning // the new headers encountered. 
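+// The precomputed header hashes are passed in alongside the headers, sparing
+// the queue from re-deriving them while filling the body and receipt task pools.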
-func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { +func (q *queue) Schedule(headers []*types.Header, hashes []common.Hash, from uint64) []*types.Header { q.lock.Lock() defer q.lock.Unlock() // Insert all the headers prioritised by the contained block number inserts := make([]*types.Header, 0, len(headers)) - for _, header := range headers { + for i, header := range headers { // Make sure chain order is honoured and preserved throughout - hash := header.Hash() + hash := hashes[i] if header.Number == nil || header.Number.Uint64() != from { log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from) break @@ -318,7 +314,7 @@ func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header { q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) } // Queue for receipt retrieval - if q.mode == FastSync && !header.EmptyReceipts() { + if q.mode == SnapSync && !header.EmptyReceipts() { if _, ok := q.receiptTaskPool[hash]; ok { log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash) } else { @@ -383,6 +379,13 @@ func (q *queue) Results(block bool) []*fetchResult { throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize) throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold) + // With results removed from the cache, wake throttled fetchers + for _, ch := range []chan bool{q.blockWakeCh, q.receiptWakeCh} { + select { + case ch <- true: + default: + } + } // Log some info at certain times if time.Since(q.lastStatLog) > 60*time.Second { q.lastStatLog = time.Now() @@ -503,7 +506,7 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common // we can ask the resultcache if this header is within the // "prioritized" segment of blocks. If it is not, we need to throttle - stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync) + stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == SnapSync) if stale { // Don't put back in the task queue, this item has already been // delivered upstream @@ -566,40 +569,6 @@ func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common return request, progress, throttled } -// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue. -func (q *queue) CancelHeaders(request *fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - q.cancel(request, q.headerTaskQueue, q.headerPendPool) -} - -// CancelBodies aborts a body fetch request, returning all pending headers to the -// task queue. -func (q *queue) CancelBodies(request *fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - q.cancel(request, q.blockTaskQueue, q.blockPendPool) -} - -// CancelReceipts aborts a body fetch request, returning all pending headers to -// the task queue. -func (q *queue) CancelReceipts(request *fetchRequest) { - q.lock.Lock() - defer q.lock.Unlock() - q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) -} - -// Cancel aborts a fetch request, returning all pending hashes to the task queue. -func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) { - if request.From > 0 { - taskQueue.Push(request.From, -int64(request.From)) - } - for _, header := range request.Headers { - taskQueue.Push(header, -int64(header.Number.Uint64())) - } - delete(pendPool, request.Peer.id) -} - // Revoke cancels all pending requests belonging to a given peer. 
This method is // meant to be called during a peer drop to quickly reassign owned data fetches // to remaining nodes. @@ -607,6 +576,10 @@ func (q *queue) Revoke(peerID string) { q.lock.Lock() defer q.lock.Unlock() + if request, ok := q.headerPendPool[peerID]; ok { + q.headerTaskQueue.Push(request.From, -int64(request.From)) + delete(q.headerPendPool, peerID) + } if request, ok := q.blockPendPool[peerID]; ok { for _, header := range request.Headers { q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) @@ -621,62 +594,60 @@ func (q *queue) Revoke(peerID string) { } } -// ExpireHeaders checks for in flight requests that exceeded a timeout allowance, -// canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int { +// ExpireHeaders cancels a request that timed out and moves the pending fetch +// task back into the queue for rescheduling. +func (q *queue) ExpireHeaders(peer string) int { q.lock.Lock() defer q.lock.Unlock() - return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter) + headerTimeoutMeter.Mark(1) + return q.expire(peer, q.headerPendPool, q.headerTaskQueue) } // ExpireBodies checks for in flight block body requests that exceeded a timeout // allowance, canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireBodies(timeout time.Duration) map[string]int { +func (q *queue) ExpireBodies(peer string) int { q.lock.Lock() defer q.lock.Unlock() - return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter) + bodyTimeoutMeter.Mark(1) + return q.expire(peer, q.blockPendPool, q.blockTaskQueue) } // ExpireReceipts checks for in flight receipt requests that exceeded a timeout // allowance, canceling them and returning the responsible peers for penalisation. -func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int { +func (q *queue) ExpireReceipts(peer string) int { q.lock.Lock() defer q.lock.Unlock() - return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter) + receiptTimeoutMeter.Mark(1) + return q.expire(peer, q.receiptPendPool, q.receiptTaskQueue) } -// expire is the generic check that move expired tasks from a pending pool back -// into a task pool, returning all entities caught with expired tasks. +// expire is the generic check that moves a specific expired task from a pending +// pool back into a task pool. // -// Note, this method expects the queue lock to be already held. The -// reason the lock is not obtained in here is because the parameters already need -// to access the queue, so they already need a lock anyway. 
-func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
-	// Iterate over the expired requests and return each to the queue
-	expiries := make(map[string]int)
-	for id, request := range pendPool {
-		if time.Since(request.Time) > timeout {
-			// Update the metrics with the timeout
-			timeoutMeter.Mark(1)
-
-			// Return any non satisfied requests to the pool
-			if request.From > 0 {
-				taskQueue.Push(request.From, -int64(request.From))
-			}
-			for _, header := range request.Headers {
-				taskQueue.Push(header, -int64(header.Number.Uint64()))
-			}
-			// Add the peer to the expiry report along the number of failed requests
-			expiries[id] = len(request.Headers)
-
-			// Remove the expired requests from the pending pool directly
-			delete(pendPool, id)
-		}
+// Note, this method expects the queue lock to be already held. The reason the
+// lock is not obtained in here is that the parameters already need to access
+// the queue, so they already need a lock anyway.
+func (q *queue) expire(peer string, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) int {
+	// Retrieve the request being expired and log an error if it's non-existent,
+	// as there's no order of events that should lead to such expirations.
+	req := pendPool[peer]
+	if req == nil {
+		log.Error("Expired request does not exist", "peer", peer)
+		return 0
 	}
-	return expiries
+	delete(pendPool, peer)
+
+	// Return any non-satisfied requests to the pool
+	if req.From > 0 {
+		taskQueue.Push(req.From, -int64(req.From))
+	}
+	for _, header := range req.Headers {
+		taskQueue.Push(header, -int64(header.Number.Uint64()))
+	}
+	return len(req.Headers)
 }
 
 // DeliverHeaders injects a header retrieval response into the header results
@@ -684,9 +655,9 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest,
 // if they do not map correctly to the skeleton.
 //
 // If the headers are accepted, the method makes an attempt to deliver the set
-// of ready headers to the processor to keep the pipeline full. However it will
+// of ready headers to the processor to keep the pipeline full. However, it will
 // not block to prevent stalling other pending deliveries.
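+// The headers are forwarded to the processor together with their precomputed
+// hashes, bundled into a single headerTask so the two slices stay aligned.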
-func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) { +func (q *queue) DeliverHeaders(id string, headers []*types.Header, hashes []common.Hash, headerProcCh chan *headerTask) (int, error) { q.lock.Lock() defer q.lock.Unlock() @@ -700,28 +671,31 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh // Short circuit if the data was never requested request := q.headerPendPool[id] if request == nil { + headerDropMeter.Mark(int64(len(headers))) return 0, errNoFetchesPending } - headerReqTimer.UpdateSince(request.Time) delete(q.headerPendPool, id) + headerReqTimer.UpdateSince(request.Time) + headerInMeter.Mark(int64(len(headers))) + // Ensure headers can be mapped onto the skeleton chain target := q.headerTaskPool[request.From].Hash() accepted := len(headers) == MaxHeaderFetch if accepted { if headers[0].Number.Uint64() != request.From { - logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From) + logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", hashes[0], "expected", request.From) accepted = false - } else if headers[len(headers)-1].Hash() != target { - logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target) + } else if hashes[len(headers)-1] != target { + logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", hashes[len(headers)-1], "expected", target) accepted = false } } if accepted { - parentHash := headers[0].Hash() + parentHash := hashes[0] for i, header := range headers[1:] { - hash := header.Hash() + hash := hashes[i+1] if want := request.From + 1 + uint64(i); header.Number.Uint64() != want { logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want) accepted = false @@ -739,6 +713,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh // If the batch of headers wasn't accepted, mark as unavailable if !accepted { logger.Trace("Skeleton filling not accepted", "from", request.From) + headerDropMeter.Mark(int64(len(headers))) miss := q.headerPeerMiss[id] if miss == nil { @@ -752,6 +727,8 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh } // Clean up a successful fetch and try to deliver any sub-results copy(q.headerResults[request.From-q.headerOffset:], headers) + copy(q.headerHashes[request.From-q.headerOffset:], hashes) + delete(q.headerTaskPool, request.From) ready := 0 @@ -760,13 +737,19 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh } if ready > 0 { // Headers are ready for delivery, gather them and push forward (non blocking) - process := make([]*types.Header, ready) - copy(process, q.headerResults[q.headerProced:q.headerProced+ready]) + processHeaders := make([]*types.Header, ready) + copy(processHeaders, q.headerResults[q.headerProced:q.headerProced+ready]) + + processHashes := make([]common.Hash, ready) + copy(processHashes, q.headerHashes[q.headerProced:q.headerProced+ready]) select { - case headerProcCh <- process: - logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number) - q.headerProced += len(process) + case headerProcCh <- &headerTask{ + headers: processHeaders, + hashes: processHashes, + }: + logger.Trace("Pre-scheduled new headers", "count", 
len(processHeaders), "from", processHeaders[0].Number) + q.headerProced += len(processHeaders) default: } } @@ -780,15 +763,15 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh // DeliverBodies injects a block body retrieval response into the results queue. // The method returns the number of blocks bodies accepted from the delivery and // also wakes any threads waiting for data delivery. -func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) { +func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, txListHashes []common.Hash, uncleLists [][]*types.Header, uncleListHashes []common.Hash) (int, error) { q.lock.Lock() defer q.lock.Unlock() - trieHasher := trie.NewStackTrie(nil) + validate := func(index int, header *types.Header) error { - if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash { + if txListHashes[index] != header.TxHash { return errInvalidBody } - if types.CalcUncleHash(uncleLists[index]) != header.UncleHash { + if uncleListHashes[index] != header.UncleHash { return errInvalidBody } return nil @@ -800,18 +783,18 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi result.SetBodyDone() } return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, - bodyReqTimer, len(txLists), validate, reconstruct) + bodyReqTimer, bodyInMeter, bodyDropMeter, len(txLists), validate, reconstruct) } // DeliverReceipts injects a receipt retrieval response into the results queue. // The method returns the number of transaction receipts accepted from the delivery // and also wakes any threads waiting for data delivery. -func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) { +func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, receiptListHashes []common.Hash) (int, error) { q.lock.Lock() defer q.lock.Unlock() - trieHasher := trie.NewStackTrie(nil) + validate := func(index int, header *types.Header) error { - if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash { + if receiptListHashes[index] != header.ReceiptHash { return errInvalidReceipt } return nil @@ -821,7 +804,7 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, result.SetReceiptsDone() } return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, - receiptReqTimer, len(receiptList), validate, reconstruct) + receiptReqTimer, receiptInMeter, receiptDropMeter, len(receiptList), validate, reconstruct) } // deliver injects a data retrieval response into the results queue. @@ -830,18 +813,22 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, // reason this lock is not obtained in here is because the parameters already need // to access the queue, so they already need a lock anyway. 
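+// Each delivery is metered on arrival, and any items that were unsolicited or
+// rejected during validation are additionally counted on the drop meter.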
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, - taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer, + taskQueue *prque.Prque, pendPool map[string]*fetchRequest, + reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter, results int, validate func(index int, header *types.Header) error, reconstruct func(index int, result *fetchResult)) (int, error) { // Short circuit if the data was never requested request := pendPool[id] if request == nil { + resDropMeter.Mark(int64(results)) return 0, errNoFetchesPending } - reqTimer.UpdateSince(request.Time) delete(pendPool, id) + reqTimer.UpdateSince(request.Time) + resInMeter.Mark(int64(results)) + // If no data items were retrieved, mark them as unavailable for the origin peer if results == 0 { for _, header := range request.Headers { @@ -883,6 +870,8 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, delete(taskPool, hashes[accepted]) accepted++ } + resDropMeter.Mark(int64(results - accepted)) + // Return all failed or missing fetches to the queue for _, header := range request.Headers[accepted:] { taskQueue.Push(header, -int64(header.Number.Uint64())) diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index cde5f306a..f729def67 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -104,17 +105,22 @@ func TestBasics(t *testing.T) { if !q.Idle() { t.Errorf("new queue should be idle") } - q.Prepare(1, FastSync) + q.Prepare(1, SnapSync) if res := q.Results(false); len(res) != 0 { t.Fatal("new queue should have 0 results") } // Schedule a batch of headers - q.Schedule(chain.headers(), 1) + headers := chain.headers() + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = header.Hash() + } + q.Schedule(headers, hashes, 1) if q.Idle() { t.Errorf("queue should not be idle") } - if got, exp := q.PendingBlocks(), chain.Len(); got != exp { + if got, exp := q.PendingBodies(), chain.Len(); got != exp { t.Errorf("wrong pending block count, got %d, exp %d", got, exp) } // Only non-empty receipts get added to task-queue @@ -197,13 +203,19 @@ func TestEmptyBlocks(t *testing.T) { q := newQueue(10, 10) - q.Prepare(1, FastSync) + q.Prepare(1, SnapSync) + // Schedule a batch of headers - q.Schedule(emptyChain.headers(), 1) + headers := emptyChain.headers() + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = header.Hash() + } + q.Schedule(headers, hashes, 1) if q.Idle() { t.Errorf("queue should not be idle") } - if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp { + if got, exp := q.PendingBodies(), len(emptyChain.blocks); got != exp { t.Errorf("wrong pending block count, got %d, exp %d", got, exp) } if got, exp := q.PendingReceipts(), 0; got != exp { @@ -272,7 +284,7 @@ func XTestDelivery(t *testing.T) { } q := newQueue(10, 10) var wg sync.WaitGroup - q.Prepare(1, FastSync) + q.Prepare(1, SnapSync) wg.Add(1) go func() { // deliver headers @@ -280,11 +292,15 @@ func XTestDelivery(t *testing.T) { c := 1 for { //fmt.Printf("getting headers from %d\n", c) - hdrs := world.headers(c) - l := len(hdrs) + headers := world.headers(c) + hashes := make([]common.Hash, len(headers)) + for i, header := range headers { + hashes[i] = 
header.Hash() + } + l := len(headers) //fmt.Printf("scheduling %d headers, first %d last %d\n", - // l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64()) - q.Schedule(hdrs, uint64(c)) + // l, headers[0].Number.Uint64(), headers[len(headers)-1].Number.Uint64()) + q.Schedule(headers, hashes, uint64(c)) c += l } }() @@ -311,18 +327,31 @@ func XTestDelivery(t *testing.T) { peer := dummyPeer(fmt.Sprintf("peer-%d", i)) f, _, _ := q.ReserveBodies(peer, rand.Intn(30)) if f != nil { - var emptyList []*types.Header - var txs [][]*types.Transaction - var uncles [][]*types.Header + var ( + emptyList []*types.Header + txset [][]*types.Transaction + uncleset [][]*types.Header + ) numToSkip := rand.Intn(len(f.Headers)) for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] { - txs = append(txs, world.getTransactions(hdr.Number.Uint64())) - uncles = append(uncles, emptyList) + txset = append(txset, world.getTransactions(hdr.Number.Uint64())) + uncleset = append(uncleset, emptyList) + } + var ( + txsHashes = make([]common.Hash, len(txset)) + uncleHashes = make([]common.Hash, len(uncleset)) + ) + hasher := trie.NewStackTrie(nil) + for i, txs := range txset { + txsHashes[i] = types.DeriveSha(types.Transactions(txs), hasher) + } + for i, uncles := range uncleset { + uncleHashes[i] = types.CalcUncleHash(uncles) } time.Sleep(100 * time.Millisecond) - _, err := q.DeliverBodies(peer.id, txs, uncles) + _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes) if err != nil { - fmt.Printf("delivered %d bodies %v\n", len(txs), err) + fmt.Printf("delivered %d bodies %v\n", len(txset), err) } } else { i++ @@ -341,7 +370,12 @@ func XTestDelivery(t *testing.T) { for _, hdr := range f.Headers { rcs = append(rcs, world.getReceipts(hdr.Number.Uint64())) } - _, err := q.DeliverReceipts(peer.id, rcs) + hasher := trie.NewStackTrie(nil) + hashes := make([]common.Hash, len(rcs)) + for i, receipt := range rcs { + hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) + } + _, err := q.DeliverReceipts(peer.id, rcs, hashes) if err != nil { fmt.Printf("delivered %d receipts %v\n", len(rcs), err) } diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go index 6c53e5577..501af63ed 100644 --- a/eth/downloader/statesync.go +++ b/eth/downloader/statesync.go @@ -17,48 +17,12 @@ package downloader import ( - "fmt" "sync" - "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/trie" - "golang.org/x/crypto/sha3" ) -// stateReq represents a batch of state fetch requests grouped together into -// a single data retrieval network packet. 
-type stateReq struct { - nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient) - trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts - codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts - timeout time.Duration // Maximum round trip time for this to complete - timer *time.Timer // Timer to fire when the RTT timeout expires - peer *peerConnection // Peer that we're requesting from - delivered time.Time // Time when the packet was delivered (independent when we process it) - response [][]byte // Response data of the peer (nil for timeouts) - dropped bool // Flag whether the peer dropped off early -} - -// timedOut returns if this request timed out. -func (req *stateReq) timedOut() bool { - return req.response == nil -} - -// stateSyncStats is a collection of progress stats to report during a state trie -// sync to RPC requests as well as to display in user logs. -type stateSyncStats struct { - processed uint64 // Number of state entries processed - duplicate uint64 // Number of state entries downloaded twice - unexpected uint64 // Number of non-requested state entries received - pending uint64 // Number of still pending state entries -} - // syncState starts downloading state with the given root hash. func (d *Downloader) syncState(root common.Hash) *stateSync { // Create the state sync @@ -85,8 +49,6 @@ func (d *Downloader) stateFetcher() { for next := s; next != nil; { next = d.runStateSync(next) } - case <-d.stateCh: - // Ignore state responses while no sync is running. case <-d.quitCh: return } @@ -96,216 +58,44 @@ func (d *Downloader) stateFetcher() { // runStateSync runs a state synchronisation until it completes or another root // hash is requested to be switched over to. func (d *Downloader) runStateSync(s *stateSync) *stateSync { - var ( - active = make(map[string]*stateReq) // Currently in-flight requests - finished []*stateReq // Completed or failed requests - timeout = make(chan *stateReq) // Timed out active requests - ) log.Trace("State sync starting", "root", s.root) - defer func() { - // Cancel active request timers on exit. Also set peers to idle so they're - // available for the next sync. - for _, req := range active { - req.timer.Stop() - req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) - } - }() go s.run() defer s.Cancel() - // Listen for peer departure events to cancel assigned tasks - peerDrop := make(chan *peerConnection, 1024) - peerSub := s.d.peers.SubscribePeerDrops(peerDrop) - defer peerSub.Unsubscribe() - for { - // Enable sending of the first buffered element if there is one. 
- var ( - deliverReq *stateReq - deliverReqCh chan *stateReq - ) - if len(finished) > 0 { - deliverReq = finished[0] - deliverReqCh = s.deliver - } - select { - // The stateSync lifecycle: case next := <-d.stateSyncStart: - d.spindownStateSync(active, finished, timeout, peerDrop) return next case <-s.done: - d.spindownStateSync(active, finished, timeout, peerDrop) return nil - - // Send the next finished request to the current sync: - case deliverReqCh <- deliverReq: - // Shift out the first request, but also set the emptied slot to nil for GC - copy(finished, finished[1:]) - finished[len(finished)-1] = nil - finished = finished[:len(finished)-1] - - // Handle incoming state packs: - case pack := <-d.stateCh: - // Discard any data not requested (or previously timed out) - req := active[pack.PeerId()] - if req == nil { - log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items()) - continue - } - // Finalize the request and queue up for processing - req.timer.Stop() - req.response = pack.(*statePack).states - req.delivered = time.Now() - - finished = append(finished, req) - delete(active, pack.PeerId()) - - // Handle dropped peer connections: - case p := <-peerDrop: - // Skip if no request is currently pending - req := active[p.id] - if req == nil { - continue - } - // Finalize the request and queue up for processing - req.timer.Stop() - req.dropped = true - req.delivered = time.Now() - - finished = append(finished, req) - delete(active, p.id) - - // Handle timed-out requests: - case req := <-timeout: - // If the peer is already requesting something else, ignore the stale timeout. - // This can happen when the timeout and the delivery happens simultaneously, - // causing both pathways to trigger. - if active[req.peer.id] != req { - continue - } - req.delivered = time.Now() - // Move the timed out data back into the download queue - finished = append(finished, req) - delete(active, req.peer.id) - - // Track outgoing state requests: - case req := <-d.trackStateReq: - // If an active request already exists for this peer, we have a problem. In - // theory the trie node schedule must never assign two requests to the same - // peer. In practice however, a peer might receive a request, disconnect and - // immediately reconnect before the previous times out. In this case the first - // request is never honored, alas we must not silently overwrite it, as that - // causes valid requests to go missing and sync to get stuck. - if old := active[req.peer.id]; old != nil { - log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id) - // Move the previous request to the finished set - old.timer.Stop() - old.dropped = true - old.delivered = time.Now() - finished = append(finished, old) - } - // Start a timer to notify the sync loop if the peer stalled. - req.timer = time.AfterFunc(req.timeout, func() { - timeout <- req - }) - active[req.peer.id] = req } } } -// spindownStateSync 'drains' the outstanding requests; some will be delivered and other -// will time out. This is to ensure that when the next stateSync starts working, all peers -// are marked as idle and de facto _are_ idle. 
-func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) { - log.Trace("State sync spinning down", "active", len(active), "finished", len(finished)) - for len(active) > 0 { - var ( - req *stateReq - reason string - ) - select { - // Handle (drop) incoming state packs: - case pack := <-d.stateCh: - req = active[pack.PeerId()] - reason = "delivered" - // Handle dropped peer connections: - case p := <-peerDrop: - req = active[p.id] - reason = "peerdrop" - // Handle timed-out requests: - case req = <-timeout: - reason = "timeout" - } - if req == nil { - continue - } - req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason) - req.timer.Stop() - delete(active, req.peer.id) - req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) - } - // The 'finished' set contains deliveries that we were going to pass to processing. - // Those are now moot, but we still need to set those peers as idle, which would - // otherwise have been done after processing - for _, req := range finished { - req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) - } -} - // stateSync schedules requests for downloading a particular state trie defined // by a given state root. type stateSync struct { - d *Downloader // Downloader instance to access and manage current peerset + d *Downloader // Downloader instance to access and manage current peerset + root common.Hash // State root currently being synced - root common.Hash // State root currently being synced - sched *trie.Sync // State trie sync scheduler defining the tasks - keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with - - trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval - codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval - - numUncommitted int - bytesUncommitted int - - started chan struct{} // Started is signalled once the sync loop starts - - deliver chan *stateReq // Delivery channel multiplexing peer responses - cancel chan struct{} // Channel to signal a termination request - cancelOnce sync.Once // Ensures cancel only ever gets called once - done chan struct{} // Channel to signal termination completion - err error // Any error hit during sync (set before completion) -} - -// trieTask represents a single trie node download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. -type trieTask struct { - path [][]byte - attempts map[string]struct{} -} - -// codeTask represents a single byte code download task, containing a set of -// peers already attempted retrieval from to detect stalled syncs and abort. -type codeTask struct { - attempts map[string]struct{} + started chan struct{} // Started is signalled once the sync loop starts + cancel chan struct{} // Channel to signal a termination request + cancelOnce sync.Once // Ensures cancel only ever gets called once + done chan struct{} // Channel to signal termination completion + err error // Any error hit during sync (set before completion) } // newStateSync creates a new state trie download scheduler. This method does not // yet start the sync. The user needs to call run to initiate. 
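+// With the legacy state fetcher removed, the returned object is little more
+// than a lifecycle wrapper driving the snap syncer for the given root.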
func newStateSync(d *Downloader, root common.Hash) *stateSync { return &stateSync{ - d: d, - root: root, - sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil), - keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), - trieTasks: make(map[common.Hash]*trieTask), - codeTasks: make(map[common.Hash]*codeTask), - deliver: make(chan *stateReq), - cancel: make(chan struct{}), - done: make(chan struct{}), - started: make(chan struct{}), + d: d, + root: root, + cancel: make(chan struct{}), + done: make(chan struct{}), + started: make(chan struct{}), } } @@ -314,11 +104,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync { // finish. func (s *stateSync) run() { close(s.started) - if s.d.snapSync { - s.err = s.d.SnapSyncer.Sync(s.root, s.cancel) - } else { - s.err = s.loop() - } + s.err = s.d.SnapSyncer.Sync(s.root, s.cancel) close(s.done) } @@ -335,281 +121,3 @@ func (s *stateSync) Cancel() error { }) return s.Wait() } - -// loop is the main event loop of a state trie sync. It it responsible for the -// assignment of new tasks to peers (including sending it to them) as well as -// for the processing of inbound data. Note, that the loop does not directly -// receive data from peers, rather those are buffered up in the downloader and -// pushed here async. The reason is to decouple processing from data receipt -// and timeouts. -func (s *stateSync) loop() (err error) { - // Listen for new peer events to assign tasks to them - newPeer := make(chan *peerConnection, 1024) - peerSub := s.d.peers.SubscribeNewPeers(newPeer) - defer peerSub.Unsubscribe() - defer func() { - cerr := s.commit(true) - if err == nil { - err = cerr - } - }() - - // Keep assigning new tasks until the sync completes or aborts - for s.sched.Pending() > 0 { - if err = s.commit(false); err != nil { - return err - } - s.assignTasks() - // Tasks assigned, wait for something to happen - select { - case <-newPeer: - // New peer arrived, try to assign it download tasks - - case <-s.cancel: - return errCancelStateFetch - - case <-s.d.cancelCh: - return errCanceled - - case req := <-s.deliver: - // Response, disconnect or timeout triggered, drop the peer if stalling - log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut()) - if req.nItems <= 2 && !req.dropped && req.timedOut() { - // 2 items are the minimum requested, if even that times out, we've no use of - // this peer at the moment. - log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id) - if s.d.dropPeer == nil { - // The dropPeer method is nil when `--copydb` is used for a local copy. - // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored - req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id) - } else { - s.d.dropPeer(req.peer.id) - - // If this peer was the master peer, abort sync immediately - s.d.cancelLock.RLock() - master := req.peer.id == s.d.cancelPeer - s.d.cancelLock.RUnlock() - - if master { - s.d.cancel() - return errTimeout - } - } - } - // Process all the received blobs and check for stale delivery - delivered, err := s.process(req) - req.peer.SetNodeDataIdle(delivered, req.delivered) - if err != nil { - log.Warn("Node data write error", "err", err) - return err - } - } - } - return nil -} - -func (s *stateSync) commit(force bool) error { - if !force && s.bytesUncommitted < ethdb.IdealBatchSize { - return nil - } - start := time.Now() - b := s.d.stateDB.NewBatch() - if err := s.sched.Commit(b); err != nil { - return err - } - if err := b.Write(); err != nil { - return fmt.Errorf("DB write error: %v", err) - } - s.updateStats(s.numUncommitted, 0, 0, time.Since(start)) - s.numUncommitted = 0 - s.bytesUncommitted = 0 - return nil -} - -// assignTasks attempts to assign new tasks to all idle peers, either from the -// batch currently being retried, or fetching new data from the trie sync itself. -func (s *stateSync) assignTasks() { - // Iterate over all idle peers and try to assign them state fetches - peers, _ := s.d.peers.NodeDataIdlePeers() - for _, p := range peers { - // Assign a batch of fetches proportional to the estimated latency/bandwidth - cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip()) - req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()} - - nodes, _, codes := s.fillTasks(cap, req) - - // If the peer was assigned tasks to fetch, send the network request - if len(nodes)+len(codes) > 0 { - req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root) - select { - case s.d.trackStateReq <- req: - req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x - case <-s.cancel: - case <-s.d.cancelCh: - } - } - } -} - -// fillTasks fills the given request object with a maximum of n state download -// tasks to send to the remote peer. -func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) { - // Refill available tasks from the scheduler. - if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 { - nodes, paths, codes := s.sched.Missing(fill) - for i, hash := range nodes { - s.trieTasks[hash] = &trieTask{ - path: paths[i], - attempts: make(map[string]struct{}), - } - } - for _, hash := range codes { - s.codeTasks[hash] = &codeTask{ - attempts: make(map[string]struct{}), - } - } - } - // Find tasks that haven't been tried with the request's peer. Prefer code - // over trie nodes as those can be written to disk and forgotten about. 
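That code-before-trie preference, together with a per-task set of already-attempted peers, is what keeps retries from stalling on a single peer. A condensed, runnable sketch of the selection rule the code below implements (simplified types, illustrative names):

```go
package main

import "fmt"

// fill sketches the selection logic: prefer code tasks over trie tasks and
// never hand a peer a task it has already attempted.
func fill(peer string, cap int, codeTasks, trieTasks map[string]map[string]bool) []string {
	var picked []string
	for _, pool := range []map[string]map[string]bool{codeTasks, trieTasks} {
		for hash, attempts := range pool {
			if len(picked) == cap {
				return picked // peer capacity exhausted
			}
			if attempts[peer] {
				continue // this peer already failed to deliver it
			}
			attempts[peer] = true
			picked = append(picked, hash)
			delete(pool, hash)
		}
	}
	return picked
}

func main() {
	codeTasks := map[string]map[string]bool{
		"code-a": {"peer-1": true}, // peer-1 tried this one before
		"code-b": {},
	}
	trieTasks := map[string]map[string]bool{
		"node-a": {},
		"node-b": {},
	}
	fmt.Println(fill("peer-1", 2, codeTasks, trieTasks))
}
```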
- nodes = make([]common.Hash, 0, n) - paths = make([]trie.SyncPath, 0, n) - codes = make([]common.Hash, 0, n) - - req.trieTasks = make(map[common.Hash]*trieTask, n) - req.codeTasks = make(map[common.Hash]*codeTask, n) - - for hash, t := range s.codeTasks { - // Stop when we've gathered enough requests - if len(nodes)+len(codes) == n { - break - } - // Skip any requests we've already tried from this peer - if _, ok := t.attempts[req.peer.id]; ok { - continue - } - // Assign the request to this peer - t.attempts[req.peer.id] = struct{}{} - codes = append(codes, hash) - req.codeTasks[hash] = t - delete(s.codeTasks, hash) - } - for hash, t := range s.trieTasks { - // Stop when we've gathered enough requests - if len(nodes)+len(codes) == n { - break - } - // Skip any requests we've already tried from this peer - if _, ok := t.attempts[req.peer.id]; ok { - continue - } - // Assign the request to this peer - t.attempts[req.peer.id] = struct{}{} - - nodes = append(nodes, hash) - paths = append(paths, t.path) - - req.trieTasks[hash] = t - delete(s.trieTasks, hash) - } - req.nItems = uint16(len(nodes) + len(codes)) - return nodes, paths, codes -} - -// process iterates over a batch of delivered state data, injecting each item -// into a running state sync, re-queuing any items that were requested but not -// delivered. Returns whether the peer actually managed to deliver anything of -// value, and any error that occurred. -func (s *stateSync) process(req *stateReq) (int, error) { - // Collect processing stats and update progress if valid data was received - duplicate, unexpected, successful := 0, 0, 0 - - defer func(start time.Time) { - if duplicate > 0 || unexpected > 0 { - s.updateStats(0, duplicate, unexpected, time.Since(start)) - } - }(time.Now()) - - // Iterate over all the delivered data and inject one-by-one into the trie - for _, blob := range req.response { - hash, err := s.processNodeData(blob) - switch err { - case nil: - s.numUncommitted++ - s.bytesUncommitted += len(blob) - successful++ - case trie.ErrNotRequested: - unexpected++ - case trie.ErrAlreadyProcessed: - duplicate++ - default: - return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err) - } - // Delete from both queues (one delivery is enough for the syncer) - delete(req.trieTasks, hash) - delete(req.codeTasks, hash) - } - // Put unfulfilled tasks back into the retry queue - npeers := s.d.peers.Len() - for hash, task := range req.trieTasks { - // If the node did deliver something, missing items may be due to a protocol - // limit or a previous timeout + delayed delivery. Both cases should permit - // the node to retry the missing items (to avoid single-peer stalls). - if len(req.response) > 0 || req.timedOut() { - delete(task.attempts, req.peer.id) - } - // If we've requested the node too many times already, it may be a malicious - // sync where nobody has the right data. Abort. - if len(task.attempts) >= npeers { - return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) - } - // Missing item, place into the retry queue. - s.trieTasks[hash] = task - } - for hash, task := range req.codeTasks { - // If the node did deliver something, missing items may be due to a protocol - // limit or a previous timeout + delayed delivery. Both cases should permit - // the node to retry the missing items (to avoid single-peer stalls). 
- if len(req.response) > 0 || req.timedOut() { - delete(task.attempts, req.peer.id) - } - // If we've requested the node too many times already, it may be a malicious - // sync where nobody has the right data. Abort. - if len(task.attempts) >= npeers { - return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers) - } - // Missing item, place into the retry queue. - s.codeTasks[hash] = task - } - return successful, nil -} - -// processNodeData tries to inject a trie node data blob delivered from a remote -// peer into the state trie, returning whether anything useful was written or any -// error occurred. -func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) { - res := trie.SyncResult{Data: blob} - s.keccak.Reset() - s.keccak.Write(blob) - s.keccak.Read(res.Hash[:]) - err := s.sched.Process(res) - return res.Hash, err -} - -// updateStats bumps the various state sync progress counters and displays a log -// message for the user to see. -func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) { - s.d.syncStatsLock.Lock() - defer s.d.syncStatsLock.Unlock() - - s.d.syncStatsState.pending = uint64(s.sched.Pending()) - s.d.syncStatsState.processed += uint64(written) - s.d.syncStatsState.duplicate += uint64(duplicate) - s.d.syncStatsState.unexpected += uint64(unexpected) - - if written > 0 || duplicate > 0 || unexpected > 0 { - log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected) - } - if written > 0 { - rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed) - } -} diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go index b9865f7e0..8b873343c 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -20,12 +20,14 @@ import ( "fmt" "math/big" "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" ) @@ -39,73 +41,110 @@ var ( ) // The common prefix of all test chains: -var testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) +var testChainBase *testChain // Different forks on top of the base chain: var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain +var pregenerated bool + func init() { + // Reduce some of the parameters to make the tester faster + fullMaxForkAncestry = 10000 + lightMaxForkAncestry = 10000 + blockCacheMaxItems = 1024 + fsHeaderSafetyNet = 256 + fsHeaderContCheck = 500 * time.Millisecond + + testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) + var forkLen = int(fullMaxForkAncestry + 50) var wg sync.WaitGroup + + // Generate the test chains to seed the peers with wg.Add(3) go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }() go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }() go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }() wg.Wait() + 
+ // Generate the test peers used by the tests to avoid overloading during testing. + // These seemingly random chains are used in various downloader tests. We're just + // pre-generating them here. + chains := []*testChain{ + testChainBase, + testChainForkLightA, + testChainForkLightB, + testChainForkHeavy, + testChainBase.shorten(1), + testChainBase.shorten(blockCacheMaxItems - 15), + testChainBase.shorten((blockCacheMaxItems - 15) / 2), + testChainBase.shorten(blockCacheMaxItems - 15 - 5), + testChainBase.shorten(MaxHeaderFetch), + testChainBase.shorten(800), + testChainBase.shorten(800 / 2), + testChainBase.shorten(800 / 3), + testChainBase.shorten(800 / 4), + testChainBase.shorten(800 / 5), + testChainBase.shorten(800 / 6), + testChainBase.shorten(800 / 7), + testChainBase.shorten(800 / 8), + testChainBase.shorten(3*fsHeaderSafetyNet + 256 + fsMinFullBlocks), + testChainBase.shorten(fsMinFullBlocks + 256 - 1), + testChainForkLightA.shorten(len(testChainBase.blocks) + 80), + testChainForkLightB.shorten(len(testChainBase.blocks) + 81), + testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch), + testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch), + testChainForkHeavy.shorten(len(testChainBase.blocks) + 79), + } + wg.Add(len(chains)) + for _, chain := range chains { + go func(blocks []*types.Block) { + newTestBlockchain(blocks) + wg.Done() + }(chain.blocks[1:]) + } + wg.Wait() + + // Mark the chains pregenerated. Generating a new one will lead to a panic. + pregenerated = true } type testChain struct { - genesis *types.Block - chain []common.Hash - headerm map[common.Hash]*types.Header - blockm map[common.Hash]*types.Block - receiptm map[common.Hash][]*types.Receipt - tdm map[common.Hash]*big.Int + blocks []*types.Block } // newTestChain creates a blockchain of the given length. func newTestChain(length int, genesis *types.Block) *testChain { - tc := new(testChain).copy(length) - tc.genesis = genesis - tc.chain = append(tc.chain, genesis.Hash()) - tc.headerm[tc.genesis.Hash()] = tc.genesis.Header() - tc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty() - tc.blockm[tc.genesis.Hash()] = tc.genesis + tc := &testChain{ + blocks: []*types.Block{genesis}, + } tc.generate(length-1, 0, genesis, false) return tc } // makeFork creates a fork on top of the test chain. func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain { - fork := tc.copy(tc.len() + length) - fork.generate(length, seed, tc.headBlock(), heavy) + fork := tc.copy(len(tc.blocks) + length) + fork.generate(length, seed, tc.blocks[len(tc.blocks)-1], heavy) return fork } // shorten creates a copy of the chain with the given length. It panics if the // length is longer than the number of available blocks. 
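The `init` routine above pre-generates, in parallel, every chain the tests will later request, turning per-test chain derivation into a one-off cost; the `pregenerated` flag then turns any straggler into a hard failure. A toy version of the same guard (fixture names and sizes invented):

```go
package main

import (
	"fmt"
	"sync"
)

var (
	fixtures     = make(map[string][]int)
	fixturesLock sync.Mutex
	pregenerated bool
)

// build stands in for expensive chain generation; after init has finished,
// any further generation request is a bug and panics loudly.
func build(name string, n int) {
	if pregenerated {
		panic("requested fixture generation outside of init")
	}
	data := make([]int, n)

	fixturesLock.Lock()
	fixtures[name] = data
	fixturesLock.Unlock()
}

func init() {
	names := []string{"base", "forkA", "forkB", "heavy"}

	var wg sync.WaitGroup
	wg.Add(len(names))
	for i, name := range names {
		go func(name string, n int) {
			defer wg.Done()
			build(name, n)
		}(name, (i+1)*1000)
	}
	wg.Wait()

	pregenerated = true // from here on, build panics
}

func main() {
	fmt.Println("fixtures ready:", len(fixtures))
}
```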
func (tc *testChain) shorten(length int) *testChain { - if length > tc.len() { - panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len())) + if length > len(tc.blocks) { + panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, len(tc.blocks))) } return tc.copy(length) } func (tc *testChain) copy(newlen int) *testChain { - cpy := &testChain{ - genesis: tc.genesis, - headerm: make(map[common.Hash]*types.Header, newlen), - blockm: make(map[common.Hash]*types.Block, newlen), - receiptm: make(map[common.Hash][]*types.Receipt, newlen), - tdm: make(map[common.Hash]*big.Int, newlen), + if newlen > len(tc.blocks) { + newlen = len(tc.blocks) } - for i := 0; i < len(tc.chain) && i < newlen; i++ { - hash := tc.chain[i] - cpy.chain = append(cpy.chain, tc.chain[i]) - cpy.tdm[hash] = tc.tdm[hash] - cpy.blockm[hash] = tc.blockm[hash] - cpy.headerm[hash] = tc.headerm[hash] - cpy.receiptm[hash] = tc.receiptm[hash] + cpy := &testChain{ + blocks: append([]*types.Block{}, tc.blocks[:newlen]...), } return cpy } @@ -115,17 +154,14 @@ func (tc *testChain) copy(newlen int) *testChain { // contains a transaction and every 5th an uncle to allow testing correct block // reassembly. func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) { - // start := time.Now() - // defer func() { fmt.Printf("test chain generated in %v\n", time.Since(start)) }() - - blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { + blocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { block.SetCoinbase(common.Address{seed}) // If a heavy chain is requested, delay blocks to raise difficulty if heavy { - block.OffsetTime(-1) + block.OffsetTime(-9) } // Include transactions to the miner to make blocks more interesting. - if parent == tc.genesis && i%22 == 0 { + if parent == tc.blocks[0] && i%22 == 0 { signer := types.MakeSigner(params.TestChainConfig, block.Number()) tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) if err != nil { @@ -136,95 +172,56 @@ func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) // if the block number is a multiple of 5, add a bonus uncle to the block if i > 0 && i%5 == 0 { block.AddUncle(&types.Header{ - ParentHash: block.PrevBlock(i - 1).Hash(), + ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(block.Number().Int64() - 1), }) } }) + tc.blocks = append(tc.blocks, blocks...) +} - // Convert the block-chain into a hash-chain and header/block maps - td := new(big.Int).Set(tc.td(parent.Hash())) - for i, b := range blocks { - td := td.Add(td, b.Difficulty()) - hash := b.Hash() - tc.chain = append(tc.chain, hash) - tc.blockm[hash] = b - tc.headerm[hash] = b.Header() - tc.receiptm[hash] = receipts[i] - tc.tdm[hash] = new(big.Int).Set(td) +var ( + testBlockchains = make(map[common.Hash]*testBlockchain) + testBlockchainsLock sync.Mutex +) + +type testBlockchain struct { + chain *core.BlockChain + gen sync.Once +} + +// newTestBlockchain creates a blockchain database built by running the given blocks, +// either actually running them, or reusing a previously created one. The returned +// chains are *shared*, so *do not* mutate them. 
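`newTestBlockchain`, which follows, keys the shared chains by head hash and guards construction with a `sync.Once` per entry, so concurrent tests build each database at most once. A self-contained sketch of that memoization pattern (hypothetical names):

```go
package main

import (
	"fmt"
	"sync"
)

type fixture struct {
	gen   sync.Once
	value string
}

var (
	fixtures     = make(map[string]*fixture)
	fixturesLock sync.Mutex
)

// get returns the shared fixture for key, building it at most once. Callers
// must treat the result as read-only, exactly like the shared blockchains.
func get(key string) string {
	fixturesLock.Lock()
	f, ok := fixtures[key]
	if !ok {
		f = new(fixture)
		fixtures[key] = f
	}
	fixturesLock.Unlock()

	f.gen.Do(func() {
		f.value = "built-" + key // expensive construction happens once
	})
	return f.value
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(get("head-hash"))
		}()
	}
	wg.Wait()
}
```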
+func newTestBlockchain(blocks []*types.Block) *core.BlockChain { + // Retrieve an existing database, or create a new one + head := testGenesis.Hash() + if len(blocks) > 0 { + head = blocks[len(blocks)-1].Hash() } -} - -// len returns the total number of blocks in the chain. -func (tc *testChain) len() int { - return len(tc.chain) -} - -// headBlock returns the head of the chain. -func (tc *testChain) headBlock() *types.Block { - return tc.blockm[tc.chain[len(tc.chain)-1]] -} - -// td returns the total difficulty of the given block. -func (tc *testChain) td(hash common.Hash) *big.Int { - return tc.tdm[hash] -} - -// headersByHash returns headers in order from the given hash. -func (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header { - num, _ := tc.hashToNumber(origin) - return tc.headersByNumber(num, amount, skip, reverse) -} - -// headersByNumber returns headers from the given number. -func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header { - result := make([]*types.Header, 0, amount) - - if !reverse { - for num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 { - if header, ok := tc.headerm[tc.chain[int(num)]]; ok { - result = append(result, header) - } - } - } else { - for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 { - if header, ok := tc.headerm[tc.chain[int(num)]]; ok { - result = append(result, header) - } - } + testBlockchainsLock.Lock() + if _, ok := testBlockchains[head]; !ok { + testBlockchains[head] = new(testBlockchain) } - return result -} + tbc := testBlockchains[head] + testBlockchainsLock.Unlock() -// receipts returns the receipts of the given block hashes. -func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt { - results := make([][]*types.Receipt, 0, len(hashes)) - for _, hash := range hashes { - if receipt, ok := tc.receiptm[hash]; ok { - results = append(results, receipt) + // Ensure that the database is generated + tbc.gen.Do(func() { + if pregenerated { + panic("Requested chain generation outside of init") } - } - return results -} + db := rawdb.NewMemoryDatabase() + core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000)) -// bodies returns the block bodies of the given block hashes. -func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) { - transactions := make([][]*types.Transaction, 0, len(hashes)) - uncles := make([][]*types.Header, 0, len(hashes)) - for _, hash := range hashes { - if block, ok := tc.blockm[hash]; ok { - transactions = append(transactions, block.Transactions()) - uncles = append(uncles, block.Uncles()) + chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + if err != nil { + panic(err) } - } - return transactions, uncles -} - -func (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) { - for num, hash := range tc.chain { - if hash == target { - return uint64(num), true + if n, err := chain.InsertChain(blocks); err != nil { + panic(fmt.Sprintf("block %d: %v", n, err)) } - } - return 0, false + tbc.chain = chain + }) + return tbc.chain } diff --git a/eth/downloader/types.go b/eth/downloader/types.go deleted file mode 100644 index ff70bfa0e..000000000 --- a/eth/downloader/types.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/core/types" -) - -// peerDropFn is a callback type for dropping a peer detected as malicious. -type peerDropFn func(id string) - -// dataPack is a data message returned by a peer for some query. -type dataPack interface { - PeerId() string - Items() int - Stats() string -} - -// headerPack is a batch of block headers returned by a peer. -type headerPack struct { - peerID string - headers []*types.Header -} - -func (p *headerPack) PeerId() string { return p.peerID } -func (p *headerPack) Items() int { return len(p.headers) } -func (p *headerPack) Stats() string { return fmt.Sprintf("%d", len(p.headers)) } - -// bodyPack is a batch of block bodies returned by a peer. -type bodyPack struct { - peerID string - transactions [][]*types.Transaction - uncles [][]*types.Header -} - -func (p *bodyPack) PeerId() string { return p.peerID } -func (p *bodyPack) Items() int { - if len(p.transactions) <= len(p.uncles) { - return len(p.transactions) - } - return len(p.uncles) -} -func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) } - -// receiptPack is a batch of receipts returned by a peer. -type receiptPack struct { - peerID string - receipts [][]*types.Receipt -} - -func (p *receiptPack) PeerId() string { return p.peerID } -func (p *receiptPack) Items() int { return len(p.receipts) } -func (p *receiptPack) Stats() string { return fmt.Sprintf("%d", len(p.receipts)) } - -// statePack is a batch of states returned by a peer. -type statePack struct { - peerID string - states [][]byte -} - -func (p *statePack) PeerId() string { return p.peerID } -func (p *statePack) Items() int { return len(p.states) } -func (p *statePack) Stats() string { return fmt.Sprintf("%d", len(p.states)) } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 29b47af25..1dbd5a7f1 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" @@ -204,35 +205,39 @@ type Config struct { // Arrow Glacier block override (TODO: remove after the fork) OverrideArrowGlacier *big.Int `toml:",omitempty"` + + // OverrideTerminalTotalDifficulty (TODO: remove after the fork) + OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain configuration. 
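The reworked constructor below no longer returns ethash or clique directly: whichever engine it builds is wrapped via `beacon.New`, which applies the legacy rules before the merge and PoS rules after it. A toy sketch of the wrapper idea (the real `beacon.Beacon` decides based on terminal total difficulty; this version just carries a flag):

```go
package main

import "fmt"

// engine is a trimmed-down stand-in for consensus.Engine.
type engine interface {
	verify(header string) error
}

type ethash struct{}

func (ethash) verify(h string) error {
	fmt.Println("pow-verify", h)
	return nil
}

// beaconWrapper defers to the legacy engine before the transition and applies
// post-merge rules after it, mirroring what beacon.New(engine) sets up.
type beaconWrapper struct {
	legacy engine
	merged bool // the real engine derives this from terminal total difficulty
}

func (b beaconWrapper) verify(h string) error {
	if b.merged {
		fmt.Println("pos-verify", h) // post-merge rule set
		return nil
	}
	return b.legacy.verify(h) // pre-merge: ethash or clique rules
}

func main() {
	var e engine = beaconWrapper{legacy: ethash{}}
	e.verify("header-1")
}
```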
func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine { // If proof-of-authority is requested, set it up + var engine consensus.Engine if chainConfig.Clique != nil { - return clique.New(chainConfig.Clique, db) + engine = clique.New(chainConfig.Clique, db) + } else { + switch config.PowMode { + case ethash.ModeFake: + log.Warn("Ethash used in fake mode") + case ethash.ModeTest: + log.Warn("Ethash used in test mode") + case ethash.ModeShared: + log.Warn("Ethash used in shared mode") + } + engine = ethash.New(ethash.Config{ + PowMode: config.PowMode, + CacheDir: stack.ResolvePath(config.CacheDir), + CachesInMem: config.CachesInMem, + CachesOnDisk: config.CachesOnDisk, + CachesLockMmap: config.CachesLockMmap, + DatasetDir: config.DatasetDir, + DatasetsInMem: config.DatasetsInMem, + DatasetsOnDisk: config.DatasetsOnDisk, + DatasetsLockMmap: config.DatasetsLockMmap, + NotifyFull: config.NotifyFull, + }, notify, noverify) + engine.(*ethash.Ethash).SetThreads(-1) // Disable CPU mining } - // Otherwise assume proof-of-work - switch config.PowMode { - case ethash.ModeFake: - log.Warn("Ethash used in fake mode") - case ethash.ModeTest: - log.Warn("Ethash used in test mode") - case ethash.ModeShared: - log.Warn("Ethash used in shared mode") - } - engine := ethash.New(ethash.Config{ - PowMode: config.PowMode, - CacheDir: stack.ResolvePath(config.CacheDir), - CachesInMem: config.CachesInMem, - CachesOnDisk: config.CachesOnDisk, - CachesLockMmap: config.CachesLockMmap, - DatasetDir: config.DatasetDir, - DatasetsInMem: config.DatasetsInMem, - DatasetsOnDisk: config.DatasetsOnDisk, - DatasetsLockMmap: config.DatasetsLockMmap, - NotifyFull: config.NotifyFull, - }, notify, noverify) - engine.SetThreads(-1) // Disable CPU mining - return engine + return beacon.New(engine) } diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 1f1ee3aaf..70a9649bf 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -18,48 +18,49 @@ import ( // MarshalTOML marshals as TOML. 
func (c Config) MarshalTOML() (interface{}, error) { type Config struct { - Genesis *core.Genesis `toml:",omitempty"` - NetworkId uint64 - SyncMode downloader.SyncMode - EthDiscoveryURLs []string - SnapDiscoveryURLs []string - NoPruning bool - NoPrefetch bool - TxLookupLimit uint64 `toml:",omitempty"` - Whitelist map[uint64]common.Hash `toml:"-"` - LightServ int `toml:",omitempty"` - LightIngress int `toml:",omitempty"` - LightEgress int `toml:",omitempty"` - LightPeers int `toml:",omitempty"` - LightNoPrune bool `toml:",omitempty"` - LightNoSyncServe bool `toml:",omitempty"` - SyncFromCheckpoint bool `toml:",omitempty"` - UltraLightServers []string `toml:",omitempty"` - UltraLightFraction int `toml:",omitempty"` - UltraLightOnlyAnnounce bool `toml:",omitempty"` - SkipBcVersionCheck bool `toml:"-"` - DatabaseHandles int `toml:"-"` - DatabaseCache int - DatabaseFreezer string - TrieCleanCache int - TrieCleanCacheJournal string `toml:",omitempty"` - TrieCleanCacheRejournal time.Duration `toml:",omitempty"` - TrieDirtyCache int - TrieTimeout time.Duration - SnapshotCache int - Preimages bool - Miner miner.Config - Ethash ethash.Config - TxPool core.TxPoolConfig - GPO gasprice.Config - EnablePreimageRecording bool - DocRoot string `toml:"-"` - RPCGasCap uint64 - RPCEVMTimeout time.Duration - RPCTxFeeCap float64 - Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` - CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` - OverrideArrowGlacier *big.Int `toml:",omitempty"` + Genesis *core.Genesis `toml:",omitempty"` + NetworkId uint64 + SyncMode downloader.SyncMode + EthDiscoveryURLs []string + SnapDiscoveryURLs []string + NoPruning bool + NoPrefetch bool + TxLookupLimit uint64 `toml:",omitempty"` + Whitelist map[uint64]common.Hash `toml:"-"` + LightServ int `toml:",omitempty"` + LightIngress int `toml:",omitempty"` + LightEgress int `toml:",omitempty"` + LightPeers int `toml:",omitempty"` + LightNoPrune bool `toml:",omitempty"` + LightNoSyncServe bool `toml:",omitempty"` + SyncFromCheckpoint bool `toml:",omitempty"` + UltraLightServers []string `toml:",omitempty"` + UltraLightFraction int `toml:",omitempty"` + UltraLightOnlyAnnounce bool `toml:",omitempty"` + SkipBcVersionCheck bool `toml:"-"` + DatabaseHandles int `toml:"-"` + DatabaseCache int + DatabaseFreezer string + TrieCleanCache int + TrieCleanCacheJournal string `toml:",omitempty"` + TrieCleanCacheRejournal time.Duration `toml:",omitempty"` + TrieDirtyCache int + TrieTimeout time.Duration + SnapshotCache int + Preimages bool + Miner miner.Config + Ethash ethash.Config + TxPool core.TxPoolConfig + GPO gasprice.Config + EnablePreimageRecording bool + DocRoot string `toml:"-"` + RPCGasCap uint64 + RPCEVMTimeout time.Duration + RPCTxFeeCap float64 + Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` + CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` + OverrideArrowGlacier *big.Int `toml:",omitempty"` + OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -104,54 +105,56 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.Checkpoint = c.Checkpoint enc.CheckpointOracle = c.CheckpointOracle enc.OverrideArrowGlacier = c.OverrideArrowGlacier + enc.OverrideTerminalTotalDifficulty = c.OverrideTerminalTotalDifficulty return &enc, nil } // UnmarshalTOML unmarshals from TOML. 
func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { type Config struct { - Genesis *core.Genesis `toml:",omitempty"` - NetworkId *uint64 - SyncMode *downloader.SyncMode - EthDiscoveryURLs []string - SnapDiscoveryURLs []string - NoPruning *bool - NoPrefetch *bool - TxLookupLimit *uint64 `toml:",omitempty"` - Whitelist map[uint64]common.Hash `toml:"-"` - LightServ *int `toml:",omitempty"` - LightIngress *int `toml:",omitempty"` - LightEgress *int `toml:",omitempty"` - LightPeers *int `toml:",omitempty"` - LightNoPrune *bool `toml:",omitempty"` - LightNoSyncServe *bool `toml:",omitempty"` - SyncFromCheckpoint *bool `toml:",omitempty"` - UltraLightServers []string `toml:",omitempty"` - UltraLightFraction *int `toml:",omitempty"` - UltraLightOnlyAnnounce *bool `toml:",omitempty"` - SkipBcVersionCheck *bool `toml:"-"` - DatabaseHandles *int `toml:"-"` - DatabaseCache *int - DatabaseFreezer *string - TrieCleanCache *int - TrieCleanCacheJournal *string `toml:",omitempty"` - TrieCleanCacheRejournal *time.Duration `toml:",omitempty"` - TrieDirtyCache *int - TrieTimeout *time.Duration - SnapshotCache *int - Preimages *bool - Miner *miner.Config - Ethash *ethash.Config - TxPool *core.TxPoolConfig - GPO *gasprice.Config - EnablePreimageRecording *bool - DocRoot *string `toml:"-"` - RPCGasCap *uint64 - RPCEVMTimeout *time.Duration - RPCTxFeeCap *float64 - Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` - CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` - OverrideArrowGlacier *big.Int `toml:",omitempty"` + Genesis *core.Genesis `toml:",omitempty"` + NetworkId *uint64 + SyncMode *downloader.SyncMode + EthDiscoveryURLs []string + SnapDiscoveryURLs []string + NoPruning *bool + NoPrefetch *bool + TxLookupLimit *uint64 `toml:",omitempty"` + Whitelist map[uint64]common.Hash `toml:"-"` + LightServ *int `toml:",omitempty"` + LightIngress *int `toml:",omitempty"` + LightEgress *int `toml:",omitempty"` + LightPeers *int `toml:",omitempty"` + LightNoPrune *bool `toml:",omitempty"` + LightNoSyncServe *bool `toml:",omitempty"` + SyncFromCheckpoint *bool `toml:",omitempty"` + UltraLightServers []string `toml:",omitempty"` + UltraLightFraction *int `toml:",omitempty"` + UltraLightOnlyAnnounce *bool `toml:",omitempty"` + SkipBcVersionCheck *bool `toml:"-"` + DatabaseHandles *int `toml:"-"` + DatabaseCache *int + DatabaseFreezer *string + TrieCleanCache *int + TrieCleanCacheJournal *string `toml:",omitempty"` + TrieCleanCacheRejournal *time.Duration `toml:",omitempty"` + TrieDirtyCache *int + TrieTimeout *time.Duration + SnapshotCache *int + Preimages *bool + Miner *miner.Config + Ethash *ethash.Config + TxPool *core.TxPoolConfig + GPO *gasprice.Config + EnablePreimageRecording *bool + DocRoot *string `toml:"-"` + RPCGasCap *uint64 + RPCEVMTimeout *time.Duration + RPCTxFeeCap *float64 + Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` + CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` + OverrideArrowGlacier *big.Int `toml:",omitempty"` + OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -283,5 +286,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideArrowGlacier != nil { c.OverrideArrowGlacier = dec.OverrideArrowGlacier } + if dec.OverrideTerminalTotalDifficulty != nil { + c.OverrideTerminalTotalDifficulty = dec.OverrideTerminalTotalDifficulty + } return nil } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go 
index 7624268a7..247d0eac6 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/trie" @@ -74,10 +75,10 @@ type HeaderRetrievalFn func(common.Hash) *types.Header type blockRetrievalFn func(common.Hash) *types.Block // headerRequesterFn is a callback type for sending a header retrieval request. -type headerRequesterFn func(common.Hash) error +type headerRequesterFn func(common.Hash, chan *eth.Response) (*eth.Request, error) // bodyRequesterFn is a callback type for sending a body retrieval request. -type bodyRequesterFn func([]common.Hash) error +type bodyRequesterFn func([]common.Hash, chan *eth.Response) (*eth.Request, error) // headerVerifierFn is a callback type to verify a block's header for fast propagation. type headerVerifierFn func(header *types.Header) error @@ -461,15 +462,28 @@ func (f *BlockFetcher) loop() { // Create a closure of the fetch and schedule in on a new thread fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes - go func() { + go func(peer string) { if f.fetchingHook != nil { f.fetchingHook(hashes) } for _, hash := range hashes { headerFetchMeter.Mark(1) - fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals + go func(hash common.Hash) { + resCh := make(chan *eth.Response) + + req, err := fetchHeader(hash, resCh) + if err != nil { + return // Legacy code, yolo + } + defer req.Close() + + res := <-resCh + res.Done <- nil + + f.FilterHeaders(peer, *res.Res.(*eth.BlockHeadersPacket), time.Now().Add(res.Time)) + }(hash) } - }() + }(peer) } // Schedule the next fetch if blocks are still pending f.rescheduleFetch(fetchTimer) @@ -497,8 +511,24 @@ func (f *BlockFetcher) loop() { if f.completingHook != nil { f.completingHook(hashes) } + fetchBodies := f.completing[hashes[0]].fetchBodies bodyFetchMeter.Mark(int64(len(hashes))) - go f.completing[hashes[0]].fetchBodies(hashes) + + go func(peer string, hashes []common.Hash) { + resCh := make(chan *eth.Response) + + req, err := fetchBodies(hashes, resCh) + if err != nil { + return // Legacy code, yolo + } + defer req.Close() + + res := <-resCh + res.Done <- nil + + txs, uncles := res.Res.(*eth.BlockBodiesPacket).Unpack() + f.FilterBodies(peer, txs, uncles, time.Now()) + }(peer, hashes) } // Schedule the next fetch if blocks are still pending f.rescheduleComplete(completeTimer) diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index b6d1125b5..628a56504 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) @@ -60,8 +61,8 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common block.AddTx(tx) } // If the block number is a multiple of 5, add a bonus uncle to the block - if i%5 == 0 { - block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))}) + if i > 0 && i%5 == 0 { + block.AddUncle(&types.Header{ParentHash: 
block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))}) } }) hashes := make([]common.Hash, n+1) @@ -195,16 +196,26 @@ func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*t closure[hash] = block } // Create a function that return a header from the closure - return func(hash common.Hash) error { + return func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { // Gather the blocks to return headers := make([]*types.Header, 0, 1) if block, ok := closure[hash]; ok { headers = append(headers, block.Header()) } // Return on a new thread - go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift)) - - return nil + req := ð.Request{ + Peer: peer, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockHeadersPacket)(&headers), + Time: drift, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil } } @@ -215,7 +226,7 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ closure[hash] = block } // Create a function that returns blocks from the closure - return func(hashes []common.Hash) error { + return func(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { // Gather the block bodies to return transactions := make([][]*types.Transaction, 0, len(hashes)) uncles := make([][]*types.Header, 0, len(hashes)) @@ -227,14 +238,33 @@ func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*typ } } // Return on a new thread - go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift)) - - return nil + bodies := make([]*eth.BlockBody, len(transactions)) + for i, txs := range transactions { + bodies[i] = ð.BlockBody{ + Transactions: txs, + Uncles: uncles[i], + } + } + req := ð.Request{ + Peer: peer, + } + res := ð.Response{ + Req: req, + Res: (*eth.BlockBodiesPacket)(&bodies), + Time: drift, + Done: make(chan error, 1), // Ignore the returned status + } + go func() { + sink <- res + }() + return req, nil } } // verifyFetchingEvent verifies that one single event arrive on a fetching channel. func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) { + t.Helper() + if arrive { select { case <-fetching: @@ -252,6 +282,8 @@ func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) // verifyCompletingEvent verifies that one single event arrive on an completing channel. func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) { + t.Helper() + if arrive { select { case <-completing: @@ -269,6 +301,8 @@ func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive b // verifyImportEvent verifies that one single event arrive on an import channel. func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { + t.Helper() + if arrive { select { case <-imported: @@ -287,6 +321,8 @@ func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { // verifyImportCount verifies that exactly count number of events arrive on an // import hook channel. func verifyImportCount(t *testing.T, imported chan interface{}, count int) { + t.Helper() + for i := 0; i < count; i++ { select { case <-imported: @@ -299,6 +335,8 @@ func verifyImportCount(t *testing.T, imported chan interface{}, count int) { // verifyImportDone verifies that no more events are arriving on an import channel. 
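The fetcher hooks above changed shape: instead of fire-and-forget calls whose results re-enter through global filters, each request now returns an `*eth.Request` handle and streams its single reply through a caller-supplied sink channel, with `Done` used to acknowledge consumption. A condensed, runnable sketch of that handshake (simplified stand-in types, not the real `eth` package):

```go
package main

import "fmt"

type response struct {
	data string
	done chan error // consumer signals here once the reply is processed
}

type request struct{ peer string }

// Close stands in for eth.Request.Close, tearing down the in-flight request.
func (r *request) Close() { fmt.Println("request to", r.peer, "closed") }

// fetch mimics the new callback shape: it returns the in-flight request
// handle immediately and later pushes exactly one response into sink.
func fetch(peer string, sink chan *response) (*request, error) {
	req := &request{peer: peer}
	go func() {
		sink <- &response{data: "header", done: make(chan error, 1)}
	}()
	return req, nil
}

func main() {
	sink := make(chan *response)

	req, err := fetch("peer-1", sink)
	if err != nil {
		return
	}
	defer req.Close()

	res := <-sink
	res.done <- nil // acknowledge consumption, as the fetcher does via res.Done
	fmt.Println("got", res.data)
}
```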
func verifyImportDone(t *testing.T, imported chan interface{}) { + t.Helper() + select { case <-imported: t.Fatalf("extra block imported") @@ -308,6 +346,8 @@ func verifyImportDone(t *testing.T, imported chan interface{}) { // verifyChainHeight verifies the chain height is as expected. func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) { + t.Helper() + if fetcher.chainHeight() != height { t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height) } @@ -368,13 +408,13 @@ func testConcurrentAnnouncements(t *testing.T, light bool) { secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0) counter := uint32(0) - firstHeaderWrapper := func(hash common.Hash) error { + firstHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { atomic.AddUint32(&counter, 1) - return firstHeaderFetcher(hash) + return firstHeaderFetcher(hash, sink) } - secondHeaderWrapper := func(hash common.Hash) error { + secondHeaderWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { atomic.AddUint32(&counter, 1) - return secondHeaderFetcher(hash) + return secondHeaderFetcher(hash, sink) } // Iteratively announce blocks until all are imported imported := make(chan interface{}) @@ -468,15 +508,20 @@ func testPendingDeduplication(t *testing.T, light bool) { delay := 50 * time.Millisecond counter := uint32(0) - headerWrapper := func(hash common.Hash) error { + headerWrapper := func(hash common.Hash, sink chan *eth.Response) (*eth.Request, error) { atomic.AddUint32(&counter, 1) // Simulate a long running fetch - go func() { - time.Sleep(delay) - headerFetcher(hash) - }() - return nil + resink := make(chan *eth.Response) + req, err := headerFetcher(hash, resink) + if err == nil { + go func() { + res := <-resink + time.Sleep(delay) + sink <- res + }() + } + return req, err } checkNonExist := func() bool { return tester.getBlock(hashes[0]) == nil diff --git a/eth/filters/api.go b/eth/filters/api.go index e0b07e318..6b28ec961 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -29,7 +29,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/rpc" ) @@ -51,7 +50,6 @@ type PublicFilterAPI struct { backend Backend mux *event.TypeMux quit chan struct{} - chainDb ethdb.Database events *EventSystem filtersMu sync.Mutex filters map[rpc.ID]*filter @@ -62,7 +60,6 @@ type PublicFilterAPI struct { func NewPublicFilterAPI(backend Backend, lightMode bool, timeout time.Duration) *PublicFilterAPI { api := &PublicFilterAPI{ backend: backend, - chainDb: backend.ChainDb(), events: NewEventSystem(backend, lightMode), filters: make(map[rpc.ID]*filter), timeout: timeout, diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 2d394200a..c0d3c6b60 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -144,7 +144,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke // Construct testing chain diskdb := rawdb.NewMemoryDatabase() gspec.Commit(diskdb) - chain, err := core.NewBlockChain(diskdb, &core.CacheConfig{TrieCleanNoPrefetch: true}, &config, engine, vm.Config{}, nil, nil) + chain, err := core.NewBlockChain(diskdb, &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec.Config, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to 
create local chain, %v", err) } diff --git a/eth/handler.go b/eth/handler.go index 41d89c5fb..55ca869c7 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -25,6 +25,8 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" @@ -37,7 +39,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" ) const ( @@ -79,9 +80,10 @@ type handlerConfig struct { Database ethdb.Database // Database for direct sync insertions Chain *core.BlockChain // Blockchain to serve data from TxPool txPool // Transaction pool to propagate from + Merger *consensus.Merger // The manager for eth1/2 transition Network uint64 // Network identifier to adfvertise - Sync downloader.SyncMode // Whether to fast or full sync - BloomCache uint64 // Megabytes to alloc for fast sync bloom + Sync downloader.SyncMode // Whether to snap or full sync + BloomCache uint64 // Megabytes to alloc for snap sync bloom EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged @@ -91,8 +93,7 @@ type handler struct { networkID uint64 forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node - fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) - snapSync uint32 // Flag whether fast sync should operate on top of the snap protocol + snapSync uint32 // Flag whether snap sync is enabled (gets disabled if we already have blocks) acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing) checkpointNumber uint64 // Block number for the sync progress validator to cross reference @@ -104,10 +105,10 @@ type handler struct { maxPeers int downloader *downloader.Downloader - stateBloom *trie.SyncBloom blockFetcher *fetcher.BlockFetcher txFetcher *fetcher.TxFetcher peers *peerSet + merger *consensus.Merger eventMux *event.TypeMux txsCh chan core.NewTxsEvent @@ -138,33 +139,31 @@ func newHandler(config *handlerConfig) (*handler, error) { txpool: config.TxPool, chain: config.Chain, peers: newPeerSet(), + merger: config.Merger, whitelist: config.Whitelist, quitSync: make(chan struct{}), } if config.Sync == downloader.FullSync { - // The database seems empty as the current block is the genesis. Yet the fast - // block is ahead, so fast sync was enabled for this node at a certain point. + // The database seems empty as the current block is the genesis. Yet the snap + // block is ahead, so snap sync was enabled for this node at a certain point. // The scenarios where this can happen is - // * if the user manually (or via a bad block) rolled back a fast sync node + // * if the user manually (or via a bad block) rolled back a snap sync node // below the sync point. - // * the last fast sync is not finished while user specifies a full sync this + // * the last snap sync is not finished while user specifies a full sync this // time. But we don't have any recent state for full sync. - // In these cases however it's safe to reenable fast sync. + // In these cases however it's safe to reenable snap sync. 
 		fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock()
 		if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {
-			h.fastSync = uint32(1)
-			log.Warn("Switch sync mode from full sync to fast sync")
+			h.snapSync = uint32(1)
+			log.Warn("Switch sync mode from full sync to snap sync")
 		}
 	} else {
 		if h.chain.CurrentBlock().NumberU64() > 0 {
-			// Print warning log if database is not empty to run fast sync.
-			log.Warn("Switch sync mode from fast sync to full sync")
+			// Print warning log if database is not empty to run snap sync.
+			log.Warn("Switch sync mode from snap sync to full sync")
 		} else {
-			// If fast sync was requested and our database is empty, grant it
-			h.fastSync = uint32(1)
-			if config.Sync == downloader.SnapSync {
-				h.snapSync = uint32(1)
-			}
+			// If snap sync was requested and our database is empty, grant it
+			h.snapSync = uint32(1)
 		}
 	}
 	// If we have trusted checkpoints, enforce them on the chain
@@ -172,26 +171,48 @@ func newHandler(config *handlerConfig) (*handler, error) {
 		h.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1
 		h.checkpointHash = config.Checkpoint.SectionHead
 	}
-	// Construct the downloader (long sync) and its backing state bloom if fast
+	// Construct the downloader (long sync) and its backing state bloom if snap
 	// sync is requested. The downloader is responsible for deallocating the state
 	// bloom when it's done.
-	// Note: we don't enable it if snap-sync is performed, since it's very heavy
-	// and the heal-portion of the snap sync is much lighter than fast. What we particularly
-	// want to avoid, is a 90%-finished (but restarted) snap-sync to begin
-	// indexing the entire trie
-	if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 {
-		h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database)
-	}
-	h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer)
+	h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer)
 	// Construct the fetcher (short sync)
 	validator := func(header *types.Header) error {
+		// All the block fetcher activities should be disabled
+		// after the transition. Print the warning log.
+		if h.merger.PoSFinalized() {
+			log.Warn("Unexpected validation activity", "hash", header.Hash(), "number", header.Number)
+			return errors.New("unexpected behavior after transition")
+		}
+		// Reject all the PoS style headers in the first place. No matter whether
+		// the chain has finished the transition or not, the PoS headers
+		// should only come from the trusted consensus layer instead of
+		// the p2p network.
+		if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok {
+			if beacon.IsPoSHeader(header) {
+				return errors.New("unexpected post-merge header")
+			}
+		}
 		return h.chain.Engine().VerifyHeader(h.chain, header, true)
 	}
 	heighter := func() uint64 {
 		return h.chain.CurrentBlock().NumberU64()
 	}
 	inserter := func(blocks types.Blocks) (int, error) {
+		// All the block fetcher activities should be disabled
+		// after the transition. Print the warning log.
+		if h.merger.PoSFinalized() {
+			var ctx []interface{}
+			ctx = append(ctx, "blocks", len(blocks))
+			if len(blocks) > 0 {
+				ctx = append(ctx, "firsthash", blocks[0].Hash())
+				ctx = append(ctx, "firstnumber", blocks[0].Number())
+				ctx = append(ctx, "lasthash", blocks[len(blocks)-1].Hash())
+				ctx = append(ctx, "lastnumber", blocks[len(blocks)-1].Number())
+			}
+			log.Warn("Unexpected insertion activity", ctx...)
+			return 0, errors.New("unexpected behavior after transition")
+		}
 		// If sync hasn't reached the checkpoint yet, deny importing weird blocks.
 		//
 		// Ideally we would also compare the head block's timestamp and similarly reject
@@ -202,15 +223,38 @@ func newHandler(config *handlerConfig) (*handler, error) {
 			log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
 			return 0, nil
 		}
-		// If fast sync is running, deny importing weird blocks. This is a problematic
-		// clause when starting up a new network, because fast-syncing miners might not
+		// If snap sync is running, deny importing weird blocks. This is a problematic
+		// clause when starting up a new network, because snap-syncing miners might not
 		// accept each others' blocks until a restart. Unfortunately we haven't figured
 		// out a way yet where nodes can decide unilaterally whether the network is new
 		// or not. This should be fixed if we figure out a solution.
-		if atomic.LoadUint32(&h.fastSync) == 1 {
+		if atomic.LoadUint32(&h.snapSync) == 1 {
 			log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
 			return 0, nil
 		}
+		if h.merger.TDDReached() {
+			// The blocks from the p2p network are regarded as untrusted
+			// after the transition. In theory block gossip should be disabled
+			// entirely whenever the transition is started. But in order to
+			// handle the transition boundary reorg in the consensus-layer,
+			// the legacy blocks are still accepted, but only for the terminal
+			// pow blocks. Spec: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-3675.md#halt-the-importing-of-pow-blocks
+			for i, block := range blocks {
+				ptd := h.chain.GetTd(block.ParentHash(), block.NumberU64()-1)
+				if ptd == nil {
+					return 0, nil
+				}
+				td := new(big.Int).Add(ptd, block.Difficulty())
+				if !h.chain.Config().IsTerminalPoWBlock(ptd, td) {
+					log.Info("Filtered out non-terminal pow block", "number", block.NumberU64(), "hash", block.Hash())
+					return 0, nil
+				}
+				if err := h.chain.InsertBlockWithoutSetHead(block); err != nil {
+					return i, err
+				}
+			}
+			return 0, nil
+		}
 		n, err := h.chain.InsertChain(blocks)
 		if err == nil {
 			atomic.StoreUint32(&h.acceptTxs, 1) // Mark initial sync done on any fetcher import
@@ -308,30 +352,93 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
 		// after this will be sent via broadcasts.
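The terminal-block clause in the inserter above admits a gossiped PoW block only if it is the one that crosses the terminal total difficulty, per EIP-3675; the arithmetic reduces to parent TD < TTD <= parent TD + difficulty, which is what `IsTerminalPoWBlock` checks. A worked sketch of that arithmetic (the TTD value is invented):

```go
package main

import (
	"fmt"
	"math/big"
)

// isTerminalPoWBlock reports whether a block whose parent has total
// difficulty ptd, and which itself reaches total difficulty td, is the
// terminal PoW block: the first one to reach the terminal total difficulty.
func isTerminalPoWBlock(ptd, td, ttd *big.Int) bool {
	return ptd.Cmp(ttd) < 0 && td.Cmp(ttd) >= 0
}

func main() {
	ttd := big.NewInt(1_000_000) // hypothetical terminal total difficulty

	ptd := big.NewInt(999_500)                     // parent still below TTD
	td := new(big.Int).Add(ptd, big.NewInt(1_000)) // td = ptd + block difficulty

	fmt.Println(isTerminalPoWBlock(ptd, td, ttd)) // true: this block crosses TTD

	// false: the parent already reached TTD, so this block is post-terminal
	fmt.Println(isTerminalPoWBlock(ttd, new(big.Int).Add(ttd, big.NewInt(1)), ttd))
}
```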
h.syncTransactions(peer) + // Create a notification channel for pending requests if the peer goes down + dead := make(chan struct{}) + defer close(dead) + // If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse) if h.checkpointHash != (common.Hash{}) { // Request the peer's checkpoint header for chain height/weight validation - if err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false); err != nil { + resCh := make(chan *eth.Response) + if _, err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false, resCh); err != nil { return err } // Start a timer to disconnect if the peer doesn't reply in time - p.syncDrop = time.AfterFunc(syncChallengeTimeout, func() { - peer.Log().Warn("Checkpoint challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) - h.removePeer(peer.ID()) - }) - // Make sure it's cleaned up if the peer dies off - defer func() { - if p.syncDrop != nil { - p.syncDrop.Stop() - p.syncDrop = nil + go func() { + timeout := time.NewTimer(syncChallengeTimeout) + defer timeout.Stop() + + select { + case res := <-resCh: + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) + if len(headers) == 0 { + // If we're doing a snap sync, we must enforce the checkpoint + // block to avoid eclipse attacks. Unsynced nodes are welcome + // to connect after we're done joining the network. + if atomic.LoadUint32(&h.snapSync) == 1 { + peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name()) + res.Done <- errors.New("unsynced node cannot serve sync") + return + } + res.Done <- nil + return + } + // Validate the header and either drop the peer or continue + if len(headers) > 1 { + res.Done <- errors.New("too many headers in checkpoint response") + return + } + if headers[0].Hash() != h.checkpointHash { + res.Done <- errors.New("checkpoint hash mismatch") + return + } + res.Done <- nil + + case <-timeout.C: + peer.Log().Warn("Checkpoint challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) + h.removePeer(peer.ID()) + + case <-dead: + // Peer handler terminated, abort all goroutines } }() } // If we have any explicit whitelist block hashes, request them - for number := range h.whitelist { - if err := peer.RequestHeadersByNumber(number, 1, 0, false); err != nil { + for number, hash := range h.whitelist { + resCh := make(chan *eth.Response) + if _, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh); err != nil { return err } + go func(number uint64, hash common.Hash) { + timeout := time.NewTimer(syncChallengeTimeout) + defer timeout.Stop() + + select { + case res := <-resCh: + headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket)) + if len(headers) == 0 { + // Whitelisted blocks are allowed to be missing if the remote + // node is not yet synced + res.Done <- nil + return + } + // Validate the header and either drop the peer or continue + if len(headers) > 1 { + res.Done <- errors.New("too many headers in whitelist response") + return + } + if headers[0].Number.Uint64() != number || headers[0].Hash() != hash { + peer.Log().Info("Whitelist mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash) + res.Done <- errors.New("whitelist block mismatch") + return + } + peer.Log().Debug("Whitelist block verified", "number", number, "hash", hash) + + case <-timeout.C: + peer.Log().Warn("Whitelist challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) + h.removePeer(peer.ID()) + } + }(number, 
hash) } // Handle incoming messages until the connection is torn down return handler(peer) @@ -432,6 +539,17 @@ func (h *handler) Stop() { // BroadcastBlock will either propagate a block to a subset of its peers, or // will only announce its availability (depending what's requested). func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { + // Disable the block propagation if the chain has already entered the PoS + // stage. The block propagation is delegated to the consensus layer. + if h.merger.PoSFinalized() { + return + } + // Disable the block propagation if it's the post-merge block. + if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok { + if beacon.IsPoSHeader(block.Header()) { + return + } + } hash := block.Hash() peers := h.peers.peersWithoutBlock(hash) diff --git a/eth/handler_eth.go b/eth/handler_eth.go index 3ff9f2245..bfe95e8c4 100644 --- a/eth/handler_eth.go +++ b/eth/handler_eth.go @@ -17,7 +17,6 @@ package eth import ( - "errors" "fmt" "math/big" "sync/atomic" @@ -27,18 +26,15 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/trie" ) // ethHandler implements the eth.Backend interface to handle the various network // packets that are sent as replies or broadcasts. type ethHandler handler -func (h *ethHandler) Chain() *core.BlockChain { return h.chain } -func (h *ethHandler) StateBloom() *trie.SyncBloom { return h.stateBloom } -func (h *ethHandler) TxPool() eth.TxPool { return h.txpool } +func (h *ethHandler) Chain() *core.BlockChain { return h.chain } +func (h *ethHandler) TxPool() eth.TxPool { return h.txpool } // RunPeer is invoked when a peer joins on the `eth` protocol. func (h *ethHandler) RunPeer(peer *eth.Peer, hand eth.Handler) error { @@ -64,25 +60,6 @@ func (h *ethHandler) AcceptTxs() bool { func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { // Consume any broadcasts and announces, forwarding the rest to the downloader switch packet := packet.(type) { - case *eth.BlockHeadersPacket: - return h.handleHeaders(peer, *packet) - - case *eth.BlockBodiesPacket: - txset, uncleset := packet.Unpack() - return h.handleBodies(peer, txset, uncleset) - - case *eth.NodeDataPacket: - if err := h.downloader.DeliverNodeData(peer.ID(), *packet); err != nil { - log.Debug("Failed to deliver node state data", "err", err) - } - return nil - - case *eth.ReceiptsPacket: - if err := h.downloader.DeliverReceipts(peer.ID(), *packet); err != nil { - log.Debug("Failed to deliver receipts", "err", err) - } - return nil - case *eth.NewBlockHashesPacket: hashes, numbers := packet.Unpack() return h.handleBlockAnnounces(peer, hashes, numbers) @@ -104,82 +81,17 @@ func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { } } -// handleHeaders is invoked from a peer's message handler when it transmits a batch -// of headers for the local node to process. 
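With the reply cases stripped out of `Backend.Handle` above, response packets no longer flow through the backend at all: whoever issued the request receives the reply on a channel they own. A minimal sketch of that consumption pattern, mirroring the checkpoint-challenge code above (the helper name `fetchHeader`, the peer `p` and the timeout value are illustrative, not part of this patch):

```go
package example

import (
	"errors"
	"time"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// fetchHeader requests a single header by number and waits for the reply on
// its own sink channel, the way the checkpoint challenge above does.
func fetchHeader(p *eth.Peer, number uint64) (*types.Header, error) {
	resCh := make(chan *eth.Response)
	req, err := p.RequestHeadersByNumber(number, 1, 0, false, resCh)
	if err != nil {
		return nil, err
	}
	defer req.Close() // Tell the dispatcher to discard any late reply

	timeout := time.NewTimer(10 * time.Second)
	defer timeout.Stop()

	select {
	case res := <-resCh:
		headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
		res.Done <- nil // Unblock the dispatcher's delivery routine
		if len(headers) != 1 {
			return nil, errors.New("unexpected number of headers")
		}
		return headers[0], nil

	case <-timeout.C:
		return nil, errors.New("header request timed out")
	}
}
```

Note the two obligations this places on callers: every delivered `Response` must be acknowledged via `Done`, and every abandoned `Request` must be `Close()`d so late replies can be discarded.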
-func (h *ethHandler) handleHeaders(peer *eth.Peer, headers []*types.Header) error {
-	p := h.peers.peer(peer.ID())
-	if p == nil {
-		return errors.New("unregistered during callback")
-	}
-	// If no headers were received, but we're expencting a checkpoint header, consider it that
-	if len(headers) == 0 && p.syncDrop != nil {
-		// Stop the timer either way, decide later to drop or not
-		p.syncDrop.Stop()
-		p.syncDrop = nil
-
-		// If we're doing a fast (or snap) sync, we must enforce the checkpoint block to avoid
-		// eclipse attacks. Unsynced nodes are welcome to connect after we're done
-		// joining the network
-		if atomic.LoadUint32(&h.fastSync) == 1 {
-			peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name())
-			return errors.New("unsynced node cannot serve sync")
-		}
-	}
-	// Filter out any explicitly requested headers, deliver the rest to the downloader
-	filter := len(headers) == 1
-	if filter {
-		// If it's a potential sync progress check, validate the content and advertised chain weight
-		if p.syncDrop != nil && headers[0].Number.Uint64() == h.checkpointNumber {
-			// Disable the sync drop timer
-			p.syncDrop.Stop()
-			p.syncDrop = nil
-
-			// Validate the header and either drop the peer or continue
-			if headers[0].Hash() != h.checkpointHash {
-				return errors.New("checkpoint hash mismatch")
-			}
-			return nil
-		}
-		// Otherwise if it's a whitelisted block, validate against the set
-		if want, ok := h.whitelist[headers[0].Number.Uint64()]; ok {
-			if hash := headers[0].Hash(); want != hash {
-				peer.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want)
-				return errors.New("whitelist block mismatch")
-			}
-			peer.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
-		}
-		// Irrelevant of the fork checks, send the header to the fetcher just in case
-		headers = h.blockFetcher.FilterHeaders(peer.ID(), headers, time.Now())
-	}
-	if len(headers) > 0 || !filter {
-		err := h.downloader.DeliverHeaders(peer.ID(), headers)
-		if err != nil {
-			log.Debug("Failed to deliver headers", "err", err)
-		}
-	}
-	return nil
-}
-
-// handleBodies is invoked from a peer's message handler when it transmits a batch
-// of block bodies for the local node to process.
-func (h *ethHandler) handleBodies(peer *eth.Peer, txs [][]*types.Transaction, uncles [][]*types.Header) error {
-	// Filter out any explicitly requested bodies, deliver the rest to the downloader
-	filter := len(txs) > 0 || len(uncles) > 0
-	if filter {
-		txs, uncles = h.blockFetcher.FilterBodies(peer.ID(), txs, uncles, time.Now())
-	}
-	if len(txs) > 0 || len(uncles) > 0 || !filter {
-		err := h.downloader.DeliverBodies(peer.ID(), txs, uncles)
-		if err != nil {
-			log.Debug("Failed to deliver bodies", "err", err)
-		}
-	}
-	return nil
-}
-
 // handleBlockAnnounces is invoked from a peer's message handler when it transmits a
 // batch of block announcements for the local node to process.
 func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, numbers []uint64) error {
+	// Drop all incoming block announces from the p2p network if
+	// the chain has already entered the PoS stage and disconnect the
+	// remote peer.
+	if h.merger.PoSFinalized() {
+		// TODO (MariusVanDerWijden) drop non-updated peers after the merge
+		return nil
+		// return errors.New("unexpected block announces")
+	}
 	// Schedule all the unknown hashes for retrieval
 	var (
 		unknownHashes = make([]common.Hash, 0, len(hashes))
@@ -200,6 +112,14 @@ func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash,
 // handleBlockBroadcast is invoked from a peer's message handler when it transmits a
 // block broadcast for the local node to process.
 func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td *big.Int) error {
+	// Drop all incoming block broadcasts from the p2p network if
+	// the chain has already entered the PoS stage and disconnect the
+	// remote peer.
+	if h.merger.PoSFinalized() {
+		// TODO (MariusVanDerWijden) drop non-updated peers after the merge
+		return nil
+		// return errors.New("unexpected block broadcast")
+	}
 	// Schedule the block for import
 	h.blockFetcher.Enqueue(peer.ID(), block)
 
diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go
index b8db5039c..6e1c57cb6 100644
--- a/eth/handler_eth_test.go
+++ b/eth/handler_eth_test.go
@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/forkid"
@@ -37,7 +38,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 // testEthHandler is a mock event handler to listen for inbound network requests
@@ -49,7 +50,6 @@ type testEthHandler struct {
 }
 
 func (h *testEthHandler) Chain() *core.BlockChain { panic("no backing chain") }
-func (h *testEthHandler) StateBloom() *trie.SyncBloom { panic("no backing state bloom") }
 func (h *testEthHandler) TxPool() eth.TxPool { panic("no backing tx pool") }
 func (h *testEthHandler) AcceptTxs() bool { return true }
 func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") }
@@ -115,6 +115,7 @@ func testForkIDSplit(t *testing.T, protocol uint) {
 		Database: dbNoFork,
 		Chain: chainNoFork,
 		TxPool: newTestTxPool(),
+		Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()),
 		Network: 1,
 		Sync: downloader.FullSync,
 		BloomCache: 1,
@@ -123,6 +124,7 @@ func testForkIDSplit(t *testing.T, protocol uint) {
 		Database: dbProFork,
 		Chain: chainProFork,
 		TxPool: newTestTxPool(),
+		Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()),
 		Network: 1,
 		Sync: downloader.FullSync,
 		BloomCache: 1,
@@ -351,7 +353,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
 	seen := make(map[common.Hash]struct{})
 	for len(seen) < len(insert) {
 		switch protocol {
-		case 65, 66:
+		case 66:
 			select {
 			case hashes := <-anns:
 				for _, hash := range hashes {
@@ -361,7 +363,7 @@ func testSendTransactions(t *testing.T, protocol uint) {
 					seen[hash] = struct{}{}
 				}
 			case <-bcasts:
-				t.Errorf("initial tx broadcast received on post eth/65")
+				t.Errorf("initial tx broadcast received on post eth/66")
 			}
 
 		default:
@@ -386,6 +388,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
 	// to receive them. We need multiple sinks since a one-to-one peering would
 	// broadcast all transactions without announcement.
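The gating above hinges on `consensus.Merger`, which the test configs now construct over an in-memory database. A hedged sketch of that lifecycle — only `NewMerger` and `PoSFinalized` appear in this diff; `FinalizePoS` is assumed to be the transition method from the same package:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Track the merge status in an in-memory database, as the tests do.
	merger := consensus.NewMerger(rawdb.NewMemoryDatabase())
	fmt.Println(merger.PoSFinalized()) // false: block gossip still allowed

	// Once the transition is finalized, announce/broadcast handlers bail out.
	merger.FinalizePoS()
	fmt.Println(merger.PoSFinalized()) // true: gossip is delegated to the consensus layer
}
```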
 	source := newTestHandler()
+	source.handler.snapSync = 0 // Avoid requiring snap, otherwise some will be dropped below
 	defer source.close()
 
 	sinks := make([]*testHandler, 10)
@@ -403,7 +406,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
 		defer sourcePipe.Close()
 		defer sinkPipe.Close()
 
-		sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, source.txpool)
+		sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool)
 		sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool)
 		defer sourcePeer.Close()
 		defer sinkPeer.Close()
@@ -435,12 +438,13 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
 	// Iterate through all the sinks and ensure they all got the transactions
 	for i := range sinks {
-		for arrived := 0; arrived < len(txs); {
+		for arrived, timeout := 0, false; arrived < len(txs) && !timeout; {
 			select {
 			case event := <-txChs[i]:
 				arrived += len(event.Txs)
-			case <-time.NewTimer(time.Second).C:
+			case <-time.After(time.Second):
 				t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs))
+				timeout = true
 			}
 		}
 	}
@@ -460,23 +464,23 @@ func TestCheckpointChallenge(t *testing.T) {
 	}{
 		// If checkpointing is not enabled locally, don't challenge and don't drop
 		{downloader.FullSync, false, false, false, false, false},
-		{downloader.FastSync, false, false, false, false, false},
+		{downloader.SnapSync, false, false, false, false, false},
 
 		// If checkpointing is enabled locally and remote response is empty, only drop during fast sync
 		{downloader.FullSync, true, false, true, false, false},
-		{downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer
+		{downloader.SnapSync, true, false, true, false, true}, // Special case, snap sync, unsynced peer
 
 		// If checkpointing is enabled locally and remote response mismatches, always drop
 		{downloader.FullSync, true, false, false, false, true},
-		{downloader.FastSync, true, false, false, false, true},
+		{downloader.SnapSync, true, false, false, false, true},
 
 		// If checkpointing is enabled locally and remote response matches, never drop
 		{downloader.FullSync, true, false, false, true, false},
-		{downloader.FastSync, true, false, false, true, false},
+		{downloader.SnapSync, true, false, false, true, false},
 
 		// If checkpointing is enabled locally and remote times out, always drop
 		{downloader.FullSync, true, true, false, true, true},
-		{downloader.FastSync, true, true, false, true, true},
+		{downloader.SnapSync, true, true, false, true, true},
 	}
 	for _, tt := range tests {
 		t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {
@@ -497,10 +501,10 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
 	handler := newTestHandler()
 	defer handler.close()
 
-	if syncmode == downloader.FastSync {
-		atomic.StoreUint32(&handler.handler.fastSync, 1)
+	if syncmode == downloader.SnapSync {
+		atomic.StoreUint32(&handler.handler.snapSync, 1)
 	} else {
-		atomic.StoreUint32(&handler.handler.fastSync, 0)
+		atomic.StoreUint32(&handler.handler.snapSync, 0)
 	}
 	var response *types.Header
 	if checkpoint {
@@ -557,15 +561,17 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
 	// Create a block to reply to the challenge if no timeout is simulated.
 	if !timeout {
 		if empty {
-			if err := remote.ReplyBlockHeaders(request.RequestId, []*types.Header{}); err != nil {
+			if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{}); err != nil {
 				t.Fatalf("failed to answer challenge: %v", err)
 			}
 		} else if match {
-			if err := remote.ReplyBlockHeaders(request.RequestId, []*types.Header{response}); err != nil {
+			responseRlp, _ := rlp.EncodeToBytes(response)
+			if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
 				t.Fatalf("failed to answer challenge: %v", err)
 			}
 		} else {
-			if err := remote.ReplyBlockHeaders(request.RequestId, []*types.Header{{Number: response.Number}}); err != nil {
+			responseRlp, _ := rlp.EncodeToBytes(types.Header{Number: response.Number})
+			if err := remote.ReplyBlockHeadersRLP(request.RequestId, []rlp.RawValue{responseRlp}); err != nil {
 				t.Fatalf("failed to answer challenge: %v", err)
 			}
 		}
diff --git a/eth/handler_test.go b/eth/handler_test.go
index b2f00b797..d967b6df9 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -22,6 +22,7 @@ import (
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/consensus/ethash"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -149,8 +150,9 @@ func newTestHandlerWithBlocks(blocks int) *testHandler {
 		Database: db,
 		Chain: chain,
 		TxPool: txpool,
+		Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()),
 		Network: 1,
-		Sync: downloader.FastSync,
+		Sync: downloader.SnapSync,
 		BloomCache: 1,
 	})
 	handler.Start(1000)
diff --git a/eth/peer.go b/eth/peer.go
index 1cea9c640..024a6e619 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -18,8 +18,6 @@ package eth
 
 import (
 	"math/big"
-	"sync"
-	"time"
 
 	"github.com/ethereum/go-ethereum/eth/protocols/eth"
 	"github.com/ethereum/go-ethereum/eth/protocols/snap"
@@ -36,11 +34,8 @@ type ethPeerInfo struct {
 // ethPeer is a wrapper around eth.Peer to maintain a few extra metadata.
 type ethPeer struct {
 	*eth.Peer
-	snapExt *snapPeer // Satellite `snap` connection
-
-	syncDrop *time.Timer // Connection dropper if `eth` sync progress isn't validated in time
+	snapExt *snapPeer // Satellite `snap` connection
 	snapWait chan struct{} // Notification channel for snap connections
-	lock sync.RWMutex // Mutex protecting the internal fields
 }
 
 // info gathers and returns some `eth` protocol metadata known about a peer.
diff --git a/eth/protocols/eth/dispatcher.go b/eth/protocols/eth/dispatcher.go
new file mode 100644
index 000000000..bf88d400d
--- /dev/null
+++ b/eth/protocols/eth/dispatcher.go
@@ -0,0 +1,253 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
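The dispatcher introduced below tracks every in-flight request by ID. From a consumer's point of view the contract is symmetric: either wait for the response and signal `Done`, or cancel via `Close` so a late reply can be discarded. A minimal sketch of the cancellation side (the helper `prefetchBodies` and the `quit` channel are illustrative):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// prefetchBodies fires off a body request but abandons it cleanly if the
// caller loses interest before the remote peer answers.
func prefetchBodies(p *eth.Peer, hashes []common.Hash, quit chan struct{}) error {
	resCh := make(chan *eth.Response)
	req, err := p.RequestBodies(hashes, resCh)
	if err != nil {
		return err
	}
	select {
	case res := <-resCh:
		res.Done <- nil // Accept the delivery; the dispatcher stops tracking it
		return nil
	case <-quit:
		// The block was obtained elsewhere; untrack the request so a late
		// response is silently dropped instead of leaking to the sink.
		return req.Close()
	}
}
```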
+
+package eth
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/ethereum/go-ethereum/p2p"
+)
+
+var (
+	// errDisconnected is returned if a request is made to a peer that was
+	// already closed.
+	errDisconnected = errors.New("disconnected")
+
+	// errDanglingResponse is returned if a response arrives with a request id
+	// which does not match any existing pending request.
+	errDanglingResponse = errors.New("response to non-existent request")
+
+	// errMismatchingResponseType is returned if the remote peer sent a different
+	// packet type as a response to a request than what the local node expected.
+	errMismatchingResponseType = errors.New("mismatching response type")
+)
+
+// Request is a pending request to allow tracking it and delivering a response
+// back to the requester on their chosen channel.
+type Request struct {
+	peer *Peer // Peer to which this request belongs for untracking
+	id uint64 // Request ID to match up replies to
+
+	sink chan *Response // Channel to deliver the response on
+	cancel chan struct{} // Channel to cancel requests ahead of time
+
+	code uint64 // Message code of the request packet
+	want uint64 // Message code of the response packet
+	data interface{} // Data content of the request packet
+
+	Peer string // Demultiplexer if cross-peer requests are batched together
+	Sent time.Time // Timestamp when the request was sent
+}
+
+// Close aborts an in-flight request. Although there's no way to notify the
+// remote peer about the cancellation, this method notifies the dispatcher to
+// discard any late responses.
+func (r *Request) Close() error {
+	if r.peer == nil { // Tests mock out the dispatcher, skip internal cancellation
+		return nil
+	}
+	cancelOp := &cancel{
+		id: r.id,
+		fail: make(chan error),
+	}
+	select {
+	case r.peer.reqCancel <- cancelOp:
+		if err := <-cancelOp.fail; err != nil {
+			return err
+		}
+		close(r.cancel)
+		return nil
+	case <-r.peer.term:
+		return errDisconnected
+	}
+}
+
+// request is a wrapper around a client Request that has an error channel to
+// signal on if sending the request already failed on a network level.
+type request struct {
+	req *Request
+	fail chan error
+}
+
+// cancel is a maintenance type on the dispatcher to stop tracking a pending
+// request.
+type cancel struct {
+	id uint64 // Request ID to stop tracking
+	fail chan error
+}
+
+// Response is a reply packet to a previously created request. It is delivered
+// on the channel assigned by the requester subsystem and contains the original
+// request embedded to allow uniquely matching it caller side.
+type Response struct {
+	id uint64 // Request ID to match up this reply to
+	recv time.Time // Timestamp when the request was received
+	code uint64 // Response packet type to cross validate with request
+
+	Req *Request // Original request to cross-reference with
+	Res interface{} // Remote response for the request query
+	Meta interface{} // Metadata generated locally on the receiver thread
+	Time time.Duration // Time it took for the request to be served
+	Done chan error // Channel to signal message handling to the reader
+}
+
+// response is a wrapper around a remote Response that has an error channel to
+// signal on if processing the response failed.
+type response struct {
+	res *Response
+	fail chan error
+}
+
+// dispatchRequest schedules the request to the dispatcher for tracking and
+// network serialization, blocking until it's successfully sent.
+//
+// The returned Request must either be closed before discarding it, or the reply
+// must be waited for and the Response's Done channel signalled.
+func (p *Peer) dispatchRequest(req *Request) error {
+	reqOp := &request{
+		req: req,
+		fail: make(chan error),
+	}
+	req.cancel = make(chan struct{})
+	req.peer = p
+	req.Peer = p.id
+
+	select {
+	case p.reqDispatch <- reqOp:
+		return <-reqOp.fail
+	case <-p.term:
+		return errDisconnected
+	}
+}
+
+// dispatchResponse fulfils a pending request and delivers it to the requested
+// sink.
+func (p *Peer) dispatchResponse(res *Response, metadata func() interface{}) error {
+	resOp := &response{
+		res: res,
+		fail: make(chan error),
+	}
+	res.recv = time.Now()
+	res.Done = make(chan error)
+
+	select {
+	case p.resDispatch <- resOp:
+		// Ensure the response is accepted by the dispatcher
+		if err := <-resOp.fail; err != nil {
+			return nil
+		}
+		// Request was accepted, run any postprocessing step to generate metadata
+		// on the receiver thread, not the sink thread
+		if metadata != nil {
+			res.Meta = metadata()
+		}
+		// Deliver the filled out response and wait until it's handled. This
+		// path is a bit funky as Go's select has no order, so if a response
+		// arrives to an already cancelled request, there's a 50-50 chance
+		// of picking one channel or the other. To avoid such cases delivering
+		// the packet upstream, check for cancellation first and only after
+		// block on delivery.
+		select {
+		case <-res.Req.cancel:
+			return nil // Request cancelled, silently discard response
+		default:
+			// Request not yet cancelled, attempt to deliver it, but do watch
+			// for fresh cancellations too
+			select {
+			case res.Req.sink <- res:
+				return <-res.Done // Response delivered, return any errors
+			case <-res.Req.cancel:
+				return nil // Request cancelled, silently discard response
+			}
+		}
+
+	case <-p.term:
+		return errDisconnected
+	}
+}
+
+// dispatcher is a loop that accepts requests from higher layer packages, pushes
+// them to the network and tracks and dispatches the responses back to the original
+// requester.
+func (p *Peer) dispatcher() {
+	pending := make(map[uint64]*Request)
+
+	for {
+		select {
+		case reqOp := <-p.reqDispatch:
+			req := reqOp.req
+			req.Sent = time.Now()
+
+			requestTracker.Track(p.id, p.version, req.code, req.want, req.id)
+			err := p2p.Send(p.rw, req.code, req.data)
+			reqOp.fail <- err
+
+			if err == nil {
+				pending[req.id] = req
+			}
+
+		case cancelOp := <-p.reqCancel:
+			// Retrieve the pending request to cancel and short circuit if it
+			// has already been serviced and is not available anymore
+			req := pending[cancelOp.id]
+			if req == nil {
+				cancelOp.fail <- nil
+				continue
+			}
+			// Stop tracking the request
+			delete(pending, cancelOp.id)
+			cancelOp.fail <- nil
+
+		case resOp := <-p.resDispatch:
+			res := resOp.res
+			res.Req = pending[res.id]
+
+			// Independent of whether the request exists or not, track this packet
+			requestTracker.Fulfil(p.id, p.version, res.code, res.id)
+
+			switch {
+			case res.Req == nil:
+				// Response arrived with an untracked ID. Since even cancelled
+				// requests are tracked until fulfilment, a dangling response
+				// means the remote peer implements the protocol badly.
+				resOp.fail <- errDanglingResponse
+
+			case res.Req.want != res.code:
+				// Response arrived, but it's a different packet type than the
+				// one expected by the requester. Either the local code is bad,
+				// or the remote peer sent junk. In neither case can we handle
+				// the packet.
+ resOp.fail <- fmt.Errorf("%w: have %d, want %d", errMismatchingResponseType, res.code, res.Req.want) + + default: + // All dispatcher checks passed and the response was initialized + // with the matching request. Signal to the delivery routine that + // it can wait for a handler response and dispatch the data. + res.Time = res.recv.Sub(res.Req.Sent) + resOp.fail <- nil + + // Stop tracking the request, the response dispatcher will deliver + delete(pending, res.id) + } + + case <-p.term: + return + } + } +} diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 828930014..81d45d8b8 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -29,16 +29,12 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" ) const ( // softResponseLimit is the target maximum size of replies to data retrievals. softResponseLimit = 2 * 1024 * 1024 - // estHeaderSize is the approximate size of an RLP encoded block header. - estHeaderSize = 500 - // maxHeadersServe is the maximum number of block headers to serve. This number // is there to limit the number of disk lookups. maxHeadersServe = 1024 @@ -69,9 +65,6 @@ type Backend interface { // Chain retrieves the blockchain object to serve data. Chain() *core.BlockChain - // StateBloom retrieves the bloom filter - if any - for state trie nodes. - StateBloom() *trie.SyncBloom - // TxPool retrieves the transaction pool object to serve data. TxPool() TxPool @@ -96,7 +89,7 @@ type Backend interface { // TxPool defines the methods needed by the protocol handler to serve transactions. type TxPool interface { - // Get retrieves the the transaction from the local txpool with the given hash. + // Get retrieves the transaction from the local txpool with the given hash. Get(hash common.Hash) *types.Transaction } diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 66f013409..7d9b37883 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -34,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" ) var ( @@ -91,9 +90,8 @@ func (b *testBackend) close() { b.chain.Stop() } -func (b *testBackend) Chain() *core.BlockChain { return b.chain } -func (b *testBackend) StateBloom() *trie.SyncBloom { return nil } -func (b *testBackend) TxPool() TxPool { return b.txpool } +func (b *testBackend) Chain() *core.BlockChain { return b.chain } +func (b *testBackend) TxPool() TxPool { return b.txpool } func (b *testBackend) RunPeer(peer *Peer, handler Handler) error { // Normally the backend would do peer mainentance and handshakes. 
All that @@ -138,11 +136,13 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { query *GetBlockHeadersPacket // The query to execute for header retrieval expect []common.Hash // The hashes of the block whose headers are expected }{ - // A single random block should be retrievable by hash and number too + // A single random block should be retrievable by hash { &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, - }, { + }, + // A single random block should be retrievable by number + { &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, }, @@ -182,10 +182,15 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { { &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, - }, { + }, + { &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64()}, Amount: 1}, []common.Hash{backend.chain.CurrentBlock().Hash()}, }, + { // If the peer requests a bit into the future, we deliver what we have + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64()}, Amount: 10}, + []common.Hash{backend.chain.CurrentBlock().Hash()}, + }, // Ensure protocol limits are honored { &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, @@ -282,7 +287,7 @@ func testGetBlockHeaders(t *testing.T, protocol uint) { RequestId: 456, BlockHeadersPacket: headers, }); err != nil { - t.Errorf("test %d: headers mismatch: %v", i, err) + t.Errorf("test %d by hash: headers mismatch: %v", i, err) } } } diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index e54838cbc..8fc966e7a 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -34,11 +35,22 @@ func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetBlockHeadersQuery(backend, query.GetBlockHeadersPacket, peer) - return peer.ReplyBlockHeaders(query.RequestId, response) + response := ServiceGetBlockHeadersQuery(backend.Chain(), query.GetBlockHeadersPacket, peer) + return peer.ReplyBlockHeadersRLP(query.RequestId, response) } -func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, peer *Peer) []*types.Header { +// ServiceGetBlockHeadersQuery assembles the response to a header query. It is +// exposed to allow external packages to test protocol behavior. +func ServiceGetBlockHeadersQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue { + if query.Skip == 0 { + // The fast path: when the request is for a contiguous segment of headers. 
+		return serviceContiguousBlockHeaderQuery(chain, query)
+	} else {
+		return serviceNonContiguousBlockHeaderQuery(chain, query, peer)
+	}
+}
+
+func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket, peer *Peer) []rlp.RawValue {
 	hashMode := query.Origin.Hash != (common.Hash{})
 	first := true
 	maxNonCanonical := uint64(100)
@@ -46,7 +58,7 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
 	// Gather headers until the fetch or network limits is reached
 	var (
 		bytes common.StorageSize
-		headers []*types.Header
+		headers []rlp.RawValue
 		unknown bool
 		lookups int
 	)
@@ -58,22 +70,25 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
 		if hashMode {
 			if first {
 				first = false
-				origin = backend.Chain().GetHeaderByHash(query.Origin.Hash)
+				origin = chain.GetHeaderByHash(query.Origin.Hash)
 				if origin != nil {
 					query.Origin.Number = origin.Number.Uint64()
 				}
 			} else {
-				origin = backend.Chain().GetHeader(query.Origin.Hash, query.Origin.Number)
+				origin = chain.GetHeader(query.Origin.Hash, query.Origin.Number)
 			}
 		} else {
-			origin = backend.Chain().GetHeaderByNumber(query.Origin.Number)
+			origin = chain.GetHeaderByNumber(query.Origin.Number)
 		}
 		if origin == nil {
 			break
 		}
-		headers = append(headers, origin)
-		bytes += estHeaderSize
-
+		if rlpData, err := rlp.EncodeToBytes(origin); err != nil {
+			log.Crit("Unable to encode our own headers", "err", err)
+		} else {
+			headers = append(headers, rlp.RawValue(rlpData))
+			bytes += common.StorageSize(len(rlpData))
+		}
 		// Advance to the next header of the query
 		switch {
 		case hashMode && query.Reverse:
@@ -82,7 +97,7 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
 			if ancestor == 0 {
 				unknown = true
 			} else {
-				query.Origin.Hash, query.Origin.Number = backend.Chain().GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
+				query.Origin.Hash, query.Origin.Number = chain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
 				unknown = (query.Origin.Hash == common.Hash{})
 			}
 		case hashMode && !query.Reverse:
@@ -96,9 +111,9 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
 				peer.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
 				unknown = true
 			} else {
-				if header := backend.Chain().GetHeaderByNumber(next); header != nil {
+				if header := chain.GetHeaderByNumber(next); header != nil {
 					nextHash := header.Hash()
-					expOldHash, _ := backend.Chain().GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
+					expOldHash, _ := chain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
 					if expOldHash == query.Origin.Hash {
 						query.Origin.Hash, query.Origin.Number = nextHash, next
 					} else {
@@ -124,17 +139,82 @@ func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, p
 	return headers
 }
 
+func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersPacket) []rlp.RawValue {
+	count := query.Amount
+	if count > maxHeadersServe {
+		count = maxHeadersServe
+	}
+	if query.Origin.Hash == (common.Hash{}) {
+		// Number mode, just return the canon chain segment. The backend
+		// delivers in [N, N-1, N-2..] descending order, so we need to
+		// account for that.
+ from := query.Origin.Number + if !query.Reverse { + from = from + count - 1 + } + headers := chain.GetHeadersFrom(from, count) + if !query.Reverse { + for i, j := 0, len(headers)-1; i < j; i, j = i+1, j-1 { + headers[i], headers[j] = headers[j], headers[i] + } + } + return headers + } + // Hash mode. + var ( + headers []rlp.RawValue + hash = query.Origin.Hash + header = chain.GetHeaderByHash(hash) + ) + if header != nil { + rlpData, _ := rlp.EncodeToBytes(header) + headers = append(headers, rlpData) + } else { + // We don't even have the origin header + return headers + } + num := header.Number.Uint64() + if !query.Reverse { + // Theoretically, we are tasked to deliver header by hash H, and onwards. + // However, if H is not canon, we will be unable to deliver any descendants of + // H. + if canonHash := chain.GetCanonicalHash(num); canonHash != hash { + // Not canon, we can't deliver descendants + return headers + } + descendants := chain.GetHeadersFrom(num+count-1, count-1) + for i, j := 0, len(descendants)-1; i < j; i, j = i+1, j-1 { + descendants[i], descendants[j] = descendants[j], descendants[i] + } + headers = append(headers, descendants...) + return headers + } + { // Last mode: deliver ancestors of H + for i := uint64(1); header != nil && i < count; i++ { + header = chain.GetHeaderByHash(header.ParentHash) + if header == nil { + break + } + rlpData, _ := rlp.EncodeToBytes(header) + headers = append(headers, rlpData) + } + return headers + } +} + func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { // Decode the block body retrieval message var query GetBlockBodiesPacket66 if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetBlockBodiesQuery(backend, query.GetBlockBodiesPacket, peer) + response := ServiceGetBlockBodiesQuery(backend.Chain(), query.GetBlockBodiesPacket) return peer.ReplyBlockBodiesRLP(query.RequestId, response) } -func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer *Peer) []rlp.RawValue { +// ServiceGetBlockBodiesQuery assembles the response to a body query. It is +// exposed to allow external packages to test protocol behavior. +func ServiceGetBlockBodiesQuery(chain *core.BlockChain, query GetBlockBodiesPacket) []rlp.RawValue { // Gather blocks until the fetch or network limits is reached var ( bytes int @@ -145,7 +225,7 @@ func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer lookups >= 2*maxBodiesServe { break } - if data := backend.Chain().GetBodyRLP(hash); len(data) != 0 { + if data := chain.GetBodyRLP(hash); len(data) != 0 { bodies = append(bodies, data) bytes += len(data) } @@ -159,11 +239,13 @@ func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetNodeDataQuery(backend, query.GetNodeDataPacket, peer) + response := ServiceGetNodeDataQuery(backend.Chain(), query.GetNodeDataPacket) return peer.ReplyNodeData(query.RequestId, response) } -func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer) [][]byte { +// ServiceGetNodeDataQuery assembles the response to a node data query. It is +// exposed to allow external packages to test protocol behavior. 
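Because the old answer* helpers are now exported as Service*Query functions taking a `*core.BlockChain` directly, external packages (the downloader tests, for instance) can synthesize protocol responses without spinning up a live peer. A hedged sketch — `chain` is assumed to be a fully initialized `*core.BlockChain`, and passing a nil peer is only safe because the contiguous fast path (Skip == 0) never touches it:

```go
package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// serveHeaders synthesizes a header response the way a remote peer would.
func serveHeaders(chain *core.BlockChain) {
	query := &eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{Number: 1},
		Amount: 10, // Skip == 0, so the contiguous fast path is taken
	}
	headers := eth.ServiceGetBlockHeadersQuery(chain, query, nil)
	fmt.Println("RLP-encoded headers served:", len(headers))
}
```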
+func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -175,14 +257,10 @@ func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer break } // Retrieve the requested state entry - if bloom := backend.StateBloom(); bloom != nil && !bloom.Contains(hash[:]) { - // Only lookup the trie node if there's chance that we actually have it - continue - } - entry, err := backend.Chain().TrieNode(hash) + entry, err := chain.TrieNode(hash) if len(entry) == 0 || err != nil { // Read the contract code with prefix only to save unnecessary lookups. - entry, err = backend.Chain().ContractCodeWithPrefix(hash) + entry, err = chain.ContractCodeWithPrefix(hash) } if err == nil && len(entry) > 0 { nodes = append(nodes, entry) @@ -198,11 +276,13 @@ func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(&query); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - response := answerGetReceiptsQuery(backend, query.GetReceiptsPacket, peer) + response := ServiceGetReceiptsQuery(backend.Chain(), query.GetReceiptsPacket) return peer.ReplyReceiptsRLP(query.RequestId, response) } -func answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer) []rlp.RawValue { +// ServiceGetReceiptsQuery assembles the response to a receipt query. It is +// exposed to allow external packages to test protocol behavior. +func ServiceGetReceiptsQuery(chain *core.BlockChain, query GetReceiptsPacket) []rlp.RawValue { // Gather state data until the fetch or network limits is reached var ( bytes int @@ -214,9 +294,9 @@ func answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer break } // Retrieve the requested block's receipts - results := backend.Chain().GetReceiptsByHash(hash) + results := chain.GetReceiptsByHash(hash) if results == nil { - if header := backend.Chain().GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { + if header := chain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { continue } } @@ -277,9 +357,18 @@ func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, BlockHeadersMsg, res.RequestId) - - return backend.Handle(peer, &res.BlockHeadersPacket) + metadata := func() interface{} { + hashes := make([]common.Hash, len(res.BlockHeadersPacket)) + for i, header := range res.BlockHeadersPacket { + hashes[i] = header.Hash() + } + return hashes + } + return peer.dispatchResponse(&Response{ + id: res.RequestId, + code: BlockHeadersMsg, + Res: &res.BlockHeadersPacket, + }, metadata) } func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { @@ -288,9 +377,23 @@ func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { if err := msg.Decode(res); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - requestTracker.Fulfil(peer.id, peer.version, BlockBodiesMsg, res.RequestId) - - return backend.Handle(peer, &res.BlockBodiesPacket) + metadata := func() interface{} { + var ( + txsHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + uncleHashes = make([]common.Hash, len(res.BlockBodiesPacket)) + ) + hasher := trie.NewStackTrie(nil) + for i, body := range res.BlockBodiesPacket { + txsHashes[i] = 
types.DeriveSha(types.Transactions(body.Transactions), hasher)
+			uncleHashes[i] = types.CalcUncleHash(body.Uncles)
+		}
+		return [][]common.Hash{txsHashes, uncleHashes}
+	}
+	return peer.dispatchResponse(&Response{
+		id: res.RequestId,
+		code: BlockBodiesMsg,
+		Res: &res.BlockBodiesPacket,
+	}, metadata)
 }
 
 func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
@@ -299,9 +402,11 @@ func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {
 	if err := msg.Decode(res); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
-	requestTracker.Fulfil(peer.id, peer.version, NodeDataMsg, res.RequestId)
-
-	return backend.Handle(peer, &res.NodeDataPacket)
+	return peer.dispatchResponse(&Response{
+		id: res.RequestId,
+		code: NodeDataMsg,
+		Res: &res.NodeDataPacket,
+	}, nil) // No post-processing, we're not using this packet anymore
 }
 
 func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
@@ -310,9 +415,19 @@ func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {
 	if err := msg.Decode(res); err != nil {
 		return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
 	}
-	requestTracker.Fulfil(peer.id, peer.version, ReceiptsMsg, res.RequestId)
-
-	return backend.Handle(peer, &res.ReceiptsPacket)
+	metadata := func() interface{} {
+		hasher := trie.NewStackTrie(nil)
+		hashes := make([]common.Hash, len(res.ReceiptsPacket))
+		for i, receipt := range res.ReceiptsPacket {
+			hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher)
+		}
+		return hashes
+	}
+	return peer.dispatchResponse(&Response{
+		id: res.RequestId,
+		code: ReceiptsMsg,
+		Res: &res.ReceiptsPacket,
+	}, metadata)
 }
 
 func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error {
diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go
index 1b4cfeb3d..4161420f3 100644
--- a/eth/protocols/eth/peer.go
+++ b/eth/protocols/eth/peer.go
@@ -84,6 +84,10 @@ type Peer struct {
 	txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
 	txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests
 
+	reqDispatch chan *request // Dispatch channel to send requests and track them until fulfilment
+	reqCancel chan *cancel // Dispatch channel to cancel pending requests and untrack them
+	resDispatch chan *response // Dispatch channel to fulfil pending requests and untrack them
+
 	term chan struct{} // Termination channel to stop the broadcasters
 	lock sync.RWMutex // Mutex protecting the internal fields
 }
@@ -102,6 +106,9 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe
 		queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
 		txBroadcast: make(chan []common.Hash),
 		txAnnounce: make(chan []common.Hash),
+		reqDispatch: make(chan *request),
+		reqCancel: make(chan *cancel),
+		resDispatch: make(chan *response),
 		txpool: txpool,
 		term: make(chan struct{}),
 	}
@@ -109,6 +116,7 @@ func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Pe
 	go peer.broadcastBlocks()
 	go peer.broadcastTransactions()
 	go peer.announceTransactions()
+	go peer.dispatcher()
 
 	return peer
 }
@@ -289,10 +297,10 @@ func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
 }
 
 // ReplyBlockHeaders is the eth/66 version of SendBlockHeaders.
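The metadata closures above exist so that the expensive hashing work (header hashing, `DeriveSha` over transactions and receipts) runs once on the dispatcher's receiver thread rather than in every sink. On the consuming side that precomputed data arrives as `res.Meta`; a short sketch (the helper name is illustrative):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// awaitHeaders consumes a header response together with the hashes that
// handleBlockHeaders66 derived on the receiver thread.
func awaitHeaders(resCh chan *eth.Response) ([]*types.Header, []common.Hash) {
	res := <-resCh
	headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersPacket))
	hashes := res.Meta.([]common.Hash) // Computed once, receiver-side
	res.Done <- nil                    // Acknowledge so the reader can move on
	return headers, hashes
}
```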
-func (p *Peer) ReplyBlockHeaders(id uint64, headers []*types.Header) error { - return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket66{ - RequestId: id, - BlockHeadersPacket: headers, +func (p *Peer) ReplyBlockHeadersRLP(id uint64, headers []rlp.RawValue) error { + return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersRLPPacket66{ + RequestId: id, + BlockHeadersRLPPacket: headers, }) } @@ -323,94 +331,148 @@ func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { // RequestOneHeader is a wrapper around the header query functions to fetch a // single header. It is used solely by the fetcher. -func (p *Peer) RequestOneHeader(hash common.Hash) error { +func (p *Peer) RequestOneHeader(hash common.Hash, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching single header", "hash", hash) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id) - return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ - Origin: HashOrNumber{Hash: hash}, - Amount: uint64(1), - Skip: uint64(0), - Reverse: false, + req := &Request{ + id: id, + sink: sink, + code: GetBlockHeadersMsg, + want: BlockHeadersMsg, + data: &GetBlockHeadersPacket66{ + RequestId: id, + GetBlockHeadersPacket: &GetBlockHeadersPacket{ + Origin: HashOrNumber{Hash: hash}, + Amount: uint64(1), + Skip: uint64(0), + Reverse: false, + }, }, - }) + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestHeadersByHash fetches a batch of blocks' headers corresponding to the // specified header query, based on the hash of an origin block. -func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { +func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id) - return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ - Origin: HashOrNumber{Hash: origin}, - Amount: uint64(amount), - Skip: uint64(skip), - Reverse: reverse, + req := &Request{ + id: id, + sink: sink, + code: GetBlockHeadersMsg, + want: BlockHeadersMsg, + data: &GetBlockHeadersPacket66{ + RequestId: id, + GetBlockHeadersPacket: &GetBlockHeadersPacket{ + Origin: HashOrNumber{Hash: origin}, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + }, }, - }) + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the // specified header query, based on the number of an origin block. 
-func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { +func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetBlockHeadersMsg, BlockHeadersMsg, id) - return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ - RequestId: id, - GetBlockHeadersPacket: &GetBlockHeadersPacket{ - Origin: HashOrNumber{Number: origin}, - Amount: uint64(amount), - Skip: uint64(skip), - Reverse: reverse, + req := &Request{ + id: id, + sink: sink, + code: GetBlockHeadersMsg, + want: BlockHeadersMsg, + data: &GetBlockHeadersPacket66{ + RequestId: id, + GetBlockHeadersPacket: &GetBlockHeadersPacket{ + Origin: HashOrNumber{Number: origin}, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + }, }, - }) + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestBodies fetches a batch of blocks' bodies corresponding to the hashes // specified. -func (p *Peer) RequestBodies(hashes []common.Hash) error { +func (p *Peer) RequestBodies(hashes []common.Hash, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching batch of block bodies", "count", len(hashes)) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetBlockBodiesMsg, BlockBodiesMsg, id) - return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ - RequestId: id, - GetBlockBodiesPacket: hashes, - }) + req := &Request{ + id: id, + sink: sink, + code: GetBlockBodiesMsg, + want: BlockBodiesMsg, + data: &GetBlockBodiesPacket66{ + RequestId: id, + GetBlockBodiesPacket: hashes, + }, + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestNodeData fetches a batch of arbitrary data from a node's known state // data, corresponding to the specified hashes. -func (p *Peer) RequestNodeData(hashes []common.Hash) error { +func (p *Peer) RequestNodeData(hashes []common.Hash, sink chan *Response) (*Request, error) { p.Log().Debug("Fetching batch of state data", "count", len(hashes)) id := rand.Uint64() - requestTracker.Track(p.id, p.version, GetNodeDataMsg, NodeDataMsg, id) - return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{ - RequestId: id, - GetNodeDataPacket: hashes, - }) + req := &Request{ + id: id, + sink: sink, + code: GetNodeDataMsg, + want: NodeDataMsg, + data: &GetNodeDataPacket66{ + RequestId: id, + GetNodeDataPacket: hashes, + }, + } + if err := p.dispatchRequest(req); err != nil { + return nil, err + } + return req, nil } // RequestReceipts fetches a batch of transaction receipts from a remote node. 
-func (p *Peer) RequestReceipts(hashes []common.Hash) error {
+func (p *Peer) RequestReceipts(hashes []common.Hash, sink chan *Response) (*Request, error) {
 	p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
 
 	id := rand.Uint64()
-	requestTracker.Track(p.id, p.version, GetReceiptsMsg, ReceiptsMsg, id)
-	return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{
-		RequestId: id,
-		GetReceiptsPacket: hashes,
-	})
+	req := &Request{
+		id: id,
+		sink: sink,
+		code: GetReceiptsMsg,
+		want: ReceiptsMsg,
+		data: &GetReceiptsPacket66{
+			RequestId: id,
+			GetReceiptsPacket: hashes,
+		},
+	}
+	if err := p.dispatchRequest(req); err != nil {
+		return nil, err
+	}
+	return req, nil
 }
 
 // RequestTxs fetches a batch of transactions from a remote node.
diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go
index 3c3da30fa..a8420ad68 100644
--- a/eth/protocols/eth/protocol.go
+++ b/eth/protocols/eth/protocol.go
@@ -175,6 +175,16 @@ type BlockHeadersPacket66 struct {
 	BlockHeadersPacket
 }
 
+// BlockHeadersRLPPacket represents a block header response, to use when we already
+// have the headers RLP encoded.
+type BlockHeadersRLPPacket []rlp.RawValue
+
+// BlockHeadersRLPPacket66 represents a block header response over eth/66.
+type BlockHeadersRLPPacket66 struct {
+	RequestId uint64
+	BlockHeadersRLPPacket
+}
+
 // NewBlockPacket is the network packet for the block propagation message.
 type NewBlockPacket struct {
 	Block *types.Block
diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go
index c62f9cfca..0a1ee2637 100644
--- a/eth/protocols/snap/handler.go
+++ b/eth/protocols/snap/handler.go
@@ -99,8 +99,8 @@ func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
 			Version: version,
 			Length: protocolLengths[version],
 			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
-				return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
-					return handle(backend, peer)
+				return backend.RunPeer(NewPeer(version, p, rw), func(peer *Peer) error {
+					return Handle(backend, peer)
 				})
 			},
 			NodeInfo: func() interface{} {
@@ -116,21 +116,21 @@ func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
 	return protocols
 }
 
-// handle is the callback invoked to manage the life cycle of a `snap` peer.
+// Handle is the callback invoked to manage the life cycle of a `snap` peer.
 // When this function terminates, the peer is disconnected.
-func handle(backend Backend, peer *Peer) error {
+func Handle(backend Backend, peer *Peer) error {
 	for {
-		if err := handleMessage(backend, peer); err != nil {
+		if err := HandleMessage(backend, peer); err != nil {
 			peer.Log().Debug("Message handling failed in `snap`", "err", err)
 			return err
 		}
 	}
 }
 
-// handleMessage is invoked whenever an inbound message is received from a
+// HandleMessage is invoked whenever an inbound message is received from a
 // remote peer on the `snap` protocol. The remote connection is torn down upon
 // returning any error.
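Exporting `NewPeer`, `Handle` and `HandleMessage` lets an external harness drive the `snap` protocol over an in-memory pipe, much like the eth tests do. A hedged sketch — `snap.SNAP1` is assumed to be the exported protocol version constant, and the `Backend` implementation comes from the harness:

```go
package example

import (
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// runSnapPeer wires up a snap peer over a message pipe and serves it until
// the connection is torn down. The returned end drives the conversation.
func runSnapPeer(backend snap.Backend) (*p2p.MsgPipeRW, func()) {
	app, net := p2p.MsgPipe()

	peer := snap.NewPeer(snap.SNAP1, p2p.NewPeer(enode.ID{1}, "test", nil), net)
	go func() {
		if err := snap.Handle(backend, peer); err != nil {
			log.Debug("snap peer terminated", "err", err)
		}
	}()
	return app, func() { app.Close(); net.Close() }
}
```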
-func handleMessage(backend Backend, peer *Peer) error { +func HandleMessage(backend Backend, peer *Peer) error { // Read the next message from the remote peer, and ensure it's fully consumed msg, err := peer.rw.ReadMsg() if err != nil { @@ -161,60 +161,10 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - // Retrieve the requested state and bail out if non existent - tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) - if err != nil { - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin) - if err != nil { - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - // Iterate over the requested range and pile accounts up - var ( - accounts []*AccountData - size uint64 - last common.Hash - ) - for it.Next() && size < req.Bytes { - hash, account := it.Hash(), common.CopyBytes(it.Account()) + // Service the request, potentially returning nothing in case of errors + accounts, proofs := ServiceGetAccountRangeQuery(backend.Chain(), &req) - // Track the returned interval for the Merkle proofs - last = hash - - // Assemble the reply item - size += uint64(common.HashLength + len(account)) - accounts = append(accounts, &AccountData{ - Hash: hash, - Body: account, - }) - // If we've exceeded the request threshold, abort - if bytes.Compare(hash[:], req.Limit[:]) >= 0 { - break - } - } - it.Release() - - // Generate the Merkle proofs for the first and last account - proof := light.NewNodeSet() - if err := tr.Prove(req.Origin[:], 0, proof); err != nil { - log.Warn("Failed to prove account range", "origin", req.Origin, "err", err) - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - if last != (common.Hash{}) { - if err := tr.Prove(last[:], 0, proof); err != nil { - log.Warn("Failed to prove account range", "last", last, "err", err) - return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID}) - } - } - var proofs [][]byte - for _, blob := range proof.NodeList() { - proofs = append(proofs, blob) - } - // Send back anything accumulated + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ ID: req.ID, Accounts: accounts, @@ -243,111 +193,10 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set? 
- // TODO(karalabe): - Logging locally is not ideal as remote faulst annoy the local user - // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional) + // Service the request, potentially returning nothing in case of errors + slots, proofs := ServiceGetStorageRangesQuery(backend.Chain(), &req) - // Calculate the hard limit at which to abort, even if mid storage trie - hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack)) - - // Retrieve storage ranges until the packet limit is reached - var ( - slots [][]*StorageData - proofs [][]byte - size uint64 - ) - for _, account := range req.Accounts { - // If we've exceeded the requested data limit, abort without opening - // a new storage range (that we'd need to prove due to exceeded size) - if size >= req.Bytes { - break - } - // The first account might start from a different origin and end sooner - var origin common.Hash - if len(req.Origin) > 0 { - origin, req.Origin = common.BytesToHash(req.Origin), nil - } - var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - if len(req.Limit) > 0 { - limit, req.Limit = common.BytesToHash(req.Limit), nil - } - // Retrieve the requested state and bail out if non existent - it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin) - if err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - // Iterate over the requested range and pile slots up - var ( - storage []*StorageData - last common.Hash - abort bool - ) - for it.Next() { - if size >= hardLimit { - abort = true - break - } - hash, slot := it.Hash(), common.CopyBytes(it.Slot()) - - // Track the returned interval for the Merkle proofs - last = hash - - // Assemble the reply item - size += uint64(common.HashLength + len(slot)) - storage = append(storage, &StorageData{ - Hash: hash, - Body: slot, - }) - // If we've exceeded the request threshold, abort - if bytes.Compare(hash[:], limit[:]) >= 0 { - break - } - } - slots = append(slots, storage) - it.Release() - - // Generate the Merkle proofs for the first and last storage slot, but - // only if the response was capped. If the entire storage trie included - // in the response, no need for any proofs. 
- if origin != (common.Hash{}) || abort { - // Request started at a non-zero hash or was capped prematurely, add - // the endpoint Merkle proofs - accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) - if err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - var acc types.StateAccount - if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB()) - if err != nil { - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - proof := light.NewNodeSet() - if err := stTrie.Prove(origin[:], 0, proof); err != nil { - log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - if last != (common.Hash{}) { - if err := stTrie.Prove(last[:], 0, proof); err != nil { - log.Warn("Failed to prove storage range", "last", last, "err", err) - return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) - } - } - for _, blob := range proof.NodeList() { - proofs = append(proofs, blob) - } - // Proof terminates the reply as proofs are only added if a node - // refuses to serve more data (exception when a contract fetch is - // finishing, but that's that). - break - } - } - // Send back anything accumulated + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ ID: req.ID, Slots: slots, @@ -378,31 +227,10 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - if len(req.Hashes) > maxCodeLookups { - req.Hashes = req.Hashes[:maxCodeLookups] - } - // Retrieve bytecodes until the packet size limit is reached - var ( - codes [][]byte - bytes uint64 - ) - for _, hash := range req.Hashes { - if hash == emptyCode { - // Peers should not request the empty code, but if they do, at - // least sent them back a correct response without db lookups - codes = append(codes, []byte{}) - } else if blob, err := backend.Chain().ContractCode(hash); err == nil { - codes = append(codes, blob) - bytes += uint64(len(blob)) - } - if bytes > req.Bytes { - break - } - } - // Send back anything accumulated + // Service the request, potentially returning nothing in case of errors + codes := ServiceGetByteCodesQuery(backend.Chain(), &req) + + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{ ID: req.ID, Codes: codes, @@ -424,80 +252,12 @@ func handleMessage(backend Backend, peer *Peer) error { if err := msg.Decode(&req); err != nil { return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) } - if req.Bytes > softResponseLimit { - req.Bytes = softResponseLimit - } - // Make sure we have the state associated with the request - triedb := backend.Chain().StateCache().TrieDB() - - accTrie, err := trie.NewSecure(req.Root, triedb) + // Service the request, potentially returning nothing in case of errors + nodes, err := ServiceGetTrieNodesQuery(backend.Chain(), &req, start) if err != nil { - // We don't have the requested state available, bail out - return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID}) + return err } - snap := 
backend.Chain().Snapshots().Snapshot(req.Root) - if snap == nil { - // We don't have the requested state snapshotted yet, bail out. - // In reality we could still serve using the account and storage - // tries only, but let's protect the node a bit while it's doing - // snapshot generation. - return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID}) - } - // Retrieve trie nodes until the packet size limit is reached - var ( - nodes [][]byte - bytes uint64 - loads int // Trie hash expansions to cound database reads - ) - for _, pathset := range req.Paths { - switch len(pathset) { - case 0: - // Ensure we penalize invalid requests - return fmt.Errorf("%w: zero-item pathset requested", errBadRequest) - - case 1: - // If we're only retrieving an account trie node, fetch it directly - blob, resolved, err := accTrie.TryGetNode(pathset[0]) - loads += resolved // always account database reads, even for failures - if err != nil { - break - } - nodes = append(nodes, blob) - bytes += uint64(len(blob)) - - default: - // Storage slots requested, open the storage trie and retrieve from there - account, err := snap.Account(common.BytesToHash(pathset[0])) - loads++ // always account database reads, even for failures - if err != nil || account == nil { - break - } - stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb) - loads++ // always account database reads, even for failures - if err != nil { - break - } - for _, path := range pathset[1:] { - blob, resolved, err := stTrie.TryGetNode(path) - loads += resolved // always account database reads, even for failures - if err != nil { - break - } - nodes = append(nodes, blob) - bytes += uint64(len(blob)) - - // Sanity check limits to avoid DoS on the store trie loads - if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { - break - } - } - } - // Abort request processing if we've exceeded our limits - if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent { - break - } - } - // Send back anything accumulated + // Send back anything accumulated (or empty in case of errors) return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ ID: req.ID, Nodes: nodes, @@ -518,6 +278,282 @@ func handleMessage(backend Backend, peer *Peer) error { } } +// ServiceGetAccountRangeQuery assembles the response to an account range query. +// It is exposed to allow external packages to test protocol behavior. 
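With the servicing logic exported, an external package can exercise the account-range path without wiring up a live peer. A minimal sketch of such a call (the `snaptest` package and the chain setup are hypothetical; the packet fields are the ones used by the function below). Note that empty results double as the error signal: the caller replies with an empty packet instead of dropping the peer.

```go
package snaptest

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
)

// queryAccounts drives the exported handler logic directly against a chain.
// The requested root is assumed to be present in both the trie database and
// the snapshot tree; otherwise the helper returns empty results.
func queryAccounts(chain *core.BlockChain, root common.Hash) ([]*snap.AccountData, [][]byte) {
	req := &snap.GetAccountRangePacket{
		ID:     1,
		Root:   root,
		Origin: common.Hash{}, // iterate from the start of the account space
		Limit:  common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
		Bytes:  512 * 1024, // illustrative budget, below the soft response limit
	}
	return snap.ServiceGetAccountRangeQuery(chain, req)
}
```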
+func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePacket) ([]*AccountData, [][]byte) {
+	if req.Bytes > softResponseLimit {
+		req.Bytes = softResponseLimit
+	}
+	// Retrieve the requested state and bail out if non-existent
+	tr, err := trie.New(req.Root, chain.StateCache().TrieDB())
+	if err != nil {
+		return nil, nil
+	}
+	it, err := chain.Snapshots().AccountIterator(req.Root, req.Origin)
+	if err != nil {
+		return nil, nil
+	}
+	// Iterate over the requested range and pile accounts up
+	var (
+		accounts []*AccountData
+		size     uint64
+		last     common.Hash
+	)
+	for it.Next() && size < req.Bytes {
+		hash, account := it.Hash(), common.CopyBytes(it.Account())
+
+		// Track the returned interval for the Merkle proofs
+		last = hash
+
+		// Assemble the reply item
+		size += uint64(common.HashLength + len(account))
+		accounts = append(accounts, &AccountData{
+			Hash: hash,
+			Body: account,
+		})
+		// If we've exceeded the request threshold, abort
+		if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
+			break
+		}
+	}
+	it.Release()
+
+	// Generate the Merkle proofs for the first and last account
+	proof := light.NewNodeSet()
+	if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
+		log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
+		return nil, nil
+	}
+	if last != (common.Hash{}) {
+		if err := tr.Prove(last[:], 0, proof); err != nil {
+			log.Warn("Failed to prove account range", "last", last, "err", err)
+			return nil, nil
+		}
+	}
+	var proofs [][]byte
+	for _, blob := range proof.NodeList() {
+		proofs = append(proofs, blob)
+	}
+	return accounts, proofs
+}
+
+// ServiceGetStorageRangesQuery assembles the response to a storage ranges query.
+// It is exposed to allow external packages to test protocol behavior.
+func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
+	if req.Bytes > softResponseLimit {
+		req.Bytes = softResponseLimit
+	}
+	// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?
+	// TODO(karalabe): - Logging locally is not ideal as remote faults annoy the local user
+	// TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)
+
+	// Calculate the hard limit at which to abort, even if mid storage trie
+	hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))
+
+	// Retrieve storage ranges until the packet limit is reached
+	var (
+		slots  [][]*StorageData
+		proofs [][]byte
+		size   uint64
+	)
+	for _, account := range req.Accounts {
+		// If we've exceeded the requested data limit, abort without opening
+		// a new storage range (that we'd need to prove due to exceeded size)
+		if size >= req.Bytes {
+			break
+		}
+		// The first account might start from a different origin and end sooner
+		var origin common.Hash
+		if len(req.Origin) > 0 {
+			origin, req.Origin = common.BytesToHash(req.Origin), nil
+		}
+		var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+		if len(req.Limit) > 0 {
+			limit, req.Limit = common.BytesToHash(req.Limit), nil
+		}
+		// Retrieve the requested state and bail out if non-existent
+		it, err := chain.Snapshots().StorageIterator(req.Root, account, origin)
+		if err != nil {
+			return nil, nil
+		}
+		// Iterate over the requested range and pile slots up
+		var (
+			storage []*StorageData
+			last    common.Hash
+			abort   bool
+		)
+		for it.Next() {
+			if size >= hardLimit {
+				abort = true
+				break
+			}
+			hash, slot := it.Hash(), common.CopyBytes(it.Slot())
+
+			// Track the returned interval for the Merkle proofs
+			last = hash
+
+			// Assemble the reply item
+			size += uint64(common.HashLength + len(slot))
+			storage = append(storage, &StorageData{
+				Hash: hash,
+				Body: slot,
+			})
+			// If we've exceeded the request threshold, abort
+			if bytes.Compare(hash[:], limit[:]) >= 0 {
+				break
+			}
+		}
+		slots = append(slots, storage)
+		it.Release()
+
+		// Generate the Merkle proofs for the first and last storage slot, but
+		// only if the response was capped. If the entire storage trie is included
+		// in the response, there is no need for any proofs.
+		if origin != (common.Hash{}) || abort {
+			// Request started at a non-zero hash or was capped prematurely, add
+			// the endpoint Merkle proofs
+			accTrie, err := trie.New(req.Root, chain.StateCache().TrieDB())
+			if err != nil {
+				return nil, nil
+			}
+			var acc types.StateAccount
+			if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {
+				return nil, nil
+			}
+			stTrie, err := trie.New(acc.Root, chain.StateCache().TrieDB())
+			if err != nil {
+				return nil, nil
+			}
+			proof := light.NewNodeSet()
+			if err := stTrie.Prove(origin[:], 0, proof); err != nil {
+				log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err)
+				return nil, nil
+			}
+			if last != (common.Hash{}) {
+				if err := stTrie.Prove(last[:], 0, proof); err != nil {
+					log.Warn("Failed to prove storage range", "last", last, "err", err)
+					return nil, nil
+				}
+			}
+			for _, blob := range proof.NodeList() {
+				proofs = append(proofs, blob)
+			}
+			// Proof terminates the reply as proofs are only added if a node
+			// refuses to serve more data (exception when a contract fetch is
+			// finishing, but that's that).
+			break
+		}
+	}
+	return slots, proofs
+}
+
+// ServiceGetByteCodesQuery assembles the response to a byte codes query.
+// It is exposed to allow external packages to test protocol behavior.
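The bytecode path can be driven directly in the same way as the account-range helper above; a minimal sketch (the `snaptest` package and the byte budget are illustrative):

```go
package snaptest

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
)

// fetchCodes retrieves contract bytecodes by hash through the exported
// servicing helper; hashes the chain doesn't know are simply skipped.
func fetchCodes(chain *core.BlockChain, hashes []common.Hash) [][]byte {
	req := &snap.GetByteCodesPacket{
		ID:     2,
		Hashes: hashes,
		Bytes:  256 * 1024, // illustrative response size budget
	}
	return snap.ServiceGetByteCodesQuery(chain, req)
}
```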
+func ServiceGetByteCodesQuery(chain *core.BlockChain, req *GetByteCodesPacket) [][]byte {
+	if req.Bytes > softResponseLimit {
+		req.Bytes = softResponseLimit
+	}
+	if len(req.Hashes) > maxCodeLookups {
+		req.Hashes = req.Hashes[:maxCodeLookups]
+	}
+	// Retrieve bytecodes until the packet size limit is reached
+	var (
+		codes [][]byte
+		bytes uint64
+	)
+	for _, hash := range req.Hashes {
+		if hash == emptyCode {
+			// Peers should not request the empty code, but if they do, at
+			// least send them back a correct response without db lookups
+			codes = append(codes, []byte{})
+		} else if blob, err := chain.ContractCode(hash); err == nil {
+			codes = append(codes, blob)
+			bytes += uint64(len(blob))
+		}
+		if bytes > req.Bytes {
+			break
+		}
+	}
+	return codes
+}
+
+// ServiceGetTrieNodesQuery assembles the response to a trie nodes query.
+// It is exposed to allow external packages to test protocol behavior.
+func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, start time.Time) ([][]byte, error) {
+	if req.Bytes > softResponseLimit {
+		req.Bytes = softResponseLimit
+	}
+	// Make sure we have the state associated with the request
+	triedb := chain.StateCache().TrieDB()
+
+	accTrie, err := trie.NewSecure(req.Root, triedb)
+	if err != nil {
+		// We don't have the requested state available, bail out
+		return nil, nil
+	}
+	snap := chain.Snapshots().Snapshot(req.Root)
+	if snap == nil {
+		// We don't have the requested state snapshotted yet, bail out.
+		// In reality we could still serve using the account and storage
+		// tries only, but let's protect the node a bit while it's doing
+		// snapshot generation.
+		return nil, nil
+	}
+	// Retrieve trie nodes until the packet size limit is reached
+	var (
+		nodes [][]byte
+		bytes uint64
+		loads int // Trie hash expansions to count database reads
+	)
+	for _, pathset := range req.Paths {
+		switch len(pathset) {
+		case 0:
+			// Ensure we penalize invalid requests
+			return nil, fmt.Errorf("%w: zero-item pathset requested", errBadRequest)
+
+		case 1:
+			// If we're only retrieving an account trie node, fetch it directly
+			blob, resolved, err := accTrie.TryGetNode(pathset[0])
+			loads += resolved // always account database reads, even for failures
+			if err != nil {
+				break
+			}
+			nodes = append(nodes, blob)
+			bytes += uint64(len(blob))
+
+		default:
+			// Storage slots requested, open the storage trie and retrieve from there
+			account, err := snap.Account(common.BytesToHash(pathset[0]))
+			loads++ // always account database reads, even for failures
+			if err != nil || account == nil {
+				break
+			}
+			stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)
+			loads++ // always account database reads, even for failures
+			if err != nil {
+				break
+			}
+			for _, path := range pathset[1:] {
+				blob, resolved, err := stTrie.TryGetNode(path)
+				loads += resolved // always account database reads, even for failures
+				if err != nil {
+					break
+				}
+				nodes = append(nodes, blob)
+				bytes += uint64(len(blob))
+
+				// Sanity check limits to avoid DoS on the store trie loads
+				if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+					break
+				}
+			}
+		}
+		// Abort request processing if we've exceeded our limits
+		if bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+			break
+		}
+	}
+	return nodes, nil
+}
+
 // NodeInfo represents a short summary of the `snap` sub-protocol metadata
 // known about the host peer.
 type NodeInfo struct{}
diff --git a/eth/protocols/snap/peer.go b/eth/protocols/snap/peer.go
index cf0ce65bd..87a62d2f8 100644
--- a/eth/protocols/snap/peer.go
+++ b/eth/protocols/snap/peer.go
@@ -33,9 +33,9 @@ type Peer struct {
 	logger log.Logger // Contextual logger with the peer id injected
 }
 
-// newPeer create a wrapper for a network connection and negotiated protocol
+// NewPeer creates a wrapper for a network connection and negotiated protocol
 // version.
-func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
+func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
 	id := p.ID().String()
 	return &Peer{
 		id:      id,
@@ -46,6 +46,16 @@ func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
 	}
 }
 
+// NewFakePeer creates a fake snap peer without a backing p2p peer, for testing purposes.
+func NewFakePeer(version uint, id string, rw p2p.MsgReadWriter) *Peer {
+	return &Peer{
+		id:      id,
+		rw:      rw,
+		version: version,
+		logger:  log.New("peer", id[:8]),
+	}
+}
+
 // ID retrieves the peer's unique identifier.
 func (p *Peer) ID() string {
 	return p.id
diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go
index 5528e9212..60a254f39 100644
--- a/eth/protocols/snap/protocol.go
+++ b/eth/protocols/snap/protocol.go
@@ -27,7 +27,7 @@ import (
 
 // Constants to match up protocol versions and messages
 const (
-	snap1 = 1
+	SNAP1 = 1
 )
 
 // ProtocolName is the official short name of the `snap` protocol used during
@@ -36,11 +36,11 @@ const ProtocolName = "snap"
 
 // ProtocolVersions are the supported versions of the `snap` protocol (first
 // is primary).
-var ProtocolVersions = []uint{snap1}
+var ProtocolVersions = []uint{SNAP1}
 
 // protocolLengths are the number of implemented message corresponding to
 // different protocol versions.
-var protocolLengths = map[uint]uint64{snap1: 8}
+var protocolLengths = map[uint]uint64{SNAP1: 8}
 
 // maxMessageSize is the maximum cap on the size of a protocol message.
 const maxMessageSize = 10 * 1024 * 1024
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 9ef9d7571..be8644a5a 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -325,10 +325,10 @@ type healTask struct {
 	codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval
 }
 
-// syncProgress is a database entry to allow suspending and resuming a snapshot state
+// SyncProgress is a database entry to allow suspending and resuming a snapshot state
 // sync. Opposed to full and fast sync, there is no way to restart a suspended
 // snap sync without prior knowledge of the suspension point.
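The suspended task set round-trips through encoding/json under a dedicated rawdb key, which is why the now-exported type must keep JSON-serializable fields. A sketch of the load half, mirroring the loadSyncStatus logic further down (the standalone helper is hypothetical; the rawdb accessor signature is an assumption):

```go
// Hypothetical sketch: reading a suspended snap-sync status back from the
// database, the same way loadSyncStatus below does.
package snap

import (
	"encoding/json"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

func loadProgress(db ethdb.KeyValueReader) *SyncProgress {
	progress := new(SyncProgress)
	if status := rawdb.ReadSnapshotSyncStatus(db); status != nil {
		if err := json.Unmarshal(status, progress); err != nil {
			return nil // corrupted status entry: resync from scratch
		}
	}
	return progress
}
```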
-type syncProgress struct { +type SyncProgress struct { Tasks []*accountTask // The suspended account tasks (contract tasks within) // Status report during syncing phase @@ -342,12 +342,15 @@ type syncProgress struct { // Status report during healing phase TrienodeHealSynced uint64 // Number of state trie nodes downloaded TrienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk - TrienodeHealDups uint64 // Number of state trie nodes already processed - TrienodeHealNops uint64 // Number of state trie nodes not requested BytecodeHealSynced uint64 // Number of bytecodes downloaded BytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk - BytecodeHealDups uint64 // Number of bytecodes already processed - BytecodeHealNops uint64 // Number of bytecodes not requested +} + +// SyncPending is analogous to SyncProgress, but it's used to report on pending +// ephemeral sync progress that doesn't get persisted into the database. +type SyncPending struct { + TrienodeHeal uint64 // Number of state trie nodes pending + BytecodeHeal uint64 // Number of bytecodes pending } // SyncPeer abstracts out the methods required for a peer to be synced against @@ -543,7 +546,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { s.lock.Lock() s.root = root s.healer = &healTask{ - scheduler: state.NewStateSync(root, s.db, nil, s.onHealState), + scheduler: state.NewStateSync(root, s.db, s.onHealState), trieTasks: make(map[common.Hash]trie.SyncPath), codeTasks: make(map[common.Hash]struct{}), } @@ -671,7 +674,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { // loadSyncStatus retrieves a previously aborted sync status from the database, // or generates a fresh one if none is available. func (s *Syncer) loadSyncStatus() { - var progress syncProgress + var progress SyncProgress if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil { if err := json.Unmarshal(status, &progress); err != nil { @@ -775,7 +778,7 @@ func (s *Syncer) saveSyncStatus() { } } // Store the actual progress markers - progress := &syncProgress{ + progress := &SyncProgress{ Tasks: s.tasks, AccountSynced: s.accountSynced, AccountBytes: s.accountBytes, @@ -795,6 +798,31 @@ func (s *Syncer) saveSyncStatus() { rawdb.WriteSnapshotSyncStatus(s.db, status) } +// Progress returns the snap sync status statistics. +func (s *Syncer) Progress() (*SyncProgress, *SyncPending) { + s.lock.Lock() + defer s.lock.Unlock() + + progress := &SyncProgress{ + AccountSynced: s.accountSynced, + AccountBytes: s.accountBytes, + BytecodeSynced: s.bytecodeSynced, + BytecodeBytes: s.bytecodeBytes, + StorageSynced: s.storageSynced, + StorageBytes: s.storageBytes, + TrienodeHealSynced: s.trienodeHealSynced, + TrienodeHealBytes: s.trienodeHealBytes, + BytecodeHealSynced: s.bytecodeHealSynced, + BytecodeHealBytes: s.bytecodeHealBytes, + } + pending := new(SyncPending) + if s.healer != nil { + pending.TrienodeHeal = uint64(len(s.healer.trieTasks)) + pending.BytecodeHeal = uint64(len(s.healer.codeTasks)) + } + return progress, pending +} + // cleanAccountTasks removes account range retrieval tasks that have already been // completed. func (s *Syncer) cleanAccountTasks() { diff --git a/eth/state_accessor.go b/eth/state_accessor.go index c855f0100..f01db93a6 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/trie" ) -// stateAtBlock retrieves the state database associated with a certain block. 
+// StateAtBlock retrieves the state database associated with a certain block. // If no state is locally available for the given block, a number of blocks // are attempted to be reexecuted to generate the desired state. The optional // base layer statedb can be passed then it's regarded as the statedb of the @@ -45,7 +45,7 @@ import ( // storing trash persistently // - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided, // it would be preferrable to start from a fresh state, if we have it on disk. -func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { +func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { var ( current *types.Block database state.Database @@ -171,7 +171,7 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, err := eth.stateAtBlock(parent, reexec, nil, true, false) + statedb, err := eth.StateAtBlock(parent, reexec, nil, true, false) if err != nil { return nil, vm.BlockContext{}, nil, err } diff --git a/eth/sync.go b/eth/sync.go index aaac6bef9..b8ac67d3b 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -145,7 +145,10 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { if cs.doneCh != nil { return nil // Sync already running. } - + // Disable the td based sync trigger after the transition + if cs.handler.merger.TDDReached() { + return nil + } // Ensure we're at minimum peer count. minPeers := defaultMinSyncPeers if cs.forced { @@ -162,10 +165,7 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { return nil } mode, ourTD := cs.modeAndLocalHead() - if mode == downloader.FastSync && atomic.LoadUint32(&cs.handler.snapSync) == 1 { - // Fast sync via the snap protocol - mode = downloader.SnapSync - } + op := peerToSyncOp(mode, peer) if op.td.Cmp(ourTD) <= 0 { return nil // We're in sync. @@ -179,19 +179,19 @@ func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp { } func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { - // If we're in fast sync mode, return that directly - if atomic.LoadUint32(&cs.handler.fastSync) == 1 { + // If we're in snap sync mode, return that directly + if atomic.LoadUint32(&cs.handler.snapSync) == 1 { block := cs.handler.chain.CurrentFastBlock() td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64()) - return downloader.FastSync, td + return downloader.SnapSync, td } // We are probably in full sync, but we might have rewound to before the - // fast sync pivot, check if we should reenable + // snap sync pivot, check if we should reenable if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot { block := cs.handler.chain.CurrentFastBlock() td := cs.handler.chain.GetTd(block.Hash(), block.NumberU64()) - return downloader.FastSync, td + return downloader.SnapSync, td } } // Nope, we're really full syncing @@ -208,15 +208,15 @@ func (cs *chainSyncer) startSync(op *chainSyncOp) { // doSync synchronizes the local blockchain with a remote peer. 
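The guard at the top of doSync below enforces a write-once rule for the transaction-lookup limit. Condensed into a standalone sketch (the helper is hypothetical; the rawdb read accessor is the one used in the hunk, and the matching write accessor is assumed):

```go
package eth

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

// reconcileTxLookupLimit persists the configured limit on the first snap
// sync; on every later sync the stored value wins and a mismatch only warns.
func reconcileTxLookupLimit(db ethdb.Database, configured uint64) uint64 {
	stored := rawdb.ReadFastTxLookupLimit(db)
	if stored == nil {
		rawdb.WriteFastTxLookupLimit(db, configured)
		return configured
	}
	if *stored != configured {
		log.Warn("Update txLookup limit", "provided", configured, "updated", *stored)
	}
	return *stored
}
```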
 func (h *handler) doSync(op *chainSyncOp) error {
-	if op.mode == downloader.FastSync || op.mode == downloader.SnapSync {
-		// Before launch the fast sync, we have to ensure user uses the same
+	if op.mode == downloader.SnapSync {
+		// Before launching snap sync, we have to ensure the user uses the same
 		// txlookup limit.
-		// The main concern here is: during the fast sync Geth won't index the
+		// The main concern here is: during the snap sync Geth won't index the
 		// block(generate tx indices) before the HEAD-limit. But if user changes
-		// the limit in the next fast sync(e.g. user kill Geth manually and
+		// the limit in the next snap sync (e.g. the user kills Geth manually and
 		// restart) then it will be hard for Geth to figure out the oldest block
 		// has been indexed. So here for the user-experience wise, it's non-optimal
-		// that user can't change limit during the fast sync. If changed, Geth
+		// that the user can't change the limit during snap sync. If changed, Geth
 		// will just blindly use the original one.
 		limit := h.chain.TxLookupLimit()
 		if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil {
@@ -226,15 +226,11 @@ func (h *handler) doSync(op *chainSyncOp) error {
 			log.Warn("Update txLookup limit", "provided", limit, "updated", *stored)
 		}
 	}
-	// Run the sync cycle, and disable fast sync if we're past the pivot block
+	// Run the sync cycle, and disable snap sync if we're past the pivot block
 	err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode)
 	if err != nil {
 		return err
 	}
-	if atomic.LoadUint32(&h.fastSync) == 1 {
-		log.Info("Fast sync complete, auto disabling")
-		atomic.StoreUint32(&h.fastSync, 0)
-	}
 	if atomic.LoadUint32(&h.snapSync) == 1 {
 		log.Info("Snap sync complete, auto disabling")
 		atomic.StoreUint32(&h.snapSync, 0)
diff --git a/eth/sync_test.go b/eth/sync_test.go
index e96b9ee81..929a2a9d1 100644
--- a/eth/sync_test.go
+++ b/eth/sync_test.go
@@ -23,57 +23,74 @@ import (
 
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/eth/protocols/eth"
+	"github.com/ethereum/go-ethereum/eth/protocols/snap"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 )
 
-// Tests that fast sync is disabled after a successful sync cycle.
-func TestFastSyncDisabling66(t *testing.T) { testFastSyncDisabling(t, eth.ETH66) }
+// Tests that snap sync is disabled after a successful sync cycle.
+func TestSnapSyncDisabling66(t *testing.T) { testSnapSyncDisabling(t, eth.ETH66, snap.SNAP1) }
 
-// Tests that fast sync gets disabled as soon as a real block is successfully
+// Tests that snap sync gets disabled as soon as a real block is successfully
 // imported into the blockchain.
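Tests like the one below wire two full handlers together over message pipes. For leaner unit tests, the NewFakePeer constructor added in peer.go above skips the devp2p peer entirely; a minimal sketch of pairing one with a pipe (package name and peer id are illustrative):

```go
package snaptest

import (
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
	"github.com/ethereum/go-ethereum/p2p"
)

// newPipedFakePeer pairs a fake snap peer with an in-memory pipe: a test
// writes protocol messages into the returned app side, and the peer reads
// and replies through the other end, without any devp2p handshake.
func newPipedFakePeer() (*snap.Peer, *p2p.MsgPipeRW) {
	app, net := p2p.MsgPipe()
	peer := snap.NewFakePeer(snap.SNAP1, "fakepeer-0001", net)
	return peer, app
}
```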
-func testFastSyncDisabling(t *testing.T, protocol uint) { +func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { t.Parallel() - // Create an empty handler and ensure it's in fast sync mode + // Create an empty handler and ensure it's in snap sync mode empty := newTestHandler() - if atomic.LoadUint32(&empty.handler.fastSync) == 0 { - t.Fatalf("fast sync disabled on pristine blockchain") + if atomic.LoadUint32(&empty.handler.snapSync) == 0 { + t.Fatalf("snap sync disabled on pristine blockchain") } defer empty.close() - // Create a full handler and ensure fast sync ends up disabled + // Create a full handler and ensure snap sync ends up disabled full := newTestHandlerWithBlocks(1024) - if atomic.LoadUint32(&full.handler.fastSync) == 1 { - t.Fatalf("fast sync not disabled on non-empty blockchain") + if atomic.LoadUint32(&full.handler.snapSync) == 1 { + t.Fatalf("snap sync not disabled on non-empty blockchain") } defer full.close() - // Sync up the two handlers - emptyPipe, fullPipe := p2p.MsgPipe() - defer emptyPipe.Close() - defer fullPipe.Close() + // Sync up the two handlers via both `eth` and `snap` + caps := []p2p.Cap{{Name: "eth", Version: ethVer}, {Name: "snap", Version: snapVer}} - emptyPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), emptyPipe, empty.txpool) - fullPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), fullPipe, full.txpool) - defer emptyPeer.Close() - defer fullPeer.Close() + emptyPipeEth, fullPipeEth := p2p.MsgPipe() + defer emptyPipeEth.Close() + defer fullPipeEth.Close() - go empty.handler.runEthPeer(emptyPeer, func(peer *eth.Peer) error { + emptyPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeEth, empty.txpool) + fullPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeEth, full.txpool) + defer emptyPeerEth.Close() + defer fullPeerEth.Close() + + go empty.handler.runEthPeer(emptyPeerEth, func(peer *eth.Peer) error { return eth.Handle((*ethHandler)(empty.handler), peer) }) - go full.handler.runEthPeer(fullPeer, func(peer *eth.Peer) error { + go full.handler.runEthPeer(fullPeerEth, func(peer *eth.Peer) error { return eth.Handle((*ethHandler)(full.handler), peer) }) + + emptyPipeSnap, fullPipeSnap := p2p.MsgPipe() + defer emptyPipeSnap.Close() + defer fullPipeSnap.Close() + + emptyPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeSnap) + fullPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeSnap) + + go empty.handler.runSnapExtension(emptyPeerSnap, func(peer *snap.Peer) error { + return snap.Handle((*snapHandler)(empty.handler), peer) + }) + go full.handler.runSnapExtension(fullPeerSnap, func(peer *snap.Peer) error { + return snap.Handle((*snapHandler)(full.handler), peer) + }) // Wait a bit for the above handlers to start time.Sleep(250 * time.Millisecond) - // Check that fast sync was disabled - op := peerToSyncOp(downloader.FastSync, empty.handler.peers.peerWithHighestTD()) + // Check that snap sync was disabled + op := peerToSyncOp(downloader.SnapSync, empty.handler.peers.peerWithHighestTD()) if err := empty.handler.doSync(op); err != nil { t.Fatal("sync failed:", err) } - if atomic.LoadUint32(&empty.handler.fastSync) == 1 { - t.Fatalf("fast sync not disabled after successful synchronisation") + if atomic.LoadUint32(&empty.handler.snapSync) == 1 { + t.Fatalf("snap sync not disabled after successful synchronisation") } } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 8213712af..b784e0bcf 100644 --- 
a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -36,6 +36,7 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" @@ -166,7 +167,7 @@ func (api *API) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber // TraceConfig holds extra parameters to trace functions. type TraceConfig struct { - *vm.LogConfig + *logger.Config Tracer *string Timeout *string Reexec *uint64 @@ -175,7 +176,7 @@ type TraceConfig struct { // TraceCallConfig is the config for traceCall API. It holds one more // field to override the state for tracing. type TraceCallConfig struct { - *vm.LogConfig + *logger.Config Tracer *string Timeout *string Reexec *uint64 @@ -184,7 +185,7 @@ type TraceCallConfig struct { // StdTraceConfig holds extra parameters to standard-json trace functions. type StdTraceConfig struct { - vm.LogConfig + logger.Config Reexec *uint64 TxHash common.Hash } @@ -670,11 +671,11 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block } // Retrieve the tracing configurations, or use default values var ( - logConfig vm.LogConfig + logConfig logger.Config txHash common.Hash ) if config != nil { - logConfig = config.LogConfig + logConfig = config.Config txHash = config.TxHash } logConfig.Debug = true @@ -699,7 +700,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block chainConfigCopy := new(params.ChainConfig) *chainConfigCopy = *chainConfig chainConfig = chainConfigCopy - if berlin := config.LogConfig.Overrides.BerlinBlock; berlin != nil { + if berlin := config.Config.Overrides.BerlinBlock; berlin != nil { chainConfig.BerlinBlock = berlin canon = false } @@ -731,7 +732,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block writer = bufio.NewWriter(dump) vmConf = vm.Config{ Debug: true, - Tracer: vm.NewJSONLogger(&logConfig, writer), + Tracer: logger.NewJSONLogger(&logConfig, writer), EnablePreimageRecording: true, } } @@ -848,10 +849,10 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc var traceConfig *TraceConfig if config != nil { traceConfig = &TraceConfig{ - LogConfig: config.LogConfig, - Tracer: config.Tracer, - Timeout: config.Timeout, - Reexec: config.Reexec, + Config: config.Config, + Tracer: config.Tracer, + Timeout: config.Timeout, + Reexec: config.Reexec, } } return api.traceTx(ctx, msg, new(Context), vmctx, statedb, traceConfig) @@ -869,7 +870,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex ) switch { case config == nil: - tracer = vm.NewStructLogger(nil) + tracer = logger.NewStructLogger(nil) case config.Tracer != nil: // Define a meaningful timeout of a single transaction trace timeout := defaultTraceTimeout @@ -898,7 +899,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex } } default: - tracer = vm.NewStructLogger(config.LogConfig) + tracer = logger.NewStructLogger(config.Config) } // Run the transaction with tracing enabled. vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer, NoBaseFee: true}) @@ -913,7 +914,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex // Depending on the tracer type, format and return the output. 
switch tracer := tracer.(type) { - case *vm.StructLogger: + case *logger.StructLogger: // If the result contains a revert reason, return it. returnVal := fmt.Sprintf("%x", result.Return()) if len(result.Revert()) > 0 { diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json index 7627c8c23..ec2ceb426 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_throw_outer_revert.json @@ -59,7 +59,7 @@ "result": { "calls": [ { - "error": "invalid opcode: opcode 0xfe not defined", + "error": "invalid opcode: INVALID", "from": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", "gas": "0x75fe3", "gasUsed": "0x75fe3", diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json index 7627c8c23..ec2ceb426 100644 --- a/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json +++ b/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json @@ -59,7 +59,7 @@ "result": { "calls": [ { - "error": "invalid opcode: opcode 0xfe not defined", + "error": "invalid opcode: INVALID", "from": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", "gas": "0x75fe3", "gasUsed": "0x75fe3", diff --git a/eth/tracers/js/internal/tracers/assets.go b/eth/tracers/js/internal/tracers/assets.go index caeccb7f3..a2bb69dee 100644 --- a/eth/tracers/js/internal/tracers/assets.go +++ b/eth/tracers/js/internal/tracers/assets.go @@ -4,7 +4,7 @@ // bigram_tracer.js (1.712kB) // call_tracer_js.js (3.497kB) // call_tracer_legacy.js (8.956kB) -// evmdis_tracer.js (4.195kB) +// evmdis_tracer.js (4.215kB) // noop_tracer.js (1.271kB) // opcount_tracer.js (1.372kB) // prestate_tracer.js (4.287kB) @@ -158,7 +158,7 @@ func call_tracer_legacyJs() (*asset, error) { return a, nil } -var _evmdis_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x57\xdf\x6f\xda\xca\x12\x7e\x86\xbf\x62\x94\x27\x50\x29\x60\x63\x08\x38\x27\x47\xe2\xa6\xf4\x1c\xae\xd2\x24\x02\x72\x8f\x2a\x94\x87\x05\xc6\xb0\xaa\xf1\x5a\xbb\x6b\x72\xb8\x55\xfe\xf7\xab\xd9\x59\x03\xf9\x75\xdb\x4a\xa7\x0f\x3b\xb5\x77\xbe\x6f\xbe\x9d\x19\xcf\x92\x56\x0b\xae\x54\xbe\xd7\x72\xbd\xb1\x10\xb6\x83\x73\x98\x6d\x10\xd6\xea\x23\xda\x0d\x6a\x2c\xb6\x30\x2c\xec\x46\x69\x53\x6d\xb5\x60\xb6\x91\x06\x12\x99\x22\x48\x03\xb9\xd0\x16\x54\x02\xf6\x85\x7f\x2a\x17\x5a\xe8\x7d\xb3\xda\x6a\x31\xe6\xcd\x6d\x62\x48\x34\x22\x18\x95\xd8\x47\xa1\x31\x86\xbd\x2a\x60\x29\x32\xd0\xb8\x92\xc6\x6a\xb9\x28\x2c\x82\xb4\x20\xb2\x55\x4b\x69\xd8\xaa\x95\x4c\xf6\x44\x29\x2d\x14\xd9\x0a\xb5\x0b\x6d\x51\x6f\x4d\xa9\xe3\x8f\x9b\x7b\xb8\x46\x63\x50\xc3\x1f\x98\xa1\x16\x29\xdc\x15\x8b\x54\x2e\xe1\x5a\x2e\x31\x33\x08\xc2\x40\x4e\x6f\xcc\x06\x57\xb0\x70\x74\x04\xfc\x4c\x52\xa6\x5e\x0a\x7c\x56\x45\xb6\x12\x56\xaa\xac\x01\x28\x49\x39\xec\x50\x1b\xa9\x32\xe8\x94\xa1\x3c\x61\x03\x94\x26\x92\x9a\xb0\x74\x00\x0d\x2a\x27\x5c\x1d\x44\xb6\x87\x54\xd8\x23\xf4\x27\x12\x72\x3c\xf7\x0a\x64\xe6\xc2\x6c\x54\x8e\x60\x37\xc2\xd2\xa9\x1f\x65\x9a\xc2\x02\xa1\x30\x98\x14\x69\x83\xd8\x16\x85\x85\xbf\xc6\xb3\x3f\x6f\xef\x67\x30\xbc\xf9\x0a\x7f\x0d\x27\x93\xe1\xcd\xec\xeb\x05\x3c\x4a\xbb\x51\x85\x05\xdc\x21\x53\xc9\x6d\x9e\x4a\x5c\xc1\xa3\xd0\x5a\x64\x76\x0f\x2a\x21\x86\x2f\xa3\xc9\xd5\x9f\xc3\x9b\xd9\xf0\x5f\xe3\xeb\xf1\xec\x2b\x28\x0d\x9f\xc7\xb3\x9b\xd1\x74\x0a\x9f\x6f\x27\x30\x84\xbb\xe1\x64\x36\xbe\xba\xbf\x1e\x4e\xe0\xee\x7e\x72\x77\x3b\x1d\x35\x61\x8a\xa4\x0a\x09\xff\xe3\x9c\x27\xae\x7a\x1a\x61\x85\x56\xc8\xd4\x94\x99\xf8\xaa\x0a\x30\x1b\x55\xa4\x2b\xd8\x88\x1d\x82\xc6\x25\xca\x1d\xae\x40\xc0\x52\xe5\xfb\x9f\x2e\x2a\x71\x89\x54\x65\x6b\x77\xe6\x77\x1b\x12\xc6\x09\x64\xca\x36\xc0\x20\xc2\x6f\x1b\x6b\xf3\xb8\xd5\x7a\x7c\x7c\x6c\xae\xb3\xa2\xa9\xf4\xba\x95\x32\x9d\x69\xfd\xde\xac\x12\x27\xee\xb6\x2b\x69\x66\x5a\x2c\x51\x83\x46\x5b\xe8\xcc\x80\x29\x92\x44\x2e\x25\x66\x16\x64\x96\x28\xbd\x75\x7d\x02\x89\x56\x5b\x10\x60\xc9\x19\xac\x82\x1c\x35\x6d\x7a\x8e\x8f\xc6\xee\x53\xa7\x73\x25\x8d\x30\x06\xb7\x8b\x74\xdf\xac\x7e\xaf\x56\x8c\x15\xcb\x6f\x31\xcc\xbf\xab\xdc\xc4\x30\x7f\x78\x7a\x68\x54\xab\x95\x2c\x2f\xcc\x06\x4d\x0c\xdf\xdb\x31\xb4\x1b\x10\xc4\x10\x34\x20\x74\x6b\xc7\xad\x91\x5b\xbb\x6e\xed\xb9\xf5\xdc\xad\x7d\xb7\x0e\xdc\x1a\xb4\xd9\x30\x3a\x60\xb7\x80\xfd\x02\x76\x0c\xd8\x33\x64\xcf\xd0\xc7\xe1\x40\x21\x47\x0a\x39\x54\xc8\xb1\x42\x66\xe9\xb0\x4b\xc4\x2c\x11\xb3\x74\x99\xa5\xcb\x2c\x5d\x76\xe9\x32\x4b\xd7\x0b\xee\xba\xf3\x74\x99\xa5\x7b\xce\x4f\xcc\xd2\x65\x96\x1e\x1f\xb9\xc7\x80\x9e\x3f\x22\x03\x7a\x2c\xbe\xc7\x80\x1e\x03\xfa\x0c\xe8\x73\xd8\x7e\xc8\x4f\x1d\x36\xcc\xd2\xe7\xb0\xfd\x1e\x1b\x0e\xdb\x67\x96\x3e\xb3\x0c\x58\xfc\x20\x70\x7b\x03\x8e\x37\xe0\x78\x03\x9f\xd5\x32\xad\x3e\xaf\x6d\x9f\xd8\x76\xe8\x6d\xc7\xdb\xc8\xdb\xae\xb7\x3e\xf3\x6d\x9f\xfa\xb6\xcf\x7d\xdb\xf3\x1d\xea\xe4\xf9\x02\xcf\x17\x78\xbe\xc0\xf3\x05\x9e\xaf\xac\x64\x59\xca\xb2\x96\xbe\x98\x81\xaf\x66\xe0\xcb\x19\xf8\x7a\x06\xbe\xa0\x81\xaf\x68\xe0\x4b\x1a\xf8\x9a\x06\xa1\xe7\x0b\xfb\x31\x84\x64\x07\x31\x74\x1a\x10\x74\xda\x31\x44\x64\x83\x18\xba\x64\xc3\x18\x7a\x64\x3b\x31\x9c\x93\x8d\x62\xe8\x93\xed\xc6\x30\x20\x4b\x7c\xd4\xb5\x1d\x22\x24\xc6\x0e\x29\x24\xca\x0e\x49\x24\xce\x88\x34\x12\x69\x44\x22\x89\x35\x22\x95\x44\x1b\x91\x4c\xe2\x8d\x22\xd6\x11\x75\x59\x47\xd4\x63\x1d\xd1\x39\xeb\xa0\xee\x73\x80\x01\xeb\xa0\xfe\x23\x1d\xd4\x80\xa4\xc3\x75\x20\xe9\x70\x3d\x48\x3a\x5c\x17\x12\x25\xf5\xa1\xd3\xe1\x3a\x91\x48\xa
9\x17\x9d\x0e\xd7\x8d\x44\xeb\xfa\x91\x78\x7d\x47\x06\xbd\xc0\xdb\xd0\xdb\x8e\xb7\x91\xb3\x61\xe4\xbf\xa2\xc8\x7f\x46\x91\xff\x8e\xa2\x8e\xdf\xf7\x7e\xee\x23\x78\xa2\xef\xbc\xd5\x02\x8d\xa6\x48\x2d\x4d\x7f\x99\xed\xd4\x37\x9a\xcf\x1b\xcc\x40\xa4\xa9\x1b\x64\x2a\x5f\xaa\x15\x1a\x1e\x90\x0b\xc4\x0c\xa4\x45\x2d\xe8\x86\x50\x3b\xd4\x74\x39\x96\xa3\xc9\xd1\x11\x26\x91\x99\x48\x4b\x62\x3f\x44\x69\x30\xc9\x6c\xdd\xac\x56\xf8\x7d\x0c\x49\x91\x2d\x69\x74\xd5\xea\xf0\xdd\x53\x80\xdd\x48\xd3\x74\x23\x69\xde\x7e\x68\xaa\xdc\x5c\x40\xa9\x33\x11\x6f\xc9\x24\x6a\xb1\xb4\x85\x48\x01\xff\xc6\x65\xe1\x66\xa1\x4a\x40\x64\x5e\x39\x24\x3c\xf1\x2b\x0e\x7f\x12\x35\x55\xeb\x06\xac\x16\x14\xbc\x0c\x61\x2c\xe6\xa7\x11\xe8\xde\xc0\x1d\xea\x7d\xc9\xe5\xee\x41\x0a\xf9\x9f\x2f\x3e\x1c\x12\x35\xe1\xde\x64\xae\x56\x2a\x3b\xa1\x21\xd1\x62\x8b\x70\x79\x7a\xba\xe3\x7f\x9b\x29\x66\x6b\xbb\x81\x8f\x10\x3c\x5c\x54\x3d\x02\xb5\x56\x1a\x2e\x21\x55\xeb\xe6\x1a\xed\x88\x1e\x6b\xf5\x8b\x6a\xa5\x22\x13\xa8\xb9\x5d\xa6\xaf\x38\xee\xf9\x99\x7b\x75\xf6\x00\x97\x0c\x25\xcf\x27\xc0\xd4\x20\x10\xc0\xd3\x7c\xc2\xdc\x6e\x6a\x75\xb8\x3c\x95\xe2\xe3\x7b\x3a\x95\xd3\xa5\x02\x97\xfc\x54\x51\x79\x0c\xf4\x8f\x08\x54\xde\xb4\xea\xa6\xd8\x2e\x50\xd7\xea\x0d\xb7\xbd\x22\x42\x88\xe1\x39\x3f\xef\x95\x65\x9e\x3f\xb8\xe7\x27\x92\xe4\xd4\x3b\xc5\x54\xdb\xf2\xe4\xbf\x43\xdb\x47\x77\x67\xcf\x35\xee\x54\x0e\x97\x70\x70\x9c\xbf\x82\x70\xb2\x08\x91\x28\x5d\x23\x94\x84\x4b\x68\x5f\x80\x84\xdf\xf8\x6c\xfe\x06\x9b\x33\x5b\x53\xe5\x0f\x17\x20\x3f\x7c\xa8\x3b\x50\xc5\xbf\x65\x8d\x4d\x72\x75\x39\xe2\x84\xe4\x88\xdf\x6a\xb2\xde\xb4\x6a\x6a\xb5\xcc\xd6\xb5\xa0\x57\x77\xb9\xaf\x3c\xd1\x62\x1e\xa5\x5d\xb2\xbf\x4b\x89\x77\xaa\xfb\x33\x2c\x85\x41\x38\xbb\x1a\x5e\x5f\x9f\xc5\x70\x7c\xb8\xba\xfd\x34\x3a\x8b\x0f\x87\x94\x99\xb1\xf4\xfb\x95\x4b\x7c\x12\xb7\x53\x6f\xee\x44\x5a\xe0\x6d\xc2\xf5\x3e\xb8\xcb\xff\xe2\x6b\xef\xe8\x95\x37\x17\x70\x7e\xb6\x16\xc6\xb5\xc3\x0b\x40\xfb\x5d\x80\x55\x6f\xf9\x07\xcf\xd3\xf0\x1c\xe2\x98\xde\x42\x85\x27\xa8\x17\x18\x99\xe5\x85\x3d\x60\xb6\xb8\x55\x7a\xdf\x34\xf4\xcb\xa7\xe6\x73\xd2\x38\x24\xe7\x83\x3f\xf7\x0b\x8a\x63\xaf\x67\x45\x9a\x3e\xdf\xe3\x39\xf2\xce\xa6\xca\x39\x27\x73\xdf\x3b\x27\x1f\x81\x6b\x01\xf6\xf3\xd1\x16\x1a\xc5\xb7\x8b\x63\x45\x3f\x8d\xae\x47\x7f\x0c\x67\xa3\x67\x95\x9d\xce\x86\xb3\xf1\x15\xbf\xfa\x71\x6d\xc3\x5f\xaa\xed\xeb\x4e\x38\x9e\xc3\x1d\x03\x5e\xb5\xe0\xdb\x2d\xf0\xcb\x3d\xf0\x4b\x4d\x70\x2c\xe8\x3f\x51\xd1\xff\x5f\xd2\x7f\xba\xa6\x93\xd1\xec\x7e\x72\x73\x52\x3a\xfa\x7b\xe5\x27\xbe\x19\xef\xfa\x76\xdd\x82\x57\xee\x3c\xbe\xfc\x15\xf7\x46\xe3\xab\xc2\x36\x5c\xe8\x0f\x25\xeb\x3b\x7a\xa7\xb3\xdb\xbb\x63\xef\xdd\x8f\xaf\xc6\x87\xa1\xf2\xa3\x18\xed\x06\xb4\xdf\x61\xfd\xf7\xfd\x97\xbb\x4f\xa3\xe9\xcc\x33\x95\x99\xcd\x97\x87\xcf\x74\x8d\xf6\xee\xaa\x76\x32\x03\x65\x52\xce\x3f\x69\xee\x28\xcd\xe5\xf4\x3b\xa0\x53\xcc\x0e\xf0\x67\x37\x07\x7c\x84\xf6\xdf\x5d\x3c\x72\x1d\x87\xfb\xcb\x82\xf9\x1b\xcc\x11\x1f\xeb\xfa\xec\x22\x3d\x9e\xee\xf9\x1d\xc4\xf8\x6a\xe5\xa9\xfa\x54\xfd\x5f\x00\x00\x00\xff\xff\xdf\x2f\xd9\xfa\x63\x10\x00\x00") +var _evmdis_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x97\x6f\x6f\xda\x48\x13\xc0\x5f\xc3\xa7\x18\xe5\x15\xa8\x14\xb0\x31\x04\x9c\xcb\x49\x3c\x29\xe9\xe5\x51\x9a\x44\x40\xee\x54\xa1\xbc\x58\x60\x0c\xab\x1a\xaf\xb5\xbb\x26\xc7\x53\xe5\xbb\x3f\x9a\x9d\x35\x90\x7f\xd7\x56\xba\xbe\xf0\xd4\xde\x99\xdf\xfc\xdd\x5d\xd2\x6a\xc1\x85\xca\x77\x5a\xae\xd6\x16\xc2\x76\x70\x0a\xd3\x35\xc2\x4a\x7d\x44\xbb\x46\x8d\xc5\x06\x86\x85\x5d\x2b\x6d\xaa\xad\x16\x4c\xd7\xd2\x40\x22\x53\x04\x69\x20\x17\xda\x82\x4a\xc0\xbe\xd0\x4f\xe5\x5c\x0b\xbd\x6b\x56\x5b\x2d\xb6\x79\x73\x99\x08\x89\x46\x04\xa3\x12\xfb\x28\x34\xc6\xb0\x53\x05\x2c\x44\x06\x1a\x97\xd2\x58\x2d\xe7\x85\x45\x90\x16\x44\xb6\x6c\x29\x0d\x1b\xb5\x94\xc9\x8e\x90\xd2\x42\x91\x2d\x51\x3b\xd7\x16\xf5\xc6\x94\x71\x7c\xbe\xb9\x87\x6b\x34\x06\x35\x7c\xc6\x0c\xb5\x48\xe1\xae\x98\xa7\x72\x01\xd7\x72\x81\x99\x41\x10\x06\x72\xfa\x62\xd6\xb8\x84\xb9\xc3\x91\xe1\x25\x85\x32\xf1\xa1\xc0\xa5\x2a\xb2\xa5\xb0\x52\x65\x0d\x40\x49\x91\xc3\x16\xb5\x91\x2a\x83\x4e\xe9\xca\x03\x1b\xa0\x34\x41\x6a\xc2\x52\x02\x1a\x54\x4e\x76\x75\x10\xd9\x0e\x52\x61\x0f\xa6\x3f\x51\x90\x43\xde\x4b\x90\x99\x73\xb3\x56\x39\x82\x5d\x0b\x4b\x59\x3f\xca\x34\x85\x39\x42\x61\x30\x29\xd2\x06\xd1\xe6\x85\x85\xbf\xae\xa6\x7f\xdc\xde\x4f\x61\x78\xf3\x15\xfe\x1a\x8e\xc7\xc3\x9b\xe9\xd7\x33\x78\x94\x76\xad\x0a\x0b\xb8\x45\x46\xc9\x4d\x9e\x4a\x5c\xc2\xa3\xd0\x5a\x64\x76\x07\x2a\x21\xc2\x97\xd1\xf8\xe2\x8f\xe1\xcd\x74\xf8\x9f\xab\xeb\xab\xe9\x57\x50\x1a\x2e\xaf\xa6\x37\xa3\xc9\x04\x2e\x6f\xc7\x30\x84\xbb\xe1\x78\x7a\x75\x71\x7f\x3d\x1c\xc3\xdd\xfd\xf8\xee\x76\x32\x6a\xc2\x04\x29\x2a\x24\xfb\x1f\xd7\x3c\x71\xdd\xd3\x08\x4b\xb4\x42\xa6\xa6\xac\xc4\x57\x55\x80\x59\xab\x22\x5d\xc2\x5a\x6c\x11\x34\x2e\x50\x6e\x71\x09\x02\x16\x2a\xdf\xfd\x74\x53\x89\x25\x52\x95\xad\x5c\xce\xef\x0e\x24\x5c\x25\x90\x29\xdb\x00\x83\x08\xbf\xad\xad\xcd\xe3\x56\xeb\xf1\xf1\xb1\xb9\xca\x8a\xa6\xd2\xab\x56\xca\x38\xd3\xfa\xbd\x59\x25\x26\x6e\x37\x4b\x69\xa6\x5a\x2c\x50\x83\x46\x5b\xe8\xcc\x80\x29\x92\x44\x2e\x24\x66\x16\x64\x96\x28\xbd\x71\x73\x02\x89\x56\x1b\x10\x60\x49\x19\xac\x82\x1c\x35\x2d\x7a\xc6\x47\x63\x77\xa9\x8b\x73\x29\x8d\x30\x06\x37\xf3\x74\xd7\xac\x7e\xaf\x56\x8c\x15\x8b\x6f\x31\xcc\xbe\xab\xdc\xc4\x30\x7b\x78\x7a\x68\x54\xab\x95\x2c\x2f\xcc\x1a\x4d\x0c\xdf\xdb\x31\xb4\x1b\x10\xc4\x10\x34\x20\x74\xcf\x8e\x7b\x46\xee\xd9\x75\xcf\x9e\x7b\x9e\xba\x67\xdf\x3d\x07\xee\x19\xb4\x59\xb0\x75\xc0\x6a\x01\xeb\x05\xac\x18\xb0\x66\xc8\x9a\xa1\xf7\xc3\x8e\x42\xf6\x14\xb2\xab\x90\x7d\x85\x4c\xe9\xb0\x4a\xc4\x94\x88\x29\x5d\xa6\x74\x99\xd2\x65\x95\x2e\x53\xba\x3e\xe0\xae\xcb\xa7\xcb\x94\xee\x29\xbf\x31\xa5\xcb\x94\x1e\xa7\xdc\x63\x83\x9e\x4f\x91\x0d\x7a\x1c\x7c\x8f\x0d\x7a\x6c\xd0\x67\x83\x3e\xbb\xed\x87\xfc\xd6\x61\xc1\x94\x3e\xbb\xed\xf7\x58\xb0\xdb\x3e\x53\xfa\x4c\x19\x70\xf0\x83\xc0\xad\x0d\xd8\xdf\x80\xfd\x0d\x7c\x55\xcb\xb2\xfa\xba\xb6\x7d\x61\xdb\xa1\x97\x1d\x2f\x23\x2f\xbb\x5e\xfa\xca\xb7\x7d\xe9\xdb\xbe\xf6\x6d\xcf\xdb\xf7\xc9\xf3\x02\xcf\x0b\x3c\x2f\xf0\xbc\xc0\xf3\xca\x4e\x96\xad\x2c\x7b\xe9\x9b\x19\xf8\x6e\x06\xbe\x9d\x81\xef\x67\xe0\x1b\x1a\xf8\x8e\x06\xbe\xa5\x81\xef\x69\x10\x7a\x5e\xd8\x8f\x21\x24\x39\x88\xa1\xd3\x80\xa0\xd3\x8e\x21\x22\x19\xc4\xd0\x25\x19\xc6\xd0\x23\xd9\x89\xe1\x94\x64\x14\x43\x9f\x64\x37\x86\x01\x49\xe2\xd1\xd4\x76\x08\x48\xc4\x0e\x45\x48\xc8\x0e\x85\x48\xcc\x88\x62\x24\x68\x44\x41\x12\x35\xa2\x28\x09\x1b\x51\x98\xc4\x8d\x22\x8e\x23\xea\x72\x1c\x51\x8f\xe3\x88\x4e\x39\x0e\x9a\x3e\x67\x30\xe0\x38\x68\xfe\x28\x0e\x1a\x40\x8a\xc3\x4d\x20\xc5\xe1\x66\x90\xe2\x70\x53\x48\x48\x9a\x43\x17\x87\x9b\x44\x82\xd2\x2c\xb
a\x38\xdc\x34\x12\xd6\xcd\x23\x71\xfd\x44\x06\xbd\xc0\xcb\xd0\xcb\x8e\x97\x91\x93\x61\xe4\x77\x51\xe4\xb7\x51\xe4\xf7\x51\xd4\xf1\xeb\x5e\xcf\x6d\x82\x27\xda\xe7\xad\x16\x68\x34\x45\x6a\xe9\xf4\x97\xd9\x56\x7d\xa3\xf3\x79\x8d\x19\x88\x34\x75\x07\x99\xca\x17\x6a\x89\x86\x0f\xc8\x39\x62\x06\xd2\xa2\x16\x74\x43\xa8\x2d\x6a\xba\x1c\xcb\xa3\xc9\xe1\xc8\x26\x91\x99\x48\x4b\xb0\x3f\x44\xe9\x60\x92\xd9\xaa\x59\xad\xf0\xf7\x18\x92\x22\x5b\xd0\xd1\x55\xab\xc3\x77\x8f\x00\xbb\x96\xa6\xe9\x8e\xa4\x59\xfb\xa1\xa9\x72\x73\x06\x65\x9c\x89\x78\x2b\x4c\x42\x8b\x85\x2d\x44\x0a\xf8\x37\x2e\x0a\x77\x16\xaa\x04\x44\xe6\x23\x87\x84\x4f\xfc\x8a\xb3\x3f\xf2\x9a\xaa\x55\x03\x96\x73\x72\x5e\xba\x30\x16\xf3\x63\x0f\x74\x6f\xe0\x16\xf5\xae\x64\xb9\x7b\x90\x5c\xfe\xf9\xc5\xbb\x43\x42\x93\xdd\x9b\xe4\x6a\xa5\xb2\x15\x1a\x12\x2d\x36\x08\xe7\xc7\xd9\x1d\xfe\xdb\x4c\x31\x5b\xd9\x35\x7c\x84\xe0\xe1\xac\xea\x2d\x50\x6b\xa5\xe1\x1c\x52\xb5\x6a\xae\xd0\x8e\xe8\xb5\x56\x3f\xab\x56\x2a\x32\x81\x9a\x5b\x65\x7c\xc5\xb1\x67\x27\xee\xd3\xc9\x03\x9c\xb3\x29\x69\x3e\x01\xa6\x06\x81\x0c\x3c\xe6\x13\xe6\x76\x5d\xab\xc3\xf9\x71\x28\xde\xbf\xc7\xa9\x9c\x2e\x15\x38\xe7\xb7\x8a\xca\x63\xa0\x7f\x04\x50\x79\xd3\xaa\x9b\x62\x33\x47\x5d\xab\x37\xdc\xf2\x92\x80\x10\xc3\x73\x3e\xaf\x95\x6d\x9e\x3d\xb8\xf7\x27\x0a\xc9\x45\xef\x22\xa6\xde\x96\x99\xff\x0e\x6d\xef\xdd\xe5\x9e\x6b\xdc\xaa\x1c\xce\x61\xaf\x38\x7b\x65\xc2\xc5\x22\x8b\x44\xe9\x1a\x59\x49\x38\x87\xf6\x19\x48\xf8\x8d\x73\xf3\x37\xd8\x8c\x69\x4d\x95\x3f\x9c\x81\xfc\xf0\xa1\xee\x8c\x2a\xfe\x2b\xc7\xd8\x24\x55\x57\x23\x2e\x48\x8e\xf8\xad\x26\xeb\x4d\xab\x26\x56\xcb\x6c\x55\x0b\x7a\x75\x57\xfb\xca\x13\x3d\xcc\xa3\xb4\x0b\xd6\x77\x25\xf1\x4a\x75\x9f\xc3\x42\x18\x84\x93\x8b\xe1\xf5\xf5\x49\x0c\x87\x97\x8b\xdb\x4f\xa3\x93\x78\x9f\xa4\xcc\x8c\xa5\xdf\xaf\xdc\xe2\x23\xbf\x9d\x7a\x73\x2b\xd2\x02\x6f\x13\xee\xf7\x5e\x5d\xfe\x0f\x5f\x6b\x47\xaf\xb4\xb9\x81\xb3\x93\x95\x30\x6e\x1c\x5e\x18\xb4\xdf\x35\xb0\xea\x2d\xfd\xe0\x79\x19\x9e\x9b\x38\xd2\x5b\x56\xe1\x91\xd5\x0b\x1b\x99\xe5\x85\xdd\xdb\x6c\x70\xa3\xf4\xae\x69\xe8\x97\x4f\xcd\xd7\xa4\xb1\x2f\xce\x07\x9f\xf7\x0b\xc4\x61\xd6\xb3\x22\x4d\x9f\xaf\xf1\x39\xf2\xce\xa2\xca\xb9\x26\x33\x3f\x3b\x47\x9b\xc0\x8d\x00\xeb\x79\x6f\x73\x8d\xe2\xdb\xd9\xa1\xa3\x9f\x46\xd7\xa3\xcf\xc3\xe9\xe8\x59\x67\x27\xd3\xe1\xf4\xea\x82\x3f\xfd\xb8\xb7\xe1\x2f\xf5\xf6\xf5\x24\x1c\xf2\x70\x69\xc0\xab\x11\x7c\x7b\x04\x7e\x79\x06\x7e\x69\x08\x0e\x0d\xfd\x37\x3a\xfa\xcf\x2d\xfd\xb7\x7b\x3a\x1e\x4d\xef\xc7\x37\xfb\x6e\x8e\x47\x7f\x8e\xc6\xd3\xa3\x4e\xd2\x9f\x2f\x3f\xb1\x85\xbc\xea\xdb\x6d\x0c\x5e\xa9\xf3\x69\xe6\x6f\xbc\x37\xf6\x81\x2a\x6c\xc3\xb9\xfe\x50\x52\xdf\x09\x7f\x32\xbd\xbd\x3b\x8c\xe2\xe8\xfa\xf2\xd3\x68\x32\x1d\xdf\x5f\x94\x29\xfc\xc8\x51\xbb\x01\xed\x77\xd0\xff\xbd\xff\x72\x47\x34\x4f\x2a\xab\x9d\x2f\xf6\x5b\x77\x85\xf6\xee\xa2\x76\x74\x2e\xca\xa4\x3c\x13\xa5\xb9\xa3\xd2\x97\x27\xe2\xde\x3a\xc5\x6c\x6f\xfe\xec\x36\x81\x8f\xd0\xfe\xbb\x8b\x07\xd6\xe1\xc0\x7f\xd9\x44\x7f\xab\x39\xf0\xa1\xd7\xcf\x2e\xd7\x43\x76\xcf\xef\x25\xb6\xaf\x56\x9e\xaa\x4f\xd5\xff\x07\x00\x00\xff\xff\xa5\x5b\x87\x4a\x77\x10\x00\x00") func evmdis_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -174,7 +174,7 @@ func evmdis_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "evmdis_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb5, 0xc8, 0x73, 0x8e, 0xfb, 0x1f, 0x84, 0x7d, 0x37, 0xd9, 0x26, 0x24, 0x37, 0xb8, 0x65, 0xb1, 0xed, 0xa0, 0x76, 0x9a, 0xf0, 0x8e, 0x3a, 0x9b, 0x20, 0x93, 0x27, 0x26, 0x2e, 0xc9, 
0x9b, 0xde}}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x13, 0xeb, 0xca, 0x1f, 0x5f, 0xd3, 0x29, 0x81, 0xbb, 0xd8, 0xc8, 0x4a, 0x3a, 0x38, 0x10, 0xe2, 0xe7, 0xa4, 0xcd, 0xde, 0x78, 0x85, 0xc2, 0xed, 0xee, 0xb, 0xd6, 0xdb, 0x5e, 0x44, 0x28, 0x16}}
 	return a, nil
 }
diff --git a/eth/tracers/js/internal/tracers/evmdis_tracer.js b/eth/tracers/js/internal/tracers/evmdis_tracer.js
index bb19777ab..c841c57f1 100644
--- a/eth/tracers/js/internal/tracers/evmdis_tracer.js
+++ b/eth/tracers/js/internal/tracers/evmdis_tracer.js
@@ -71,12 +71,12 @@
 			opinfo["ops"] = [];
 			this.stack.push(opinfo);
 			break;
-		case "RETURN":
+		case "RETURN": case "REVERT":
 			var out = log.stack.peek(0).valueOf();
 			var outsize = log.stack.peek(1).valueOf();
 			frame.return = log.memory.slice(out, out + outsize);
 			break;
-		case "STOP": case "SUICIDE":
+		case "STOP": case "SELFDESTRUCT":
 			frame.return = log.memory.slice(0, 0);
 			break;
 		case "JUMPDEST":
diff --git a/core/vm/access_list_tracer.go b/eth/tracers/logger/access_list_tracer.go
similarity index 81%
rename from core/vm/access_list_tracer.go
rename to eth/tracers/logger/access_list_tracer.go
index 1368e4c99..181fc47ac 100644
--- a/core/vm/access_list_tracer.go
+++ b/eth/tracers/logger/access_list_tracer.go
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package vm
+package logger
 
 import (
 	"math/big"
@@ -22,6 +22,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
 )
 
 // accessList is an accumulator for the set of accounts and storage slots an EVM
@@ -137,36 +138,38 @@ func NewAccessListTracer(acl types.AccessList, from, to common.Address, precompi
 	}
 }
 
-func (a *AccessListTracer) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+func (a *AccessListTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
 }
 
 // CaptureState captures all opcodes that touch storage or addresses and adds them to the accesslist.
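After the move, callers obtain this tracer from eth/tracers/logger and inject it through vm.Config instead of constructing it inside core/vm. A sketch of the new wiring, assuming the NewAccessListTracer signature shown (truncated) in the hunk above; the wrapper function is illustrative:

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

// tracedEVMConfig builds a vm.Config that records every account and storage
// slot touched during execution, using the relocated access-list tracer.
func tracedEVMConfig(from, to common.Address, precompiles []common.Address) vm.Config {
	tracer := logger.NewAccessListTracer(types.AccessList{}, from, to, precompiles)
	return vm.Config{Debug: true, Tracer: tracer}
}
```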
-func (a *AccessListTracer) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) { +func (a *AccessListTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { stack := scope.Stack - if (op == SLOAD || op == SSTORE) && stack.len() >= 1 { - slot := common.Hash(stack.data[stack.len()-1].Bytes32()) + stackData := stack.Data() + stackLen := len(stackData) + if (op == vm.SLOAD || op == vm.SSTORE) && stackLen >= 1 { + slot := common.Hash(stackData[stackLen-1].Bytes32()) a.list.addSlot(scope.Contract.Address(), slot) } - if (op == EXTCODECOPY || op == EXTCODEHASH || op == EXTCODESIZE || op == BALANCE || op == SELFDESTRUCT) && stack.len() >= 1 { - addr := common.Address(stack.data[stack.len()-1].Bytes20()) + if (op == vm.EXTCODECOPY || op == vm.EXTCODEHASH || op == vm.EXTCODESIZE || op == vm.BALANCE || op == vm.SELFDESTRUCT) && stackLen >= 1 { + addr := common.Address(stackData[stackLen-1].Bytes20()) if _, ok := a.excl[addr]; !ok { a.list.addAddress(addr) } } - if (op == DELEGATECALL || op == CALL || op == STATICCALL || op == CALLCODE) && stack.len() >= 5 { - addr := common.Address(stack.data[stack.len()-2].Bytes20()) + if (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE) && stackLen >= 5 { + addr := common.Address(stackData[stackLen-2].Bytes20()) if _, ok := a.excl[addr]; !ok { a.list.addAddress(addr) } } } -func (*AccessListTracer) CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { +func (*AccessListTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { } func (*AccessListTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {} -func (*AccessListTracer) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +func (*AccessListTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { } func (*AccessListTracer) CaptureExit(output []byte, gasUsed uint64, err error) {} diff --git a/core/vm/gen_structlog.go b/eth/tracers/logger/gen_structlog.go similarity index 94% rename from core/vm/gen_structlog.go rename to eth/tracers/logger/gen_structlog.go index 365f3b791..9e71b555c 100644 --- a/core/vm/gen_structlog.go +++ b/eth/tracers/logger/gen_structlog.go @@ -1,6 +1,6 @@ // Code generated by github.com/fjl/gencodec. DO NOT EDIT. 
-package vm
+package logger
 
 import (
 	"encoding/json"
@@ -8,6 +8,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/holiman/uint256"
 )
 
@@ -17,7 +18,7 @@ var _ = (*structLogMarshaling)(nil)
 func (s StructLog) MarshalJSON() ([]byte, error) {
 	type StructLog struct {
 		Pc      uint64              `json:"pc"`
-		Op      OpCode              `json:"op"`
+		Op      vm.OpCode           `json:"op"`
 		Gas     math.HexOrDecimal64 `json:"gas"`
 		GasCost math.HexOrDecimal64 `json:"gasCost"`
 		Memory  hexutil.Bytes       `json:"memory"`
@@ -53,7 +54,7 @@ func (s StructLog) MarshalJSON() ([]byte, error) {
 func (s *StructLog) UnmarshalJSON(input []byte) error {
 	type StructLog struct {
 		Pc      *uint64              `json:"pc"`
-		Op      *OpCode              `json:"op"`
+		Op      *vm.OpCode           `json:"op"`
 		Gas     *math.HexOrDecimal64 `json:"gas"`
 		GasCost *math.HexOrDecimal64 `json:"gasCost"`
 		Memory  *hexutil.Bytes       `json:"memory"`
diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go
new file mode 100644
index 000000000..846193582
--- /dev/null
+++ b/eth/tracers/logger/logger.go
@@ -0,0 +1,349 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package logger
+
+import (
+	"encoding/hex"
+	"fmt"
+	"io"
+	"math/big"
+	"strings"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/params"
+	"github.com/holiman/uint256"
+)
+
+// Storage represents a contract's storage.
+type Storage map[common.Hash]common.Hash
+
+// Copy duplicates the current storage.
+func (s Storage) Copy() Storage {
+	cpy := make(Storage)
+	for key, value := range s {
+		cpy[key] = value
+	}
+	return cpy
+}
+
+// Config are the configuration options for the structured EVM logger.
+type Config struct {
+	EnableMemory     bool // enable memory capture
+	DisableStack     bool // disable stack capture
+	DisableStorage   bool // disable storage capture
+	EnableReturnData bool // enable return data capture
+	Debug            bool // print output during capture end
+	Limit            int  // maximum length of output, but zero means unlimited
+	// Chain overrides, can be used to execute a trace using future fork rules
+	Overrides *params.ChainConfig `json:"overrides,omitempty"`
+}
+
+//go:generate gencodec -type StructLog -field-override structLogMarshaling -out gen_structlog.go
+
+// StructLog is emitted by the EVM on each cycle and lists information about the current internal state
+// prior to the execution of the statement.
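Consumption of the relocated logger is unchanged apart from the import path: build a Config, wrap it in a StructLogger, and activate it via vm.Config, then walk the captured logs. A sketch using only the APIs defined in this file (helper names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

// newTracingConfig builds a struct logger plus the vm.Config that enables it;
// the caller executes a message with that config and then inspects the trace.
func newTracingConfig() (*logger.StructLogger, vm.Config) {
	tracer := logger.NewStructLogger(&logger.Config{
		EnableMemory:     false, // memory snapshots are large, keep them off
		EnableReturnData: true,  // but do keep each step's return data
	})
	return tracer, vm.Config{Debug: true, Tracer: tracer}
}

// dumpLogs prints one line per executed opcode from the captured trace.
func dumpLogs(tracer *logger.StructLogger) {
	for _, entry := range tracer.StructLogs() {
		fmt.Printf("pc=%4d op=%-10s gas=%d\n", entry.Pc, entry.OpName(), entry.Gas)
	}
}
```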
+
+// StructLog is emitted to the EVM each cycle and lists information about the current internal state
+// prior to the execution of the statement.
+type StructLog struct {
+	Pc            uint64                      `json:"pc"`
+	Op            vm.OpCode                   `json:"op"`
+	Gas           uint64                      `json:"gas"`
+	GasCost       uint64                      `json:"gasCost"`
+	Memory        []byte                      `json:"memory"`
+	MemorySize    int                         `json:"memSize"`
+	Stack         []uint256.Int               `json:"stack"`
+	ReturnData    []byte                      `json:"returnData"`
+	Storage       map[common.Hash]common.Hash `json:"-"`
+	Depth         int                         `json:"depth"`
+	RefundCounter uint64                      `json:"refund"`
+	Err           error                       `json:"-"`
+}
+
+// overrides for gencodec
+type structLogMarshaling struct {
+	Gas         math.HexOrDecimal64
+	GasCost    math.HexOrDecimal64
+	Memory     hexutil.Bytes
+	ReturnData hexutil.Bytes
+	OpName      string `json:"opName"` // adds call to OpName() in MarshalJSON
+	ErrorString string `json:"error"`  // adds call to ErrorString() in MarshalJSON
+}
+
+// OpName formats the operand name in a human-readable format.
+func (s *StructLog) OpName() string {
+	return s.Op.String()
+}
+
+// ErrorString formats the log's error as a string.
+func (s *StructLog) ErrorString() string {
+	if s.Err != nil {
+		return s.Err.Error()
+	}
+	return ""
+}
+
+// StructLogger is an EVM state logger and implements EVMLogger.
+//
+// StructLogger can capture state based on the given Log configuration and also keeps
+// a track record of modified storage which is used in reporting snapshots of the
+// contracts' storage.
+type StructLogger struct {
+	cfg Config
+	env *vm.EVM
+
+	storage map[common.Address]Storage
+	logs    []StructLog
+	output  []byte
+	err     error
+}
+
+// NewStructLogger returns a new logger
+func NewStructLogger(cfg *Config) *StructLogger {
+	logger := &StructLogger{
+		storage: make(map[common.Address]Storage),
+	}
+	if cfg != nil {
+		logger.cfg = *cfg
+	}
+	return logger
+}
+
+// Reset clears the data held by the logger.
+func (l *StructLogger) Reset() {
+	l.storage = make(map[common.Address]Storage)
+	l.output = make([]byte, 0)
+	l.logs = l.logs[:0]
+	l.err = nil
+}
+
+// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
+func (l *StructLogger) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+	l.env = env
+}
+
+// CaptureState logs a new structured log message and pushes it out to the environment
+//
+// CaptureState also tracks SLOAD/SSTORE ops to track storage change.
+func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+	memory := scope.Memory
+	stack := scope.Stack
+	contract := scope.Contract
+	// check if already accumulated the specified number of logs
+	if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) {
+		return
+	}
+	// Copy a snapshot of the current memory state to a new buffer
+	var mem []byte
+	if l.cfg.EnableMemory {
+		mem = make([]byte, len(memory.Data()))
+		copy(mem, memory.Data())
+	}
+	// Copy a snapshot of the current stack state to a new buffer
+	var stck []uint256.Int
+	if !l.cfg.DisableStack {
+		stck = make([]uint256.Int, len(stack.Data()))
+		for i, item := range stack.Data() {
+			stck[i] = item
+		}
+	}
+	stackData := stack.Data()
+	stackLen := len(stackData)
+	// Copy a snapshot of the current storage to a new container
+	var storage Storage
+	if !l.cfg.DisableStorage && (op == vm.SLOAD || op == vm.SSTORE) {
+		// initialise new changed values storage container for this contract
+		// if not present.
+		if l.storage[contract.Address()] == nil {
+			l.storage[contract.Address()] = make(Storage)
+		}
+		// capture SLOAD opcodes and record the read entry in the local storage
+		if op == vm.SLOAD && stackLen >= 1 {
+			var (
+				address = common.Hash(stackData[stackLen-1].Bytes32())
+				value   = l.env.StateDB.GetState(contract.Address(), address)
+			)
+			l.storage[contract.Address()][address] = value
+			storage = l.storage[contract.Address()].Copy()
+		} else if op == vm.SSTORE && stackLen >= 2 {
+			// capture SSTORE opcodes and record the written entry in the local storage.
+			var (
+				value   = common.Hash(stackData[stackLen-2].Bytes32())
+				address = common.Hash(stackData[stackLen-1].Bytes32())
+			)
+			l.storage[contract.Address()][address] = value
+			storage = l.storage[contract.Address()].Copy()
+		}
+	}
+	var rdata []byte
+	if l.cfg.EnableReturnData {
+		rdata = make([]byte, len(rData))
+		copy(rdata, rData)
+	}
+	// create a new snapshot of the EVM.
+	log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, l.env.StateDB.GetRefund(), err}
+	l.logs = append(l.logs, log)
+}
+
+// CaptureFault implements the EVMLogger interface to trace an execution fault
+// while running an opcode.
+func (l *StructLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+}
+
+// CaptureEnd is called after the call finishes to finalize the tracing.
+func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
+	l.output = output
+	l.err = err
+	if l.cfg.Debug {
+		fmt.Printf("0x%x\n", output)
+		if err != nil {
+			fmt.Printf(" error: %v\n", err)
+		}
+	}
+}
+
+func (l *StructLogger) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (l *StructLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
+
+// StructLogs returns the captured log entries.
+func (l *StructLogger) StructLogs() []StructLog { return l.logs }
+
+// Error returns the VM error captured by the trace.
+func (l *StructLogger) Error() error { return l.err }
+
+// Output returns the VM return value captured by the trace.
+func (l *StructLogger) Output() []byte { return l.output }
+
+// WriteTrace writes a formatted trace to the given writer
+func WriteTrace(writer io.Writer, logs []StructLog) {
+	for _, log := range logs {
+		fmt.Fprintf(writer, "%-16spc=%08d gas=%v cost=%v", log.Op, log.Pc, log.Gas, log.GasCost)
+		if log.Err != nil {
+			fmt.Fprintf(writer, " ERROR: %v", log.Err)
+		}
+		fmt.Fprintln(writer)
+
+		if len(log.Stack) > 0 {
+			fmt.Fprintln(writer, "Stack:")
+			for i := len(log.Stack) - 1; i >= 0; i-- {
+				fmt.Fprintf(writer, "%08d %s\n", len(log.Stack)-i-1, log.Stack[i].Hex())
+			}
+		}
+		if len(log.Memory) > 0 {
+			fmt.Fprintln(writer, "Memory:")
+			fmt.Fprint(writer, hex.Dump(log.Memory))
+		}
+		if len(log.Storage) > 0 {
+			fmt.Fprintln(writer, "Storage:")
+			for h, item := range log.Storage {
+				fmt.Fprintf(writer, "%x: %x\n", h, item)
+			}
+		}
+		if len(log.ReturnData) > 0 {
+			fmt.Fprintln(writer, "ReturnData:")
+			fmt.Fprint(writer, hex.Dump(log.ReturnData))
+		}
+		fmt.Fprintln(writer)
+	}
+}
+
+// WriteLogs writes vm logs in a readable format to the given writer
+func WriteLogs(writer io.Writer, logs []*types.Log) {
+	for _, log := range logs {
+		fmt.Fprintf(writer, "LOG%d: %x bn=%d txi=%x\n", len(log.Topics), log.Address, log.BlockNumber, log.TxIndex)
+
+		for i, topic := range log.Topics {
+			fmt.Fprintf(writer, "%08d %x\n", i, topic)
+		}
+
+		fmt.Fprint(writer, hex.Dump(log.Data))
+		fmt.Fprintln(writer)
+	}
+}
+
+type mdLogger struct {
+	out io.Writer
+	cfg *Config
+	env *vm.EVM
+}
+
+// NewMarkdownLogger creates a logger which outputs information in a format adapted
+// for human readability, and is also a valid markdown table
+func NewMarkdownLogger(cfg *Config, writer io.Writer) *mdLogger {
+	l := &mdLogger{out: writer, cfg: cfg}
+	if l.cfg == nil {
+		l.cfg = &Config{}
+	}
+	return l
+}
+
+func (t *mdLogger) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+	t.env = env
+	if !create {
+		fmt.Fprintf(t.out, "From: `%v`\nTo: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n",
+			from.String(), to.String(),
+			input, gas, value)
+	} else {
+		fmt.Fprintf(t.out, "From: `%v`\nCreate at: `%v`\nData: `0x%x`\nGas: `%d`\nValue `%v` wei\n",
+			from.String(), to.String(),
+			input, gas, value)
+	}
+
+	fmt.Fprintf(t.out, `
+| Pc | Op | Cost | Stack | RStack | Refund |
+|-------|-------------|------|-----------|-----------|---------|
+`)
+}
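Illustrative usage of the relocated helpers (not part of the patch): once a traced execution finishes, `WriteTrace` renders the captured `StructLog` entries step by step:

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

func main() {
	tracer := logger.NewStructLogger(nil)

	// ... run a transaction through an EVM created with
	// vm.Config{Debug: true, Tracer: tracer} ...

	// Render every captured step (opcode, gas, stack, memory, storage) in the
	// human-readable format produced by WriteTrace.
	logger.WriteTrace(os.Stdout, tracer.StructLogs())
}
```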
+
+// CaptureState also tracks SLOAD/SSTORE ops to track storage change.
+func (t *mdLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+	stack := scope.Stack
+	fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost)
+
+	if !t.cfg.DisableStack {
+		// format stack
+		var a []string
+		for _, elem := range stack.Data() {
+			a = append(a, elem.Hex())
+		}
+		b := fmt.Sprintf("[%v]", strings.Join(a, ","))
+		fmt.Fprintf(t.out, "%10v |", b)
+	}
+	fmt.Fprintf(t.out, "%10v |", t.env.StateDB.GetRefund())
+	fmt.Fprintln(t.out, "")
+	if err != nil {
+		fmt.Fprintf(t.out, "Error: %v\n", err)
+	}
+}
+
+func (t *mdLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+	fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err)
+}
+
+func (t *mdLogger) CaptureEnd(output []byte, gasUsed uint64, tm time.Duration, err error) {
+	fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n",
+		output, gasUsed, err)
+}
+
+func (t *mdLogger) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+}
+
+func (t *mdLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
diff --git a/core/vm/logger_json.go b/eth/tracers/logger/logger_json.go
similarity index 73%
rename from core/vm/logger_json.go
rename to eth/tracers/logger/logger_json.go
index 364ce738a..4a7abacba 100644
--- a/core/vm/logger_json.go
+++ b/eth/tracers/logger/logger_json.go
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

-package vm
+package logger

 import (
 	"encoding/json"
@@ -24,32 +24,36 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
+	"github.com/ethereum/go-ethereum/core/vm"
 )

 type JSONLogger struct {
 	encoder *json.Encoder
-	cfg     *LogConfig
-	env     *EVM
+	cfg     *Config
+	env     *vm.EVM
 }

 // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects
 // into the provided stream.
-func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger {
+func NewJSONLogger(cfg *Config, writer io.Writer) *JSONLogger {
 	l := &JSONLogger{encoder: json.NewEncoder(writer), cfg: cfg}
 	if l.cfg == nil {
-		l.cfg = &LogConfig{}
+		l.cfg = &Config{}
 	}
 	return l
 }

-func (l *JSONLogger) CaptureStart(env *EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+func (l *JSONLogger) CaptureStart(env *vm.EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
 	l.env = env
 }

-func (l *JSONLogger) CaptureFault(uint64, OpCode, uint64, uint64, *ScopeContext, int, error) {}
+func (l *JSONLogger) CaptureFault(pc uint64, op vm.OpCode, gas uint64, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+	// TODO: Add rData to this interface as well
+	l.CaptureState(pc, op, gas, cost, scope, nil, depth, err)
+}
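Wiring the relocated JSON tracer looks the same (illustrative sketch, not part of the patch): one JSON object per executed opcode is streamed to the writer, and because `CaptureFault` now forwards to `CaptureState`, faulting steps are emitted too, with their error populated:

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

func main() {
	// Stream execution steps as JSON objects to stderr; EnableMemory opts in
	// to the (potentially large) memory snapshots per step.
	jsonTracer := logger.NewJSONLogger(&logger.Config{EnableMemory: true}, os.Stderr)
	_ = vm.Config{Debug: true, Tracer: jsonTracer}
}
```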

 // CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) {
+func (l *JSONLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
 	memory := scope.Memory
 	stack := scope.Stack

@@ -67,7 +71,7 @@ func (l *JSONLogger) CaptureState(pc uint64, op OpCode, gas, cost uint64, scope
 		log.Memory = memory.Data()
 	}
 	if !l.cfg.DisableStack {
-		log.Stack = stack.data
+		log.Stack = stack.Data()
 	}
 	if l.cfg.EnableReturnData {
 		log.ReturnData = rData
@@ -90,7 +94,7 @@ func (l *JSONLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration,
 	l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), t, errMsg})
 }

-func (l *JSONLogger) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+func (l *JSONLogger) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
 }

 func (l *JSONLogger) CaptureExit(output []byte, gasUsed uint64, err error) {}
diff --git a/core/vm/logger_test.go b/eth/tracers/logger/logger_test.go
similarity index 75%
rename from core/vm/logger_test.go
rename to eth/tracers/logger/logger_test.go
index 7726c90bd..205ee3112 100644
--- a/core/vm/logger_test.go
+++ b/eth/tracers/logger/logger_test.go
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

-package vm
+package logger

 import (
 	"math/big"
@@ -22,8 +22,8 @@ import (

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/holiman/uint256"
 )

 type dummyContractRef struct {
@@ -46,24 +46,23 @@ type dummyStatedb struct {
 	state.StateDB
 }

-func (*dummyStatedb) GetRefund() uint64 { return 1337 }
+func (*dummyStatedb) GetRefund() uint64                                        { return 1337 }
+func (*dummyStatedb) GetState(_ common.Address, _ common.Hash) common.Hash     { return common.Hash{} }
+func (*dummyStatedb) SetState(_ common.Address, _ common.Hash, _ common.Hash)  {}

 func TestStoreCapture(t *testing.T) {
 	var (
-		env      = NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{})
 		logger   = NewStructLogger(nil)
-		contract = NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 0)
-		scope    = &ScopeContext{
-			Memory:   NewMemory(),
-			Stack:    newstack(),
-			Contract: contract,
-		}
+		env      = vm.NewEVM(vm.BlockContext{}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: logger})
+		contract = vm.NewContract(&dummyContractRef{}, &dummyContractRef{}, new(big.Int), 100000)
 	)
-	scope.Stack.push(uint256.NewInt(1))
-	scope.Stack.push(new(uint256.Int))
+	contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x0, byte(vm.SSTORE)}
 	var index common.Hash
 	logger.CaptureStart(env, common.Address{}, contract.Address(), false, nil, 0, nil)
-	logger.CaptureState(0, SSTORE, 0, 0, scope, nil, 0, nil)
+	_, err := env.Interpreter().Run(contract, []byte{}, false)
+	if err != nil {
+		t.Fatal(err)
+	}
 	if len(logger.storage[contract.Address()]) == 0 {
 		t.Fatalf("expected exactly 1 changed value on address %x, got %d", contract.Address(), len(logger.storage[contract.Address()]))
diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go
index 915cab10d..ce9289dd7 100644
--- a/eth/tracers/tracers_test.go
+++ b/eth/tracers/tracers_test.go
@@ -27,6 +27,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/tests"
 )
@@ -95,7 +96,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
 	}
 	_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
 	// Create the tracer, the EVM environment and run it
-	tracer := vm.NewStructLogger(&vm.LogConfig{
+	tracer := logger.NewStructLogger(&logger.Config{
 		Debug: false,
 		//DisableStorage: true,
 		//EnableMemory: false,
diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go
index 37680807d..78194d04a 100644
--- a/ethclient/ethclient.go
+++ b/ethclient/ethclient.go
@@ -286,14 +286,6 @@ func (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*
 	return r, err
 }

-type rpcProgress struct {
-	StartingBlock hexutil.Uint64
-	CurrentBlock  hexutil.Uint64
-	HighestBlock  hexutil.Uint64
-	PulledStates  hexutil.Uint64
-	KnownStates   hexutil.Uint64
-}
-
 // SyncProgress retrieves the current progress of the sync algorithm. If there's
 // no sync currently running, it returns nil.
 func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {
@@ -306,17 +298,11 @@ func (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, err
 	if err := json.Unmarshal(raw, &syncing); err == nil {
 		return nil, nil // Not syncing (always false)
 	}
-	var progress *rpcProgress
+	var progress *ethereum.SyncProgress
 	if err := json.Unmarshal(raw, &progress); err != nil {
 		return nil, err
 	}
-	return &ethereum.SyncProgress{
-		StartingBlock: uint64(progress.StartingBlock),
-		CurrentBlock:  uint64(progress.CurrentBlock),
-		HighestBlock:  uint64(progress.HighestBlock),
-		PulledStates:  uint64(progress.PulledStates),
-		KnownStates:   uint64(progress.KnownStates),
-	}, nil
+	return progress, nil
 }

 // SubscribeNewHead subscribes to notifications about the current blockchain head
diff --git a/go.mod b/go.mod
index 61c88cb5d..6661bef0a 100644
--- a/go.mod
+++ b/go.mod
@@ -72,4 +72,5 @@ require (
 	gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
 	gopkg.in/urfave/cli.v1 v1.20.0
 	gotest.tools v2.2.0+incompatible // indirect
+
 )
diff --git a/go.sum b/go.sum
index 9d8837db3..28c9baf97 100644
--- a/go.sum
+++ b/go.sum
@@ -339,8 +339,6 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/openrelayxyz/plugeth-utils v0.0.9 h1:Rz0nwzirHSpGa6TfTuPl9/0W0TSN5vsYW+DtQH8QLIc=
-github.com/openrelayxyz/plugeth-utils v0.0.9/go.mod h1:Lv47unyKJ3b/PVbVAt9Uk+RQmpdrzDOsjSCPhAMQAps=
 github.com/openrelayxyz/plugeth-utils v0.0.10 h1:Aw1wiQUepHH9yytOM8+RlSj9Z3OU+OsegoPym7SLdic=
 github.com/openrelayxyz/plugeth-utils v0.0.10/go.mod h1:Lv47unyKJ3b/PVbVAt9Uk+RQmpdrzDOsjSCPhAMQAps=
 github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
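With `rpcProgress` gone, `SyncProgress` hands the decoded `ethereum.SyncProgress` straight back to the caller, snap-sync counters included. A usage sketch (not part of the patch; the endpoint is a placeholder for any node with HTTP-RPC enabled):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	progress, err := client.SyncProgress(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if progress == nil {
		fmt.Println("not syncing")
		return
	}
	// The snap-sync counters are now plain fields on ethereum.SyncProgress.
	fmt.Printf("block %d of %d, %d accounts synced, %d trienodes healing\n",
		progress.CurrentBlock, progress.HighestBlock,
		progress.SyncedAccounts, progress.HealingTrienodes)
}
```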
diff --git a/graphql/graphql.go b/graphql/graphql.go
index af6b7dc31..e92f1126f 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -1220,32 +1220,66 @@ type SyncState struct {
 func (s *SyncState) StartingBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.StartingBlock)
 }
-
 func (s *SyncState) CurrentBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.CurrentBlock)
 }
-
 func (s *SyncState) HighestBlock() hexutil.Uint64 {
 	return hexutil.Uint64(s.progress.HighestBlock)
 }
-
-func (s *SyncState) PulledStates() *hexutil.Uint64 {
-	ret := hexutil.Uint64(s.progress.PulledStates)
-	return &ret
+func (s *SyncState) SyncedAccounts() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedAccounts)
 }
-
-func (s *SyncState) KnownStates() *hexutil.Uint64 {
-	ret := hexutil.Uint64(s.progress.KnownStates)
-	return &ret
+func (s *SyncState) SyncedAccountBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedAccountBytes)
+}
+func (s *SyncState) SyncedBytecodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedBytecodes)
+}
+func (s *SyncState) SyncedBytecodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedBytecodeBytes)
+}
+func (s *SyncState) SyncedStorage() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedStorage)
+}
+func (s *SyncState) SyncedStorageBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.SyncedStorageBytes)
+}
+func (s *SyncState) HealedTrienodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedTrienodes)
+}
+func (s *SyncState) HealedTrienodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedTrienodeBytes)
+}
+func (s *SyncState) HealedBytecodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedBytecodes)
+}
+func (s *SyncState) HealedBytecodeBytes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealedBytecodeBytes)
+}
+func (s *SyncState) HealingTrienodes() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealingTrienodes)
+}
+func (s *SyncState) HealingBytecode() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HealingBytecode)
 }
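A quick way to exercise these resolvers (illustrative, not part of the patch; assumes a local node with `--graphql` enabled, and that the schema exposes the new counters alongside the block-number fields):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Once the schema exposes the new counters, they can be requested the same
	// way, e.g. "{ syncing { currentBlock highestBlock syncedAccounts } }".
	payload := []byte(`{"query":"{ syncing { currentBlock highestBlock } }"}`)
	resp, err := http.Post("http://localhost:8545/graphql", "application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```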

 // Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not
 // yet received the latest block headers from its peers. In case it is synchronizing:
-// - startingBlock: block number this node started to synchronise from
-// - currentBlock:  block number this node is currently importing
-// - highestBlock:  block number of the highest block header this node has received from peers
-// - pulledStates:  number of state entries processed until now
-// - knownStates:   number of known state entries that still need to be pulled
+// - startingBlock:       block number this node started to synchronise from
+// - currentBlock:        block number this node is currently importing
+// - highestBlock:        block number of the highest block header this node has received from peers
+// - syncedAccounts:      number of accounts downloaded
+// - syncedAccountBytes:  number of account trie bytes persisted to disk
+// - syncedBytecodes:     number of bytecodes downloaded
+// - syncedBytecodeBytes: number of bytecode bytes downloaded
+// - syncedStorage:       number of storage slots downloaded
+// - syncedStorageBytes:  number of storage trie bytes persisted to disk
+// - healedTrienodes:     number of state trie nodes downloaded
+// - healedTrienodeBytes: number of state trie bytes persisted to disk
+// - healedBytecodes:     number of bytecodes downloaded
+// - healedBytecodeBytes: number of bytecodes persisted to disk
+// - healingTrienodes:    number of state trie nodes pending
+// - healingBytecode:     number of bytecodes pending
 func (r *Resolver) Syncing() (*SyncState, error) {
 	progress := r.backend.SyncProgress()
diff --git a/graphql/schema.go b/graphql/schema.go
index dfd094a42..86060cd23 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -297,12 +297,6 @@ const schema string = `
         currentBlock: Long!
         # HighestBlock is the latest known block number.
         highestBlock: Long!
-        # PulledStates is the number of state entries fetched so far, or null
-        # if this is not known or not relevant.
-        pulledStates: Long
-        # KnownStates is the number of states the node knows of so far, or null
-        # if this is not known or not relevant.
-        knownStates: Long
     }

     # Pending represents the current pending state.
diff --git a/interfaces.go b/interfaces.go
index b9d0bb880..daea1afb6 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -101,8 +101,22 @@ type SyncProgress struct {
 	StartingBlock uint64 // Block number where sync began
 	CurrentBlock  uint64 // Current block number where sync is at
 	HighestBlock  uint64 // Highest alleged block number in the chain
-	PulledStates  uint64 // Number of state trie entries already downloaded
-	KnownStates   uint64 // Total number of state trie entries known about
+
+	// Fields belonging to snap sync
+	SyncedAccounts      uint64 // Number of accounts downloaded
+	SyncedAccountBytes  uint64 // Number of account trie bytes persisted to disk
+	SyncedBytecodes     uint64 // Number of bytecodes downloaded
+	SyncedBytecodeBytes uint64 // Number of bytecode bytes downloaded
+	SyncedStorage       uint64 // Number of storage slots downloaded
+	SyncedStorageBytes  uint64 // Number of storage trie bytes persisted to disk
+
+	HealedTrienodes     uint64 // Number of state trie nodes downloaded
+	HealedTrienodeBytes uint64 // Number of state trie bytes persisted to disk
+	HealedBytecodes     uint64 // Number of bytecodes downloaded
+	HealedBytecodeBytes uint64 // Number of bytecodes persisted to disk
+
+	HealingTrienodes uint64 // Number of state trie nodes pending
+	HealingBytecode  uint64 // Number of bytecodes pending
 }

 // ChainSyncReader wraps access to the node's current sync status. If there's no
 // sync currently running, it returns nil.
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index aa8e8767c..65e34752b 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -40,6 +40,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/eth/tracers/logger"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/params"
@@ -130,11 +131,21 @@ func (s *PublicEthereumAPI) Syncing() (interface{}, error) {
 	}
 	// Otherwise gather the block sync stats
 	return map[string]interface{}{
-		"startingBlock": hexutil.Uint64(progress.StartingBlock),
-		"currentBlock":  hexutil.Uint64(progress.CurrentBlock),
-		"highestBlock":  hexutil.Uint64(progress.HighestBlock),
-		"pulledStates":  hexutil.Uint64(progress.PulledStates),
-		"knownStates":   hexutil.Uint64(progress.KnownStates),
+		"startingBlock":       hexutil.Uint64(progress.StartingBlock),
+		"currentBlock":        hexutil.Uint64(progress.CurrentBlock),
+		"highestBlock":        hexutil.Uint64(progress.HighestBlock),
+		"syncedAccounts":      hexutil.Uint64(progress.SyncedAccounts),
+		"syncedAccountBytes":  hexutil.Uint64(progress.SyncedAccountBytes),
+		"syncedBytecodes":     hexutil.Uint64(progress.SyncedBytecodes),
+		"syncedBytecodeBytes": hexutil.Uint64(progress.SyncedBytecodeBytes),
+		"syncedStorage":       hexutil.Uint64(progress.SyncedStorage),
+		"syncedStorageBytes":  hexutil.Uint64(progress.SyncedStorageBytes),
+		"healedTrienodes":     hexutil.Uint64(progress.HealedTrienodes),
+		"healedTrienodeBytes": hexutil.Uint64(progress.HealedTrienodeBytes),
+		"healedBytecodes":     hexutil.Uint64(progress.HealedBytecodes),
+		"healedBytecodeBytes": hexutil.Uint64(progress.HealedBytecodeBytes),
+		"healingTrienodes":    hexutil.Uint64(progress.HealingTrienodes),
+		"healingBytecode":     hexutil.Uint64(progress.HealingBytecode),
 	}, nil
 }

@@ -1138,7 +1149,7 @@ type StructLogRes struct {
 }

 // FormatLogs formats EVM returned structured logs for json output
-func FormatLogs(logs []vm.StructLog) []StructLogRes {
+func FormatLogs(logs []logger.StructLog) []StructLogRes {
 	formatted := make([]StructLogRes, len(logs))
 	for index, trace := range logs {
 		formatted[index] = StructLogRes{
@@ -1425,9 +1436,9 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
 	precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number))

 	// Create an initial tracer
-	prevTracer := vm.NewAccessListTracer(nil, args.from(), to, precompiles)
+	prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles)
 	if args.AccessList != nil {
-		prevTracer = vm.NewAccessListTracer(*args.AccessList, args.from(), to, precompiles)
+		prevTracer = logger.NewAccessListTracer(*args.AccessList, args.from(), to, precompiles)
 	}
 	for {
 		// Retrieve the current access list to expand
@@ -1453,7 +1464,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH
 	}

 	// Apply the transaction with the access list tracer
-	tracer := vm.NewAccessListTracer(accessList, args.from(), to, precompiles)
+	tracer := logger.NewAccessListTracer(accessList, args.from(), to, precompiles)
 	config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true}
 	vmenv, _, err := b.GetEVM(ctx, msg, statedb, header, &config)
 	if err != nil {
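For context, the `AccessList` function these call sites belong to converges by re-tracing the message until the produced access list stops changing. A condensed sketch of that loop (not part of the patch; message application is stubbed out behind a callback, and `AccessList()`/`Equal()` are assumed to be the `AccessListTracer` helpers defined earlier in this patch):

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

// createAccessList re-traces a message until the access list it produces stops
// changing, mirroring the fixpoint loop inside AccessList above. The run
// callback stands in for applying the message against a fresh state with
// vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true}.
func createAccessList(run func(*logger.AccessListTracer) error,
	from, to common.Address, precompiles []common.Address) (types.AccessList, error) {
	prevTracer := logger.NewAccessListTracer(nil, from, to, precompiles)
	for {
		accessList := prevTracer.AccessList()
		tracer := logger.NewAccessListTracer(accessList, from, to, precompiles)
		if err := run(tracer); err != nil {
			return nil, err
		}
		if tracer.Equal(prevTracer) {
			return accessList, nil
		}
		prevTracer = tracer
	}
}

func main() {}
```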
diff --git a/internal/jsre/deps/bindata.go b/internal/jsre/deps/bindata.go
index 6f079c2ba..3e7d3a136 100644
--- a/internal/jsre/deps/bindata.go
+++ b/internal/jsre/deps/bindata.go
@@ -1,7 +1,7 @@
 // Code generated by go-bindata. DO NOT EDIT.
 // sources:
 // bignumber.js (17.273kB)
-// web3.js (401.764kB)
+// web3.js (402.466kB)

 package deps

@@ -90,7 +90,7 @@ func bignumberJs() (*asset, error) {
 	return a, nil
 }

-var _web3Js = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xfd\x6b\x7b\x13\x39\xd2\x38\x0e\xbf\xcf\xa7\x50\xfc\xdc\x0f\xb6\x89\xb1\x73\x60\x18\xc6\x99\x0c\x1b\x02\x33\x64\x6f\x20\x5c\x40\x76\x76\xef\x6c\x96\xab\xed\x96\xed\x1e\xda\xdd\xfe\x75\xb7\x73\x18\x92\xef\xfe\xbf\x54\x3a\x95\x0e\x7d\x70\x12\xe6\xb4\xc9\x0b\x70\x4b\xa5\x53\xa9\x54\x2a\x95\x4a\x85 ...
[remainder of the hunk omitted: it swaps the embedded, gzipped web3.js byte array for the regenerated 402.466 kB version; the binary payload carries no human-readable information]
1\xc5\xfd\x25\xb8\x69\x16\xa8\x0c\x92\x4b\xe1\x78\x47\xb8\x23\x4d\xca\xd1\xda\x17\x78\xdd\x4f\x42\xae\xfe\xe7\x34\x14\x4d\x2e\x73\xe1\x48\x3d\x27\x23\x3a\x49\x33\xda\x77\xe8\xea\x95\x38\x3a\x54\xe3\xfe\x4a\xec\x41\x35\xa4\xf5\x0a\xf6\x79\x03\xf9\x6a\xfd\x3e\x14\xa6\x62\xf3\xe0\x82\x87\xad\xbc\x88\x8a\xcb\x21\x79\x0a\x2a\x6c\xb9\xeb\x44\xb9\x70\x69\x0c\x45\xbb\xf6\x26\x83\x26\xb9\xb3\xc1\x20\x76\x8d\xa2\x78\x3a\xab\x0b\x5b\x65\x85\x21\xdd\x19\xa3\x1d\x76\x0a\xe1\x48\x6b\x7b\xab\x80\xf8\x4a\x7f\xff\x70\xf4\xb6\xaf\xb0\xcc\xdb\xd3\x0e\x2c\xc1\x75\x6c\x4e\x02\x3b\x94\x67\x8f\x2c\x82\x3c\x67\xbc\xab\x98\x65\xe9\x72\x3a\x33\x57\x80\x1a\x88\xa0\x35\xa8\xd5\xbd\x9c\xd4\x5c\xed\x11\x9c\x96\x3c\x32\x6f\xe9\x88\x25\x80\x78\xdb\x61\x56\x57\x53\xdb\x99\xb4\x1f\x45\x15\x90\xce\x7a\x94\xff\x18\x25\x51\x41\x2d\xa4\x5b\xdd\x00\x09\x11\x75\xc2\x94\xb2\xdc\x8e\xa2\x75\xf1\x5e\x6c\x2a\x7c\x1d\xb0\xf3\x52\x02\xdc\x9f\xfc\x4c\x6d\x41\x6a\x4a\x0b\x08\x57\x7c\x34\x39\x4e\x22\xaf\xb6\x0b\xca\x16\x33\x2a\x7e\xa8\x05\x47\x8a\xb4\xa7\xb4\x53\xca\x21\xba\x37\x6a\xa3\xea\x87\xaa\xa6\xc3\x3b\xd3\x85\x22\xe0\xb6\x2b\x27\x34\xcb\xd2\x4c\xba\xa4\xe1\x3d\xce\x49\x92\x16\x64\x9c\x66\x19\x1d\x17\xc3\x73\xb5\x6e\xcc\x5e\x1b\x0b\x88\x15\x94\x24\xb0\xe4\x99\xf0\xdf\x33\xf8\xaf\x5f\xa4\xaf\xd3\x73\x9a\x1d\x04\x39\xed\x00\x73\xe1\xfa\x5e\xcd\xc7\x18\xd4\x3f\xc4\x2d\xb3\xb8\xba\x39\x61\xff\x9f\xea\xa3\x38\x02\xc1\x7e\xbf\x31\xe1\x71\x4f\x64\x09\x3d\x27\x2f\xd9\xa8\x3a\x6d\xb8\xea\x85\x8e\x80\xad\xea\xbf\xdb\x05\xa1\x17\x51\x5e\xe4\x3d\xb2\x88\x69\x90\x83\x58\x0c\x23\x4f\x13\x85\xaa\x49\x1a\xc7\xe9\x79\x94\x4c\xa1\x64\xce\xb8\xa0\xb5\x8c\x44\x0f\x7b\xe0\x5f\xa1\xa7\x9f\x7d\x54\x44\x89\x55\xbd\x07\xef\x57\xa6\x57\xe1\xe0\x33\x85\x45\xc8\x19\x3e\x5c\x46\x47\x60\x4f\xab\x98\x2c\x27\x01\xc6\x6a\xc1\x57\x05\x9f\x78\x8e\x5a\x41\x59\xef\xd2\x3c\x8f\x46\x31\x9f\x42\x70\xa1\x21\x8c\xfa\x3e\x1c\x32\xf9\x32\x2b\xf8\x4f\x26\x52\x4b\x6c\xbd\x9c\x4c\xa2\xe9\xa5\xf8\x38\x92\xa4\xf4\x88\x7c\x66\xcd\xf3\x3f\x7d\x5d\x05\x9f\xe2\x66\x8b\x83\xcd\x35\x98\xba\x5c\xe2\x9f\xf2\x2a\x8a\xc3\x4d\x35\x9c\xba\xff\xe1\x9f\xe2\xc2\x48\xe7\xf1\x02\x8f\x1e\xa9\x85\xa9\xef\x71\x78\x81\x5f\x83\x51\x6a\xe4\x79\x4a\xc8\x7b\x18\x3e\x00\xb8\xbe\xc1\x79\xbc\x04\xea\x05\x2a\xcc\x3f\x05\x16\x10\x08\xb1\x20\xd0\x07\x5c\xa6\x08\x84\x50\x8d\xc3\x29\xfa\x5d\xc8\xdf\xb6\x48\xc1\xf9\x82\x75\xf2\xbd\x52\x72\x3a\x27\x87\x71\x90\xb0\x93\x41\xa0\x58\xb3\x48\x17\xba\xb2\x34\x23\x01\x79\xf5\xf2\x9f\x70\x08\x97\xd2\xda\x9d\x31\x14\xb5\xcf\xca\xa3\xdd\xcf\x33\x2a\xfd\xec\x05\xe8\x2a\x57\x44\x41\x41\xc1\x02\xd8\x7a\x0a\x72\x72\x4e\xd9\x02\xd1\x0e\x56\xe4\x30\xd6\x90\x34\xf4\x33\x35\x8e\xe4\x72\x9c\x98\xa5\x70\x51\x87\xd5\x2c\x99\x04\x16\x8a\x78\x09\x1c\x35\xd6\xe4\x54\x9c\x3b\x59\xf2\x10\xde\x86\x45\x05\xe4\x99\xd1\xc8\x10\x7f\x21\xc9\xaa\x76\xf9\x06\x1c\xc7\x9e\x15\x7c\x4e\xa3\xfb\x05\xfb\xdf\xb2\xc4\x8b\xb4\x6a\x81\xa3\xf3\xc2\x6f\xb6\xd4\xd9\x6a\xfb\x1d\x17\x3b\x20\xe4\x6e\x96\x7a\x11\xcd\x69\xfe\x7b\x2c\xf3\x44\x28\x17\xd9\xe2\x56\xaa\xaa\x9c\x1f\xf3\xd9\x16\x4d\x94\x29\x8b\x43\x0d\xaa\x23\x8d\x68\x42\x53\x81\xbc\x3a\x64\x53\xaf\x49\xc1\xac\x4d\x39\xb9\xd2\x15\x68\x00\x85\x7e\x6c\x7b\x63\x4d\x42\xcd\xf1\xe7\x1b\x26\x03\xc2\xaa\x97\xe5\xc5\x8f\xab\x2b\xb2\xb9\xeb\x3d\xdb\x88\x7a\x9d\xb3\x09\x4f\x37\x0e\x44\x02\xe5\xb2\x27\x0f\x1e\x10\xf1\xdb\x27\xf3\xb3\x26\xed\x5c\x7c\xc0\xf0\xb9\x40\x33\x44\x31\x51\x58\xa9\x44\x36\x2f\xda\xbd\x76\x1b\xdf\xb7\x58\x8e\xd2\x7c\xa5\x31\x9d\x94\x8a\x74\x89\x0c\x1d\xeb\xa1\x14\x45\x27\x1c\x4c\x06\xf1\x50\x27\x31\x61\x35\x09\xb0\xc5\x79\xda\xce\xc9\x58\x85\x
74\x71\x48\xcb\x8c\xf8\xd2\x84\xbe\x4a\xa8\x06\x9d\x91\xcd\x3a\x4d\x7d\x97\x41\x32\x0c\x7c\x84\x28\xcb\xb7\x5e\xe1\xc5\x77\x07\x39\xad\x53\x05\xb0\x46\xa2\x76\xea\x5a\x93\x5b\xfe\xb5\x60\x96\xfb\x8b\x78\x99\xeb\x2e\x88\x6f\xaf\x77\x43\x05\x64\x2a\x92\x66\x74\xfc\x39\x97\xa7\x26\xce\x22\xe5\x2d\x67\x2e\xde\xca\xc5\x97\xe0\xc6\xd7\x1b\x8c\x98\x93\xfc\xd8\x1b\x88\xd8\x0c\x29\x8c\x1a\x60\xeb\x3f\x40\x05\xb0\x63\x3b\x08\xae\x24\xa6\xce\xaa\xdc\x98\x39\x51\xde\xd2\xa0\x0d\xfe\xb3\x79\x71\xb2\xf9\xe8\xbb\xe0\xd1\xe4\xf4\xcb\xe3\xcd\xeb\xff\x19\x44\xfd\x82\xe6\x85\x02\x5f\x61\xf0\x15\x63\xfe\x4a\xa3\x6d\x30\x4e\x50\x00\x0c\xfe\xd3\xd9\xbc\xe8\x3e\xab\x1c\x28\xa6\xc0\xc1\x40\x07\xcb\xe2\xe1\xb0\xa0\x7b\xdc\x85\xb0\xb0\x3a\x9c\xc3\x43\x5e\xb6\x21\xa3\x61\x9b\x14\x2c\x3c\x01\x12\xd3\x57\x85\xb7\x33\x66\x5f\x18\xa3\x43\x60\xfb\x8f\x7e\xf4\x82\x59\x5d\x86\xd8\x5d\xed\x1c\xbc\x1d\xe7\x73\xf6\xef\x38\x58\xe4\x20\x3c\x88\xdf\x3d\xec\x9e\xd1\xee\x2d\xf7\x3a\x8f\x3a\x6b\x54\x7e\xa4\xf6\x76\x8e\x19\x1a\x8c\x67\x64\x1c\xe4\x4e\x35\x51\xce\xa9\x64\x39\x17\xb3\x83\x48\x89\xaf\xb1\xe6\x04\xc5\xdb\xca\x97\xf3\x39\x0d\x4b\x69\xcb\x6a\xee\xae\x69\xcc\xaa\xbe\x8a\xd6\x06\x03\x3e\x20\x0b\x39\x81\x2a\x29\x7e\x39\x1b\x90\xd6\x86\x08\x88\x57\x41\x0e\xae\x68\x66\xc1\x8e\x6c\xc4\xd4\xa4\x48\x59\xc7\xe7\xee\xe5\xf1\x26\xdc\x50\x12\x8b\x3c\xc0\x75\x77\x31\x23\x31\x85\xc7\xd4\x28\xfe\xde\x62\x41\x33\xd6\x5b\x39\x0f\x09\xc4\x2e\x9c\x46\x3c\xbc\x5d\x90\xd3\x79\xb0\x60\xf3\xb1\x65\xe8\xf9\x3a\xca\x7e\x01\x75\x1a\x9c\xb2\x6d\x3d\xe9\x92\x1f\xc8\xb7\x6c\x37\x17\x59\x27\xd1\x69\xbf\x48\x8f\x59\x43\x42\x13\xb4\xbe\xb7\x87\x32\x81\xe2\xab\x2b\xfc\x7e\xcf\x53\x23\xd6\x2d\x59\x35\x96\x78\x0a\x47\x6b\x52\x73\x7c\x83\xef\xeb\xe8\x0b\x8a\x4c\xdf\x88\x83\x9e\x24\xc7\x12\x5a\x2c\xd2\x3b\xa5\x45\xa9\xbc\x56\xfb\xf2\x0a\xa4\x88\x54\xc6\x8a\xfc\xec\x47\xd7\xa2\x9d\x76\x5b\xd0\x92\x4b\xa7\x06\x82\x6f\x44\xb5\x08\x68\xec\xf4\x9e\x55\x54\x41\xc7\xb2\x17\xe8\xd6\xdd\xa6\x69\x60\x79\x33\x6d\xf9\xc7\xa8\xf4\x3b\x76\xee\x99\x70\xff\xf9\xf2\x22\x4e\x91\xb8\x41\xc1\x75\x04\x6c\x92\x90\xdd\xff\x8d\xbd\x52\xea\x46\xf4\x65\xb3\xd2\xda\x9a\x2a\x69\xd3\x2a\x69\x4a\x9e\x5a\xd2\x34\x18\x69\x91\x32\x89\x32\x0a\xc9\xf6\x26\x77\x19\xf4\x48\xdc\x0f\xf2\x36\xf9\xf3\x84\xcd\x0b\xc2\x6d\x3b\x5c\xdb\xae\x5a\x52\xf6\x5f\xf6\x0b\xe7\x03\x98\x6f\x2b\xfb\xad\x66\xf4\x6b\x49\x33\xde\x6d\x4f\xfa\xd4\x95\xf8\x40\x32\x3c\xdf\x6b\xab\xb6\x59\x4f\x45\xe2\xee\xcb\x57\x9f\x09\x21\x23\x2f\xc2\x8d\x92\xaa\x51\x3f\xa6\xea\x91\xc7\x9b\xfe\x4b\x02\xe9\x87\x58\x1e\xa6\x73\x2d\xe5\xd6\xc7\xd8\xf4\x9e\x24\x7d\x37\x5f\x46\xdc\x4d\xbe\x93\xf9\xce\x80\xa4\xc3\xbb\x61\x89\x85\xb2\x6f\x49\x5e\x04\xc9\x98\x71\x11\x5d\xf8\xea\x4a\x21\x4d\x14\x86\xc7\x6b\xf0\xcb\xf0\x9b\xe1\x4d\xe5\xa6\x11\xc0\x8b\x54\x95\xed\xa6\x88\x92\xe7\xe1\x3a\x2c\x7d\x70\x6c\x8b\x1a\xa2\xc8\x13\x21\xc9\x8b\x1f\xc1\x5a\x45\xcf\x60\x34\xbc\x6f\xed\xbb\x43\x0f\xef\x4b\x63\xdc\xc8\x1e\xd7\x63\xe7\x47\x6d\x43\xb2\x2a\x7e\x64\xd1\x1b\x61\x48\x96\x68\x37\x1c\x11\xeb\x53\x51\x3f\x1c\xde\xf5\x1b\x0c\xe6\x48\xf4\xad\xe1\x62\x60\xf2\x45\xb2\x8c\x63\x08\x92\xd0\x71\x57\x08\xd8\x6d\x83\x0a\xc3\x33\x76\x71\x5d\xdb\x70\xe4\x23\xde\xd9\x06\xec\x80\x03\xde\x84\x19\xf0\xa4\x1b\x4d\xa4\xe8\x5e\xd3\xd1\x80\x07\xc0\xfa\xb1\x38\x01\x35\x1a\x8e\xc4\x0d\x8a\xd1\x90\xa5\x41\xc1\xca\x31\xd8\x07\x12\xbe\x8f\x82\x89\x5c\x2a\xa9\xce\x1c\xc4\xdf\x73\x73\x5d\x69\x03\x84\xca\x31\xb0\x62\xf6\xa3\x01\xe5\x39\x29\xbb\x74\xf7\xa9\xf5\x75\xb8\x98\xe4\xaf\x70\xb5\x2d\xeb\x35\x19\x43\xd4\xa7\x0e\xf5\xec\x6d\xf8\x38\xba\xca\xa8\x03\x31\xee\x97\x6c\x02\xe9\x72\
x4e\x46\x71\x3a\xfe\x4c\x66\x34\x08\x69\xc6\x3e\xd2\xb9\x6d\xb4\x11\xe5\xcf\x59\xb2\x4f\x68\x98\xd1\x0b\xe5\x16\x1d\xca\x92\x49\x14\x17\xb6\x32\xd3\x43\xb0\x00\x6b\x78\x1f\x66\x29\x95\xe7\xfc\x6f\xb6\xb6\xf5\x41\x9f\x83\xd7\xe0\xa5\xfc\x98\xce\xeb\xc2\x55\xf9\x4e\xe9\x2e\x94\x2f\xe0\xb0\x3e\x69\xaf\xb9\xfd\xb8\xc1\xcc\xc4\x29\x13\xf3\x16\xd1\xd8\x9d\x87\x8f\x2c\xb9\x6e\x1e\x0a\x05\x54\x31\x01\x50\x93\x31\x01\x50\xac\x72\x02\x9e\x3c\xd6\xf8\xe7\xd0\x37\xc6\x3f\x54\x85\x6b\xf2\xa1\xdf\x01\xba\x11\xf6\x4b\xfc\x8e\x08\x91\x6f\x28\x7f\xf4\x64\x2a\xbc\xf9\x19\xaa\x5f\x3c\x1d\x04\xc3\x21\xff\x4f\xa6\x08\x03\x92\xa1\xfe\xc9\x73\x90\x71\xc9\x10\x7f\xc8\x72\xc7\xc5\xe4\xe9\x50\xfc\x2f\xd3\xc0\x5c\x65\x28\x7f\xe8\x7a\x38\xac\xfc\xa5\xd3\x05\xbc\xfa\x29\xea\x71\x6d\x6e\x87\xbe\x44\x0e\xed\x9a\x72\x0e\x3d\x69\x06\xac\xb4\x9a\x1c\xda\x09\x72\x1c\x3f\x53\x18\xc5\xcf\x14\x8d\x01\xd2\xc4\x0f\x09\xa7\xa4\xc5\x21\xfe\x90\xb9\xa6\xca\x7a\xe8\xa4\x28\xac\x71\x41\x7d\xa8\x7f\xf2\x1c\x24\x1d\x0f\xf1\x87\xcc\x35\x4e\x22\x43\x3b\x41\x42\xa1\x7c\x2b\xc7\x3a\xba\x0f\xdd\x24\xd9\x43\x07\xd2\x49\x92\x75\x4a\x61\x6c\x88\x7e\xe3\xfe\x26\xd3\xa1\xfa\x25\xd3\xf9\x9e\x3a\x54\xbf\xd4\xe8\xf9\x7a\x1f\xea\x9f\x6a\x4c\x6c\x97\x1c\xca\x1f\x32\x95\x6d\x58\x43\xf1\xbf\xaa\x83\xf1\xbb\xa1\xfc\x21\x53\x81\x6d\x0c\xe5\x8f\x1e\x2c\x30\xee\x9f\x4e\x3c\xea\x6e\x0d\xb7\xbe\xeb\x55\xba\xb7\xe9\xb5\x96\xc5\xe4\x69\x6b\xf8\xf4\x9b\xeb\xd3\xde\xf6\x56\x13\x87\x0f\xe6\x12\xde\xe3\x0b\xb8\x25\xfc\x1c\xb4\x86\xa4\xb5\xd9\xdf\xde\xec\x6f\xb5\xd6\xae\xa5\x27\xb8\xed\x46\x81\x8a\xef\x1d\x49\xdc\x3b\x92\xf8\x2b\x38\x92\x10\xb5\xac\xb9\xae\xe0\xfe\x4e\x27\x93\x8c\x5e\x92\x9f\xa3\x78\xfc\x99\x92\xef\x7f\xa1\x93\x89\xed\x4d\xa2\xa1\xc3\x38\x00\x8b\x82\x84\x1c\x31\x89\x3b\x00\xa8\x28\x48\x5c\xb0\x1f\x83\x11\x03\xfb\x47\x3a\xa5\x71\x5e\xd0\x38\xa6\x19\xf9\x7e\x02\x89\x2e\xf0\x4f\xc1\x19\xf9\x39\x4d\x43\xf2\xfd\xb4\xd4\xcb\xc5\x63\xed\xdd\x47\xb8\x82\x7c\x13\x24\xc1\xd4\x74\x3d\xd1\x1f\x30\x2c\x0c\x32\x0e\x30\xe7\x00\xd2\xc5\xc4\xe1\x08\x0e\x47\x36\x70\x34\x0a\x12\x09\xf2\x12\xac\xf8\x6d\x08\x2e\x79\xe5\x03\x5a\xcc\x24\xe0\x8b\xe7\x15\x70\xe1\x48\xb9\x9b\x9d\x55\xd5\x97\xcf\x54\x7d\x6f\xc1\x31\x79\x19\x60\x42\x0b\x09\xf8\x8e\x66\x39\xbc\xa4\x2a\x87\x5e\x08\x10\xd5\x89\xf3\x20\x9b\x57\x75\x83\xe5\x2b\x60\x5a\x14\x10\xb4\xc9\x85\xcf\x45\x96\x04\x95\x5c\xc5\x80\x94\xec\x82\x9d\xa8\xb4\x6f\x8f\x28\xb6\x2a\x44\x51\xe5\xcb\x5d\x84\x70\x20\xe9\x8c\x49\xbc\xdb\xa0\x49\xe8\xe9\x1b\xcf\x90\x60\xcf\xe1\xc4\xe4\x42\x8d\x58\xba\xc2\x64\x96\x2e\x68\x56\x5c\x7a\xe0\x16\x22\x4b\x82\xbe\x2a\x8a\xc5\xbb\x2c\x3d\x8b\x42\x2f\xb9\xb1\x85\xba\x10\xd9\x8a\xd8\x16\xe3\x8a\x12\xd1\x62\x6c\x17\x68\xe6\xd0\x70\x6d\x4d\xc9\xea\x3f\xd3\xd1\x0e\xe9\xc8\x6a\x4c\xa7\xbc\x99\xbd\x42\x12\x7a\x6e\x2d\x1b\x5d\x12\xf9\xe7\x15\x91\x56\x51\xcf\x25\x14\x02\xa2\xfc\xa9\x0b\x3d\x67\xcb\x05\xfc\xf4\xe3\x2a\xc2\x91\xc8\x7c\xf1\xdc\xc9\xcb\x67\xb2\xe4\x87\x99\x5b\x32\x81\x35\xc0\x72\xdf\xd2\xc2\xc9\x5d\x68\xc2\x67\x20\x72\x1d\x38\x70\xa3\x5f\x7f\x95\x6d\x30\xba\x76\xfb\xa0\x09\x1c\x80\xc4\x67\x07\xc3\x68\xca\xd6\x47\x8d\x60\x11\x0d\xd5\x66\x28\xfe\xe7\x47\x0e\xdc\x49\x81\xad\xdc\x28\x8a\xc9\x67\x68\x7c\xf5\x14\x0c\xa2\x97\x21\xfe\x70\x9a\xf8\xa4\xd6\x00\xff\xe1\x0c\x50\x00\x74\x74\xfb\x82\x9c\x23\x9a\x0f\xd1\xef\x0e\x37\xe6\xb9\xee\xee\x32\x89\x69\x30\x00\x0f\xbc\x39\x25\x7a\x0c\x29\xdf\x89\xc1\x25\xd0\x1a\x23\x37\xcf\xf8\xea\xc6\x56\x3a\x2e\x26\x34\xca\x3a\x65\x38\x4d\x8a\x29\x0f\x87\x0c\xae\xa7\x71\x5c\x78\x65\xd2\xf6\xf4\x25\xa3\x3c\x56\x84\xee\xc5\x67\x4a\x17\x87\xf9\x87\xcb\x64\x1c\x25\xd3\xca
\xae\x40\x59\x0b\xbe\x19\x05\x7a\x3a\x82\xf9\xc2\x73\x6d\xbf\x62\x41\xc9\x57\x30\xdc\x9b\x14\x7c\x79\x60\xe4\x8b\x59\x09\x05\xdf\x1e\x38\xf1\xec\x5a\x82\xb1\x4f\x07\x0a\xbf\xc0\xe5\x80\x2a\xc5\x0b\x6b\xd4\x29\x13\x3c\x6d\xeb\xe7\x54\xb2\x79\x91\xe2\xad\xd5\x86\x46\x69\x9e\xba\x31\x2e\x65\xed\x55\x38\xe5\x16\x8e\x12\xf2\x67\xea\x1f\x19\x86\x12\xdf\x0e\x1c\x36\x6c\xe1\x90\x2a\xc5\x03\xeb\xde\x0a\xcb\x32\x07\xf6\x6d\xa1\xd3\xe7\xb2\xb2\x4e\x8e\xa7\xdd\xc3\xe7\xfb\x6f\x51\x63\xec\xd3\x81\xd2\xde\x69\x38\x98\xf8\xf6\xc1\x49\xc7\x29\x0a\x10\x12\xd8\x2e\x66\x2f\x7c\xbe\xf5\xe3\x87\xdc\xfc\x52\xc8\x74\xae\x68\x5e\xd7\xc1\x9d\xb4\x0d\x59\x76\x7d\x1a\x46\x19\xa8\x8a\xc7\xc1\x02\x1e\x5f\xa0\x0b\x4c\xcf\x8c\x1e\x1e\xec\xbf\x33\xd6\x3e\x2b\x87\x2d\xe4\x22\x2e\x4a\xb2\xe5\xcb\xa4\x4a\x9e\x6f\xbc\xf5\x64\x10\x7d\xd1\x8c\x5c\xd9\xe0\x4f\x46\xf1\xdf\xaa\x80\xa3\x27\x8a\x77\xc3\x5e\x27\xc4\x91\x8e\x79\xe7\x9c\x80\x0e\xa6\x2d\xf7\xa4\x24\x0d\x69\xbb\x67\x40\x4c\xc1\x2e\x64\x48\xda\x4c\xe8\xf8\x34\x8e\x23\x9a\x14\xff\xe0\xe0\x6d\x7d\x27\xdd\xed\xdd\xa4\x35\x5a\x9c\xa7\xd9\xe7\xb2\x06\x13\x5a\x7c\x12\xa0\x16\x88\x19\x2f\x60\x68\xaf\xf2\x5b\x76\x8b\x0a\x85\x76\x59\xbf\x68\x31\xfb\x04\x73\x3d\x4e\xe3\x7f\xfc\x0e\xfd\x3b\x9f\x45\xf9\x42\xb9\x46\x76\xba\x97\xcf\x66\xb7\x46\x1b\xfc\x3c\xf5\xee\x25\x51\x7e\x90\x26\x09\x77\xd9\x84\x96\x5b\xd7\xa0\xbd\x8e\x77\xbb\x7c\xf0\xc0\xbb\x8d\xe2\x2a\x3b\x5d\xff\x0e\xc6\x9d\x14\x48\x99\xbc\x94\xe6\xc1\x38\x14\x02\x27\x08\x89\xc6\xab\xb7\x65\x75\x4b\x67\xa2\xf8\x84\xc0\x55\x4e\xc6\xc1\xa2\x35\xdc\xde\x64\x49\xf8\x48\xd2\x1a\x6e\x6f\xb1\x34\x7d\x1c\x68\x0d\xb7\x1f\xab\x14\x2e\x3a\xb5\x86\xdb\x4f\x55\x12\x16\xee\x5b\xc3\x9d\x6d\x95\xc1\x56\x78\x6b\xb8\xb3\xa3\x13\xb4\x50\xdf\x1a\xee\xe8\x4a\xf5\xb1\xb0\x35\xdc\xf9\xd6\x49\xa6\xc5\xac\x35\xdc\x79\xea\xa4\x27\xb4\x68\x0d\x77\xbe\x73\xd2\xa5\x20\xdc\x1a\x3e\xde\x74\x32\xf3\xd9\xac\x35\x7c\xbc\xe5\xa6\x33\x59\xb8\x35\x7c\xac\xbb\x2f\xcf\x38\xad\xe1\xe3\x6f\x54\xa2\x79\x70\x6e\x0d\x1f\x3f\x51\x59\x52\x6a\x69\x0d\x1f\x7f\x5b\xad\xdb\xbb\x3e\xed\x6d\xef\xdc\x6b\xde\xee\x35\x6f\xff\x2d\x9a\xb7\x20\x8e\xc1\xbf\xc4\xed\xdc\xb8\x22\x05\x97\xa3\x0a\xf1\xe9\x42\x64\x94\x98\x97\x67\xdc\xa2\x1f\xe9\x18\xa0\x37\x12\x4e\xc7\x8c\xa9\x0b\x8e\xe4\xea\x69\xbc\x8a\x9a\x1f\xe1\x72\xd7\xaa\x0c\xd2\x24\xc4\x39\x0f\x7d\x64\x82\x48\x56\x24\x32\x95\x73\xd7\xfd\x38\x36\x86\x62\x0a\x46\xe6\xd1\xaa\x07\x37\xf5\x3d\x62\x99\x96\x95\x28\x3d\xcc\x04\x7c\x44\xfe\x85\x5f\xce\xb3\xff\x70\xb2\x63\x2e\xc9\x37\x21\xa7\x87\xd5\x51\xbe\x2d\xa9\x55\xba\x03\xdf\x53\xbf\xae\xae\x20\xfc\x0d\xb1\xdd\x3e\xb0\x44\x48\x3d\x69\x33\x29\x14\xc2\x0a\xb4\x7b\xa4\x5d\xa4\xfc\xe7\x69\x9f\xa3\x19\x85\x3b\x9c\x78\x6e\x43\x45\x33\x27\x93\x53\x30\x70\x51\xf6\xa1\xe2\x86\xb4\xeb\x89\x99\x6d\x55\xc3\xfa\xc3\x8a\xef\x21\xe2\xe1\x1e\x74\xa0\x23\xfc\xbc\xa4\x63\xe0\xe9\x06\xa5\xcd\x82\x7e\xb7\x05\xae\x28\x34\x5e\x0d\x3c\x9b\x8f\xbb\xb0\x73\x8a\x2a\x8c\x7b\x82\x16\x87\x41\x11\xc8\x11\xb0\xdf\x7d\xf6\x0f\xd9\x43\xbf\xaf\xae\xc0\x28\x56\x01\xc0\x55\x72\x2e\x41\xc4\xd7\xd5\x95\x0e\xbe\x09\xda\x46\xd6\xb4\xbc\x23\x47\x80\x27\x9b\xa7\xfd\x9c\x31\x04\xe5\x61\x9d\x41\xcf\x85\x80\xa3\x29\xcc\x9d\xae\x5f\x3c\xd3\x85\x5b\xd9\x13\xa6\xb6\x42\xba\x73\x2f\x6d\x3b\xbf\xa8\xe7\xe9\xdd\x93\xcd\x53\xf4\xf0\x6a\x1d\xda\xef\x92\x2f\xf0\xd4\x21\x48\x92\xb4\x20\x93\x28\x09\x79\xbf\xa2\x64\xca\x1b\x7a\xa6\x9a\x1f\xa7\x49\x9e\xc6\xb4\x7f\x1e\x64\x49\xa7\x8d\x4b\x70\x67\x39\x8c\x15\xc7\xe9\xb4\x8d\x4c\x5f\x45\x8f\x19\x2a\x1c\x87\x4b\x54\xb0\x21\x1c\x98\x0b\xe6\xae\xe3\x5b\x9d\x3d\xde\xad\x9e\x49\x10\xe6\x11\x0a\x6a\x94\xb
e\x0e\x61\x8a\x1b\x2c\xc7\x0b\x3a\x66\x12\x80\x67\x3d\xf6\xc0\x21\xd3\x28\x18\x7f\x56\x21\x44\xc1\x13\x81\x38\xec\xca\xeb\xd6\x4e\x90\x4d\x97\xf0\x12\xe4\x44\xfd\x42\xce\x78\x4c\x2b\x74\x59\x23\x84\x7e\xae\x2c\x86\xdd\xc6\x75\x1c\x08\x36\xf1\x5b\xa6\x1b\x0b\xcd\x36\x92\x65\x1c\x3b\xe8\x4e\x25\xa5\x09\xe7\x77\xfa\x00\x2c\x21\x26\x28\xc8\x1a\xd7\xcc\x02\x26\xfb\xa3\xc8\x54\x1a\x22\xf1\x9b\x73\xf6\x4e\xda\x83\x83\x52\xbb\xe7\x65\xac\x3d\xc9\xde\xd9\x61\xab\xd3\xed\xe9\x86\x10\x86\xeb\x67\x2a\x28\x8a\x60\x3c\xfb\x98\x1e\x48\x3f\x58\x78\xca\xa4\x73\x2c\x7c\xe6\xd6\x53\xcb\xc7\xcd\x3f\x9d\xe1\xc8\xa2\xfd\x20\x8e\xd5\x7e\x22\x80\x4b\xce\x14\x4e\x37\xd5\x01\xc3\x73\xc2\xf0\x1e\x31\x80\x54\x5b\xc3\x6d\x90\xee\xf9\xaa\x6f\x0d\xb7\x41\x76\xc7\x21\xdb\x76\x00\xd8\xda\x08\x5b\xc3\xc7\x3b\x4c\x64\x7e\x7c\x2f\x32\xdf\x8b\xcc\x7f\x6d\x91\x19\x45\x7b\x81\xb3\xf7\x5d\x85\x7b\xf9\x7b\x9e\x26\xd9\x62\x6c\xca\x9b\xbf\xf0\x44\x75\x75\x98\x65\xa9\x2d\x02\xf3\x34\x25\x89\xba\x2a\x0a\x36\x58\x43\xc8\x74\x64\x4c\x40\xc7\xa7\x52\x49\x53\x64\xe4\x22\xae\x77\x8d\x9f\xc0\x20\x0c\xa5\x4b\x47\xc6\x8e\x45\x61\xf0\x92\x0d\x5d\x13\x09\x96\x45\x60\x10\x86\x1e\x1b\x5b\x22\xc6\xcf\x0b\x15\xda\xba\x75\xb0\x06\xe3\xc4\xac\x38\x0c\x7d\x32\xb7\x6f\xe0\x39\x0f\x0a\x2e\x21\x6a\x47\x24\x99\x76\x55\xff\x05\x8c\xb7\x6b\xbe\xfd\xdc\x74\x2e\xa0\xf0\x6b\x74\xd3\x9d\x02\x7d\x4f\x94\x84\x5c\xcd\x24\x61\x7b\xa8\x6e\x9a\x65\x3d\x21\x89\xe6\xae\x4c\xcc\xc9\x87\xff\x12\xc2\xa2\x06\x10\xf8\xc1\x1e\x26\x15\x2a\x7b\x04\x5e\xb7\x97\x3c\x60\x13\x55\x9e\x00\xcc\x29\x3e\x1e\x94\x0a\xec\xbc\x48\x49\xb5\x4c\xac\x91\xfd\x11\x95\xf6\x1d\xd9\xc7\x2e\xb0\x2e\x16\x51\x3f\xca\xff\x11\xc4\x51\xf8\x9e\xe6\x8b\x34\xc9\xa9\x68\xca\x79\x7c\xe7\x8c\xc1\xdf\x5e\x87\xaf\xb1\xfe\x61\x72\xe6\xad\x75\xd7\xa9\xf4\xda\xed\x5f\x69\xe5\xdc\x65\x93\x33\x58\xbe\xe7\x82\x6b\x08\x5f\x86\x68\xbc\x2f\xfa\x00\x4e\x23\x70\x82\x13\xc4\x5e\x4f\x85\x3a\xdf\x10\xbf\x28\x01\x94\xa5\xf5\x93\x7c\xf0\xad\xe1\x36\xe8\xd1\xc4\x8a\x6c\x0d\x77\xc0\xea\xad\x51\x90\xef\xfb\x0d\xff\x7e\xc3\xff\xf3\x6e\xf8\x7a\xbf\x57\x62\xf9\x1d\xa9\xc8\x1a\xea\xaa\xd8\x89\x27\xb3\xc0\x72\x21\xeb\x0f\x20\x73\x55\x75\x9a\x84\x43\xef\xa6\xb0\x1e\x4c\x3e\x88\x12\xd0\xfb\xe8\x10\x82\xc0\x94\xc6\xd0\x88\x38\xee\xdb\x3f\xb9\x7a\x09\x3f\x32\x83\x6d\xde\x7e\xa7\xcc\xe1\x0e\x34\xd8\x3b\x09\xa5\xe4\x02\x30\xf6\xbd\x26\xd2\x95\xb3\x99\xea\x6d\x40\x38\xfb\xf5\x57\x6d\x3e\xf5\x1c\x45\x3d\x51\xce\xba\xd5\x09\x46\x91\x47\x0d\x82\xdc\x3e\x13\xcb\xcf\x32\x8f\xef\xbd\xb7\x47\xda\xa8\x4f\x6d\xf2\xe0\x81\xe1\xc7\x19\x9d\x9b\x79\xb3\x86\xb3\xff\xeb\xae\xb5\x0d\x57\x35\xe8\xf1\x0c\x4d\x3a\x90\x58\xb2\x5d\x43\x1e\x77\x18\xed\xd9\x19\xac\x8a\x18\x58\xee\x69\x1a\x68\x4f\x1c\xde\x39\x42\x39\xa8\x42\x23\xd2\xf2\x48\xed\x55\x03\xe9\x51\xc5\xf3\x12\x9e\xa2\xf8\xd1\xda\xfb\xb2\x29\x08\x43\x49\xc3\xb9\x3e\x86\x63\xda\x90\x69\xd7\xaa\xa6\x52\x7a\xe2\xa4\xe2\xaf\xb2\xf2\x64\xaf\x8f\xeb\x37\x27\x14\xf4\x0a\x71\x95\xd9\xc7\x9a\x2a\xa5\xfd\x51\xfd\xf9\x44\x8b\x99\x54\x37\xeb\x4e\x9a\x7e\x2f\x6a\x55\xa9\x13\x47\xcd\xa1\x11\xa0\x55\xa5\x0d\xe6\x95\x73\x8b\x46\x93\xca\xf9\xcd\xdd\xcd\xa8\x5d\x5f\xbd\xa2\x46\x32\xbc\xbb\x98\x5b\xce\x7b\x2d\xb5\xb2\xe0\xac\x42\xdb\xa8\x78\xac\x39\x79\xae\xde\x8a\x77\xac\x74\x3a\xf7\xe3\xb8\x72\xba\x00\x48\x5c\xf4\xac\x4c\x60\x5c\x15\x5a\xd3\xc1\xd5\xa9\xcd\x78\x14\xe8\x2a\xd5\xca\xa8\xad\x8a\xdc\x94\x9b\x1c\xb0\xfd\x93\x93\x3e\xa5\x45\x2e\x8c\x57\xe2\x4b\x12\xd2\x45\x9c\x5e\xd2\x50\x9a\x08\xc2\xf3\xc1\xf1\x2c\x88\x12\xfb\xb9\x1a\xd4\xf6\x63\x9a\xc9\x1e\x79\x7c\x0f\xc8\x03\xab\x8f\x24\xe5\xba\xbc\x56\xaa\xc5\x35\xc3\x43\x
ee\xb1\xbc\xdc\xd0\xcf\xda\x4a\x5a\xc4\x06\x0f\xb2\x25\xa4\xb0\xd4\xe4\x0b\xf1\x9a\x21\x90\x8c\xa3\xe6\xfd\x11\x82\x94\xef\xc9\x87\x65\x90\x3f\x18\x90\xf3\x20\xe2\xea\x72\x10\xb9\x16\x85\x56\xc1\xca\x9b\x32\x73\xde\xc5\x52\x50\xf1\xa2\x75\xc7\x68\xd7\x74\xbc\xbc\x4e\xe1\x69\xb2\xd1\xbe\xbd\x2b\x41\x7f\x37\x36\x76\xcd\x63\xd3\x60\x40\xf2\x22\x5d\x70\x5d\x6d\x94\x4c\x49\x30\x61\x5d\xf9\x66\x93\xcf\x55\x4e\x3a\x45\x34\xa7\xe9\xb2\xe8\x3a\x47\x47\x8e\x80\x1f\xc8\x37\x9b\xde\xc3\x22\xef\x7d\x9f\xd5\xfe\xb3\xa8\x5c\x87\x54\xe8\x92\x2f\xd7\x9e\x33\x9d\x8d\x40\xfe\x60\xcf\x7b\x0e\x55\x33\xe2\x3d\x6d\xea\x93\x9f\xf6\x0b\xac\x18\x13\xdc\x97\x04\x7c\x65\x8c\x19\x61\x83\x8f\xe0\x11\x93\x98\x97\x49\x68\x63\xa0\xed\x3b\x7c\xd2\x18\x39\x14\xc1\x7f\x8e\x37\xe2\x1b\xb7\xca\x96\x1f\xae\x59\xf9\x13\x71\xb1\x66\x50\xcd\x94\x16\x1f\x75\x53\xef\x39\xa9\x69\x8e\x82\xba\xf1\x2a\xc8\x67\x98\xa8\x7a\x92\x30\xbb\xfe\x23\x7c\x34\xe9\x08\x00\x3f\xb5\x79\x0b\x79\x3b\x08\x11\x8c\x44\x5d\xfd\xb1\xb9\x00\xcd\x1e\x41\x98\x23\x7f\x77\xe4\x5f\x99\xf3\xf6\x27\xca\x79\x7b\xd9\x5f\x34\xe9\x98\x14\x77\x75\x45\xd6\xa1\xc5\xca\x62\x44\xb1\x6e\x0f\x6d\xe2\xbf\x9b\x2c\x01\xfc\xd7\x70\x39\xd8\x43\x4a\x43\x14\x21\x7a\xa7\x72\x66\xe4\xdf\x60\xa0\xee\xf9\xe2\x74\x8a\xa8\x16\x8e\x15\x92\x8d\xaf\xb7\xbb\x35\xcd\x13\x43\x54\x53\x1c\xb5\x64\xaa\x1b\x54\x36\x18\x10\xbe\x59\x49\x71\x21\x48\x42\x22\x6e\x46\x48\x30\x0d\xa2\x44\xac\x9c\x73\x2a\x02\xfc\xd5\xfc\xf9\x65\x4f\x7b\x03\xac\xa9\xc1\x96\x75\x9c\xed\xbf\x66\x48\x63\xee\x96\x4d\x5c\x0a\xb2\x2d\x81\xed\x8e\x39\x1d\xa7\x49\x48\x18\xc3\xad\xad\x04\x91\x6e\x3d\xb1\x12\x83\x23\x82\x2e\xac\x69\x87\xbd\x5e\x8c\xee\xb8\x43\xd8\x75\x3b\x12\x25\xc4\x89\x16\x71\xca\xbc\x48\x33\x1a\x2a\x37\xee\x5c\x02\x01\x8d\xcf\x34\xc8\x49\x30\x67\x1b\x52\xdf\xcb\xaf\xed\xbf\x52\xfe\x6d\xff\x79\xbc\xcb\xdf\x45\x17\xab\x7b\x78\x5d\x9a\x5b\xc6\x31\xdc\x12\x36\x24\xd2\x4e\x36\x3d\x50\xa0\x2b\x06\x49\xe8\x3f\x06\xec\x98\x7d\xa9\x7c\x69\x58\x52\x9c\x05\x56\x73\x68\xb0\x2b\xc5\x07\x06\x38\x55\x05\xa3\xc8\xb8\x5c\xe0\x2f\x8a\xa8\x3c\xbe\x43\x5a\x30\x8a\xc8\x1e\x83\x94\x72\xd6\x43\xae\x09\xad\x1f\x93\x3e\x21\x25\x24\x40\xa2\xa9\x28\x2e\x6b\x91\x63\x4b\xe8\xb9\x4a\x92\x63\x4a\x2e\xaf\x31\x31\x58\xba\x91\x2d\x69\x53\x10\xc4\xdd\x15\x8b\x6e\x57\x14\xb5\xe5\x60\x43\xb2\x10\xbe\x4e\xa4\xa2\x38\x74\x4a\xfb\x24\x65\x01\xa1\xa4\x65\x7d\xfc\x93\x49\xaa\x2d\x3d\xf1\x50\x68\xa0\x27\x82\xa1\xd4\x77\xfd\x42\x2a\xb6\xe8\x6f\x65\x0d\xec\x4f\xfd\xe0\xd2\xb5\x3a\x45\x62\xfa\xeb\x48\x3a\xe8\xa9\xd9\xc7\x1c\x6c\x30\xe0\xa1\x15\xb5\x95\x85\x51\xa9\xb6\x95\xf8\x72\xbd\xcb\x80\x25\x96\xd6\xcd\xb6\x05\x62\x50\xc5\x70\xc6\xcd\xe0\x2d\x0e\x10\x32\x7e\x94\x10\x47\x63\x0a\x57\x0d\xda\x5e\xc3\x8a\xfe\xe7\xb3\x1d\x01\xfb\x8f\x72\x8b\x11\xe2\x58\x8d\xe4\xfd\x45\xba\x30\x1c\xcc\x99\xdd\x8b\x83\xbc\x10\x90\x4e\xd5\xfe\xee\x70\x42\xea\xb0\x82\xe0\xbc\x68\x5d\xbd\x38\x81\x38\xb4\x90\x6e\xf7\x49\xa3\xb0\xa6\x4b\xac\x21\x01\xdc\xe7\x41\x49\x7e\x20\x9b\x76\x6d\x62\xa6\x25\xed\xef\xcb\xb5\x5c\xaf\x05\x90\x7f\xb7\x52\x09\x22\x34\x59\xcc\x52\xaa\xd3\x94\xa9\x1d\x1e\xd6\xba\xd9\xe5\xfe\x22\xb8\x0c\x46\x31\xf5\x75\xcf\x3d\x0e\x70\xfb\xa9\x9c\x26\xa1\x0e\x48\x95\xa4\xc9\x23\x51\x09\x46\x87\xbd\x4d\x5c\x97\x4d\x3d\xf8\xf6\x63\x9c\xd1\xaf\x82\xed\xc8\xa5\xd2\x83\x11\xa3\x5a\xe5\x04\x81\xed\xdb\xc6\x1e\xaf\x68\xd7\x9c\xc4\xd2\x1b\x41\x7c\xa2\x35\x74\x00\x52\xee\x83\x58\x08\xa6\x96\x20\xa4\xe4\x3c\xc8\x95\x40\xb9\x66\xe2\x8a\x2f\x6d\xb8\x7a\x45\x47\x18\x6d\x98\x65\xdd\xbf\xce\x82\x7c\xe6\x43\x3a\xeb\x35\xcd\xb2\xb2\x9b\x48\x7c\xe5\xe8\xbb\x57\xac\x92\x78\x98\x38\x1a\x86\
xfc\xda\x0b\x71\x5d\xd6\x13\x7f\x5b\x25\xc7\x2e\xb2\x07\x65\x4a\x84\xaf\x52\x09\x71\x12\x65\x79\x51\x2e\x20\xae\x28\xe3\x95\x68\x40\x7c\x6a\x0f\xdf\xf5\xab\xf1\x55\xe7\xf8\x12\x02\x6d\xf2\x81\xd7\xcd\xb3\xd5\x58\x53\x94\xd7\xa2\x7a\x95\xa1\xfb\x79\x9a\xd2\xc9\x73\x20\xa1\x2b\x13\xd8\x95\x9b\x20\x3b\xdf\xbe\xe0\x76\xa5\x90\x24\x3e\x0d\x03\xb4\x1b\x0b\x5e\xb6\xd6\xac\x4e\x3b\xeb\xd9\xd4\x45\x4d\xd7\xa6\x0c\x34\x51\xf5\x0f\xd6\x06\x03\x6b\x07\x36\x2e\x70\xb4\xcb\x63\xa4\xbe\xb4\x2a\xef\xf0\x7d\x79\x30\x30\x7c\xe9\x96\x86\x9d\x1e\x8f\xc1\x2d\x6e\xca\xe3\x34\x45\xc9\xb4\x42\x36\x33\xd5\xd8\xe6\xc8\xf9\x24\x5e\xbb\x9c\x08\x8b\x43\x55\xa2\x10\xf9\x82\xa4\xae\xa6\x12\xd1\x84\x24\xa9\xae\x81\xb1\xb7\x45\x90\xe7\x34\xec\xb1\x2a\xb4\xeb\x3b\x06\x91\xa3\x25\x6d\xf2\x32\x45\x78\x30\x03\x16\x3a\x0d\x73\x48\x9f\xef\x54\xd3\x66\x95\xac\x2c\x43\x69\x4b\x79\xad\xad\x2c\x66\xc8\xb5\x24\xc4\xaa\x81\x08\x61\xd2\xa8\x40\x75\xa9\x27\x0b\x8c\xe8\x38\x58\xe6\x94\x9d\xc4\xc3\x34\x69\x17\xe4\x3c\x48\xc0\x28\x29\x5f\xa4\x51\xcc\xaf\xc3\x93\x82\x66\x93\x60\xac\xbc\x63\x37\x38\x8a\x37\x39\x6e\xdb\xfb\x54\x3d\x43\x24\x8e\x7f\x5d\xb5\xa8\xd1\xe2\xfc\x89\x16\xdc\x5d\x33\xdb\x20\x7b\xe4\x7c\x16\x8d\x67\x60\x35\xc0\xd6\x77\x91\x8a\x7d\x8c\x2c\xe2\x65\x5e\x7f\xf7\x2a\x18\x41\xcd\x04\x6b\xee\xe1\xb7\x64\xaa\x91\x61\x57\x17\x54\x55\xb1\x7a\x01\xf2\x36\xc2\x63\xb9\xe0\x88\xac\x95\x6f\x24\xc8\x54\x09\x31\xe6\x53\x87\x3e\xb7\x48\x6f\xce\x7d\x3d\xc7\x1e\xef\x79\xb7\xc1\xfd\x79\x19\x6f\x72\x4e\xc3\xde\x63\x70\xc9\x53\x16\xdf\x81\xd8\xdd\xfe\xb4\x61\x38\xc7\x9f\xfb\x7a\x85\x78\x4e\xd3\x5e\xbb\x25\x8b\x6e\x77\x95\xfd\xb3\x69\x2c\xd1\x1a\x7e\x5b\x66\x02\xad\x4c\x1a\x5a\xc3\xed\x1d\xd7\x26\x5a\x8c\xbc\x35\xdc\xd9\xba\x3e\xed\x6d\x3f\xb9\xb7\x7d\xba\xb7\x7d\xfa\x6b\xdb\x3e\x21\x63\x67\x61\x03\x79\x07\xd6\xce\x25\x7e\x2c\x85\x75\x25\x7f\x98\x75\x34\x91\x97\xce\xfb\xd9\x34\x1f\x96\xa8\x6e\x90\x90\x27\x8e\xb0\xa2\x12\x1c\xfb\x4e\x6e\x27\x8c\x7d\xca\x4a\x09\xb6\x71\x02\x3e\xdf\xf3\xf5\xe1\xfd\xbb\x03\xce\xdc\x6f\xd3\x01\x1e\x70\x09\x58\x2d\x85\x17\x8c\x45\x4a\xde\xbf\x3b\x10\x17\x05\xfe\x0e\x88\xf7\xe8\xe0\x45\x51\xb7\x3c\x4b\x73\x7c\xfd\xe5\x36\x7e\x70\xf4\xf6\xed\xcb\x83\x8f\x87\x47\x6f\xc9\xcb\xf7\xef\x8f\xde\x0f\xc9\x81\xd2\xff\x8e\x79\x95\xfc\x48\x1f\x52\xd2\xde\x20\xac\x3e\xb2\xd1\xee\xfb\xfb\xa0\x5d\xde\x34\x1d\xbb\x7a\x68\xcf\xb5\x08\x05\x5b\x3d\x11\xaf\xcc\xdf\x84\xb4\xa4\x1d\x12\xdb\x2a\x18\x0d\x13\xde\xa5\xd1\x3c\x0f\xa6\x94\xec\x91\xf5\x75\xf1\xd4\x90\x6d\xeb\xe2\x77\x9f\x87\x8c\x75\x52\xfa\xb2\xd8\x33\xe2\x4d\x1e\x12\x35\x5d\x7f\xff\x70\xf4\x16\x66\x25\x53\x5d\xf2\x84\x59\x15\x7d\x73\x1e\x93\x69\x1c\x88\xaa\xcd\xd1\xea\xd9\xfc\xc8\xef\xab\xf1\x78\xe7\x79\xd3\x29\xfd\x78\xf8\xe6\xe5\xd1\xf1\xc7\x21\x11\xb7\xde\x8c\xb8\x58\x27\xe7\x39\xd9\x20\x6d\xf6\x5f\x30\x9e\x31\x8e\xd1\x36\x62\xda\x08\x3f\x92\xdf\xde\xef\x56\xf7\xbb\xd5\x5f\x7b\xb7\x42\x9b\x15\x3c\xbb\xfc\xa3\x9a\xe9\x36\x7f\xcd\xde\xe8\x11\xfd\x1d\xbe\x65\x97\x4e\x87\xd8\xfa\x57\x87\x33\x1c\x93\x29\x37\x8e\x21\xe2\x91\x2d\xb4\xa5\x0f\x0b\xb6\x15\xf2\xd7\x7e\x08\xbf\x90\xb6\xbc\x48\x93\x8e\xf3\x79\xec\x0a\x52\xf1\x1e\x39\x4f\x93\x6e\xcd\x1b\x7a\x94\x99\xa4\xc9\xe5\x3c\x5d\xaa\x16\x55\x42\xc9\xe9\x4d\x22\x6d\x4a\x25\xae\x68\xc8\xe5\x01\x88\x62\xe0\x84\x6b\x12\x69\xea\x78\xf6\x3c\x4d\xe3\x6b\x08\xaf\x1a\x82\x0f\x72\xbe\x49\x50\x0e\x19\xa2\xd9\x81\x07\x22\x34\x34\x3c\xa6\xcb\x13\x1f\x44\x23\x60\x8b\x52\xd4\x3e\x58\x33\xa6\x09\xbb\xdf\x62\x10\xa6\xe7\x28\x5e\xaf\x1d\x81\x01\x21\xdf\xbd\x13\x89\x3c\xa2\x42\xd4\x17\x35\xc1\x05\x87\xf8\x5d\x62\xef\xea\x2f\xaf\x0d\x96\x4b\xaf\x88\x31
\xb6\x39\x7d\x86\xdc\x07\x38\xb8\x31\xb2\x70\x1d\x6a\xf7\xe0\xde\x70\x41\xde\x0a\xca\x51\x87\xaa\xab\xf2\x12\xc4\x29\xd1\xf5\x50\xde\xd1\xf4\xda\x7c\x74\xb0\x42\x3d\x43\x2b\x84\x43\xf3\x8a\x71\xe1\xa2\xd5\xf4\xb0\xd2\x88\xa4\x2b\xf5\x1b\x0d\x27\x8f\xa6\x49\x50\x2c\x33\x7b\x38\x38\xbd\x6c\x3c\x18\xa6\x7c\x3c\x0a\xaa\x6a\x40\xe0\xc1\xa0\x79\xff\xc5\x13\x07\x49\xde\x82\x23\x05\x49\xa8\x54\x4b\x45\x0a\x41\x89\x27\x51\x12\xc4\x7e\xb3\x67\x5e\x87\xcf\xa8\x14\xaf\x6b\x2b\x4b\x54\x6f\x20\x45\xe6\xd1\x33\x9a\x5d\x16\x33\xae\xb2\x9e\x8f\x22\x60\x19\x29\x8f\x12\x0d\x7d\x13\x71\x16\x2a\xb1\xe5\xf1\x0d\x22\xba\xe3\xb8\xb6\x53\x8b\x5b\xfd\x42\xaf\x00\xef\x3d\x88\x68\x7f\x1d\xca\x41\x47\x9d\x6b\x11\xa9\xd7\x5c\xb7\x76\x1e\xb7\x9f\xa2\x72\xfe\xb2\x55\x38\x17\xe4\x8e\x3a\x25\xb5\x77\xba\xae\x4a\x53\xcc\xd3\x47\xd9\xb1\xdb\xb2\x74\x14\xc3\xa2\x92\x9f\x83\xe7\x65\x11\x4c\x5b\x94\x3f\x89\x20\xc6\x94\x65\x0d\x20\x80\xf0\xfc\x31\xba\xd1\xc9\xc9\x32\x8e\x4b\x9e\xb8\x68\xcd\x22\x71\x6f\xff\x4d\x85\x30\xd4\x57\x16\x9a\x11\x32\xad\xd1\x9c\x55\x5c\xf7\x0b\xec\x3b\x8f\x63\x3a\x7c\xfb\xea\x91\x33\xfb\xea\xbc\x6b\x07\xd7\x5b\xa9\x36\xe8\x7b\x0d\xc5\x99\x44\x32\x4e\x93\x71\x50\x74\x8c\xd9\xef\x96\x3b\xb2\x29\xe5\x7a\xc2\x8b\x4d\x39\xd7\xb3\x77\x5b\x5a\xc6\xe1\x42\x7e\xf7\xe0\xf2\x30\xc1\x15\x84\xe5\x10\x9c\x10\x78\x2d\xa1\x6a\xf6\xc1\x03\xd0\x37\x98\xbd\xa8\xde\xa6\xcb\xbd\xef\x00\x0e\xee\xd0\xfd\x4e\x90\x4d\xad\xd5\xa5\xc5\xc7\x67\x46\xc9\x21\xfe\x12\xae\x79\xb6\x90\x2b\x14\x31\x3e\x71\xff\xa2\xea\xb5\x9f\x6a\xf1\xc9\x24\x5f\x94\x94\x86\xeb\xdb\xee\xee\xb2\x95\xf9\x4b\x1a\x25\x9d\x56\xcb\xad\x5c\xbd\x8a\xe3\xe4\xc6\xf1\x84\xaf\x37\x40\x36\xec\xb0\x65\xde\xed\xe1\x1e\xe1\xab\x9a\x24\x2d\x0e\x8d\xbe\x2a\x14\x7a\x1c\x0e\x69\xe0\x86\x6d\xc3\xb3\x85\x6e\xcf\x6a\x05\xb7\xaf\x36\x12\xc4\xb5\xd3\x65\xb1\x58\x16\xaf\xd3\xa9\x66\xd7\xc2\x19\x0f\x5a\x2d\xd2\xfb\x0f\x77\x34\x83\xc4\x32\x13\x4c\x73\x6b\x18\x93\xed\x07\x8a\xc3\xf0\x5b\x2e\x83\x9f\x66\x34\x5c\x8e\x29\x9a\xab\x60\x3c\xee\x11\xe1\x8b\x12\xf3\x93\x60\x3c\x3e\x11\xc9\x9c\x27\x32\xa4\x88\x6f\x49\xe5\xcf\xcc\x29\xeb\xe7\xb3\x68\x52\x74\xba\x64\xe8\x60\x54\x66\x39\x4a\xab\x60\x3c\x96\x5a\x2a\x6e\xed\xcd\x49\x9b\xc6\xb4\xa0\x72\x1c\xda\x4b\x92\x99\xce\xa9\xea\x06\x2c\x03\xdd\x5f\x89\x87\x25\x62\x69\xb3\xad\x9e\x8b\x71\xa5\x9e\x15\xee\x4a\x2e\x32\x1a\xae\x16\x7e\x3c\x9e\x1b\x6c\xe9\xe7\x8f\xee\x92\x69\xbb\xde\x25\x53\x55\xf1\xad\x72\x23\x3b\xb3\x02\x62\x48\x80\x86\xf3\x07\x5b\xec\xb0\xfd\x3e\x39\x02\xe5\x1f\xca\x01\x54\x29\x2d\x63\xdb\xff\x06\xaf\x1a\xad\x67\x6d\xde\x27\x8d\x95\xd4\xf8\xb5\xbc\x4d\x31\x50\xf3\xe4\x5a\xc6\x01\xa5\x81\x21\xb4\x74\x82\x00\x4e\x0d\xea\xf5\x01\x60\x07\x56\x9a\x28\xbc\xa0\x27\x8a\xdd\xf3\xb6\x4f\x4b\x07\x60\x58\x4d\x78\xef\x84\x0d\x5c\x22\x97\x58\x55\x57\xc2\x75\x8e\xb2\x6e\xe8\x1b\xeb\x69\x13\x05\xfc\x6d\x9d\x5d\x0e\xfc\xba\xc9\x37\x9c\x06\x3d\xfa\xbf\xea\x48\x22\x38\x88\xc8\xda\x60\x40\x3e\x1e\xbd\x38\x1a\x92\x8c\x72\x8b\xac\x1e\xc9\x53\x61\x3a\xa3\xae\xb8\xb4\x31\x4e\xc0\x35\x5d\x7d\x56\x2e\x2a\xda\x39\x49\xe8\x98\xe6\x79\x90\x5d\xb2\xc5\x02\x21\xb0\x73\x46\x6e\x6d\x70\x58\x0c\xee\xa2\xc9\x79\x9a\x7d\xe6\x52\xde\x7c\x19\x17\xd1\x22\x46\xa1\x1c\xcc\xe0\x29\x7e\xff\x46\x83\x87\xc4\x6b\xcc\xfd\x8d\xb4\xe5\xe6\x75\x98\x66\x0c\xb2\x79\xc3\x88\x54\x37\x46\x43\xbe\x71\x98\x27\x13\x55\xaa\x2f\x71\xe4\xf3\x60\xb3\xce\x3a\x77\xe2\xc2\x9e\xfa\xce\x0f\x65\xb0\x16\x3b\x25\x8e\x81\xa3\xd9\x4f\xe1\xd0\xc9\x57\x53\x8d\x1d\xa4\xb7\x9e\xd2\x23\x94\xae\x5f\x10\xbc\x3d\x26\x07\xc0\x73\xe4\xe6\x39\x3e\x6c\xf0\x1c\xc5\xf4\x84\x49\x8f\xd9\x45\x8f\xe9\xa7\x28\x96\xd3\xc
2\x0a\x15\xe3\x73\x72\x55\x79\x10\xab\x9e\xee\x88\x56\x8c\x57\xc3\x78\x86\x5c\x46\x2f\x44\x47\x39\xb9\x5c\x79\xd8\xaa\xe0\x1d\x0c\x9c\x20\xc3\x51\x7a\xd1\x37\xd8\x91\xfe\xd8\x25\x12\x40\x72\x21\xf8\x7f\x57\xa6\x2a\x96\xc3\x7f\xa8\x74\xc4\x68\xe4\x4f\x53\x8e\xa4\x17\xe2\x7d\xb7\xcb\xcd\x39\x1a\xb4\x6b\xa2\x12\xfe\x5c\xc2\x91\x5b\xc3\x1d\x70\x61\x84\xbd\x86\x33\xc6\xfc\xdd\xfd\xcd\xe8\xfd\xcd\xe8\x5f\xfb\x66\x54\x5c\x8b\x8a\x37\xbf\xff\x15\x01\xf6\xee\xd4\x65\x38\x1c\x02\x1e\x92\x83\x34\x39\xa3\x8c\x15\x05\x22\xe6\x31\x9c\x83\xe1\x2c\x00\x81\x8b\x65\x24\x17\x46\xc0\x41\x9c\xa7\x24\x88\xe3\xf4\x3c\xe7\xf1\xd9\x41\x51\x97\xf7\xd7\x58\x45\x52\xf0\x7f\x13\x5d\xd0\xf0\x9a\x67\xad\xb9\xf7\x1a\x6b\xe2\x46\xb5\x48\xed\x28\xc7\x42\x65\xa9\x0e\x9c\x1d\x53\x25\x4a\xae\xae\x64\x84\x74\x9d\xd1\x56\x3a\xd4\x76\xd7\x56\x06\xf0\xb3\x9c\x10\x91\xb8\x62\x96\xf7\xa1\x23\xf5\x8b\x46\x43\x5c\x0f\x71\x34\x01\x55\x73\x17\x6a\xdf\x74\xea\x04\x48\xc1\xf7\xf1\x93\x56\xe3\xce\x48\x46\x51\x52\xed\xc0\x91\x8b\x89\x9a\x8c\xd3\xca\xcb\x1f\xdb\x12\x36\x55\xfa\x7d\x71\xd8\xea\xb1\x49\x38\xa3\x59\x34\x01\xc7\x1e\x19\x1d\x07\x8c\xe3\xa0\x48\x35\x0f\x1e\x90\x38\xf8\xf5\x92\xc4\x69\x10\x92\xf0\x32\x09\xe6\xd1\x98\xa4\x09\xcd\xa1\x35\x31\x21\xba\x21\x11\xcd\x3a\x55\x7a\x02\x80\x92\x06\xf6\xb2\x71\x07\x8a\xcd\xd6\x94\x16\x47\xea\x90\xec\x71\xe1\xcc\x26\x46\x0b\xac\x75\xfe\x01\xb0\x32\x41\x4c\x89\x3c\x26\x97\xdf\x7a\x18\x9a\xfe\xd2\xab\x17\x9e\x9d\x9f\x47\x10\xb0\x04\xf5\x8a\x80\x0e\x22\xa7\xfc\x04\x3d\x74\x9e\x56\x71\xe1\x7d\x99\x51\xa1\x5e\xec\xc1\x05\xde\x98\xaf\x0e\x7e\x38\x9e\xd1\x0b\x9f\xda\x40\x6b\x4d\xad\x04\xcb\x15\x65\x83\x22\x86\xe6\x53\xc4\xd5\x2e\x55\xca\x5b\x0a\x7f\x19\x85\xfb\x99\x88\x4f\xce\xaa\x12\x8b\xac\x4b\x86\x72\xbd\x09\x30\x57\x56\xf2\x5d\x13\x78\xde\xd7\x41\x37\x87\x56\xb7\x7b\x0e\x1c\x5b\x02\x1a\x8a\x7d\xb9\x30\x45\x8a\xeb\x71\xf3\x03\x19\x96\x59\x02\x05\x38\x28\xb3\xdd\x1a\xdc\x5f\x0d\x57\xba\xd6\xea\xab\x72\x5d\x5f\xef\x6e\x52\xa3\x28\x65\xea\xa7\xd0\x41\x87\x53\x60\x3e\x63\x14\xe8\x41\xb8\x45\xea\x52\x55\xb3\x1f\x86\xfc\x59\x84\x52\xa2\x05\x49\x48\x72\x5a\xe4\x64\xb9\x80\x0c\x71\x1a\x01\x96\x11\x15\x34\x63\x7b\x47\x7a\x26\x84\x2d\xe1\xc7\xb4\xbf\xb6\x86\x9e\x46\xbc\x4e\xa7\xf9\x7e\xf1\xa1\x08\xb2\x62\xcd\xd6\x34\xe6\x34\x9e\xa8\xc4\x89\xfb\x80\x59\xb0\x70\xb3\x16\x23\x50\x18\x8d\x27\x8e\x13\x1f\xf9\xca\x6e\x4a\x0b\xae\xcf\x62\x85\xad\xa7\x76\xa0\x5f\xd0\xc3\xcc\xa1\x7b\x44\x9e\x3c\x2d\x9e\xc1\x5a\xe9\xfb\x18\x07\x64\x4c\x69\xd1\xb1\x1e\xfd\x08\x4b\x46\xe7\x94\x33\x18\x88\x17\x34\xf0\x4c\x94\xf5\x51\xa0\x0d\xcc\x26\xe1\xa2\x5b\x26\x4a\xb3\x23\x70\x85\xd1\xef\xf7\xc9\x2f\x4b\xee\x09\x98\xb5\xc9\x78\xaf\x73\x5e\x2e\x79\x19\x59\xf1\x2a\xf2\xda\x7e\x02\x6b\xad\x74\x35\x0c\xff\x19\x93\x67\x7a\x0f\xa6\xdc\x90\xb3\xee\x9d\x26\x7f\xbc\x63\x9a\x7d\x1a\xfd\xab\x77\xc4\xfa\xf5\x48\x77\x91\xc6\x31\x27\x1f\x3f\xd9\x0a\xda\xd4\x60\x36\x5d\x2a\x95\x08\xa8\x6d\x93\x37\xca\x0c\xd7\x20\x96\xb4\x84\x5c\xc4\x8c\xa6\xce\x9c\x4a\x23\x0b\x46\x7a\x72\xac\xbe\x49\xf0\x3d\x9b\xf2\xd1\x44\xda\xf8\x24\xdf\x94\x3a\x6e\x46\x19\xda\x4c\x19\x86\xa6\x95\xd7\xcf\xac\x04\x5d\xc9\x50\x16\x72\x49\xe7\x56\xe8\xb9\x1d\x91\x96\xea\x03\xa0\x4f\xb6\x37\x6a\xc6\x78\xde\xa5\x71\xcc\xf8\x8c\xee\x09\xa7\xc1\x21\x2f\xc2\xce\x69\x74\x4e\x93\x02\x8e\x9c\x7d\x46\x71\x30\x34\xbd\x97\x2c\x84\xa1\xfd\x09\xc7\x14\x90\xe3\x61\x78\xda\x93\x57\x54\x46\x72\x4f\x13\xa3\xc8\xc1\x7e\x8c\xb8\x82\x18\xe8\x97\x6d\xd6\x32\x6c\xa1\x43\xe2\x96\x4c\xd6\x23\x4e\x7c\x0f\xb9\xdc\x3c\xb7\x03\x3d\x71\x9a\x3a\xc8\x28\x8c\x09\xec\xb5\x0f\x3c\x2f\x1d\x81\xd9\x71\x0d\x36\xba\x70\x
35\xf0\x81\x34\x7c\xab\xa8\xca\x4a\x75\x5d\xa5\xca\x1e\xbf\x52\xcd\xec\x0c\xb2\x25\x20\xa5\x2e\xe3\x4b\xad\x31\xb5\xb0\xa9\xc5\x60\x4b\xf4\x45\xd0\x0e\x1a\xcc\x04\x04\x29\x67\xde\x7d\x32\xa6\x56\x88\xb0\xac\x51\x19\x62\xcb\x3d\x28\xcb\xd7\x6c\xcf\xc9\xc2\xd7\x4e\xea\x77\x69\xbf\xfb\x09\x3d\x17\xb7\x4e\x18\x07\xd8\x59\x18\x67\x92\x51\x68\xf8\xc6\xf3\x33\xc7\x9a\x65\xdf\x19\x8f\x3c\x62\xee\x78\x54\xcb\x07\x89\xe0\xc8\xe2\x5c\x58\x41\xbd\x96\x47\x52\x97\xbd\x54\x94\xf5\x77\xa3\x5a\xef\x6c\x2c\x6d\x46\x04\xa1\xeb\x08\x10\xfb\x6a\xc8\x28\x5c\x32\xb0\x33\xc7\x82\x26\x21\x18\xb8\xa9\x49\x0e\x72\x50\xb4\x24\x39\xa3\x50\xe5\x0c\x46\x57\x94\x4e\x00\x98\x15\x62\x52\x4f\x97\x2b\x57\x54\xeb\xcb\x24\xc8\xf3\x68\x9a\xd0\xb0\xef\xf6\xd1\xa6\x28\x1f\x4f\xf6\xcd\x8e\x92\xb1\xc6\xa3\x9a\x09\xf2\x36\x83\x4d\xc6\xd0\x48\xb4\x3d\x31\x89\xb1\x74\x18\xc4\x19\x0d\xc2\x4b\xfd\x60\x5d\x0b\x8a\xf9\xed\x29\xcd\x14\x64\xa5\xf4\x5a\x37\xae\x68\xd2\xb1\x5a\x53\x4e\xe0\x36\x5d\x97\x5c\x7a\x65\x72\x2e\xee\xf3\x0b\xc9\xa4\xe8\x22\x15\x63\x8b\xe6\x73\x1a\x46\x41\x41\xe3\x4b\xbb\x59\x41\xee\xe3\xa6\xb4\x6d\x4a\x27\x50\x7d\xa7\xc4\xd5\x84\xcf\x6d\x15\xd6\x64\x73\x96\xcf\xb6\x1f\x3e\x18\x74\x97\x7b\xee\x84\xe9\xb0\x37\x73\x93\xb7\x71\xc3\x3e\xd4\x0f\xa9\x8e\x31\x98\x23\x1e\x8d\x35\x4f\xe2\xba\xd4\x1d\x08\xc2\x35\xba\x13\xbe\x6e\x3a\x10\xbc\xef\xd6\x8f\xc7\x91\x1c\xd2\x85\x14\x1c\xcc\x81\xd4\xf0\x77\x78\x5a\x3e\x4f\xcf\xa4\x4a\x93\x04\xf9\x65\x32\x56\x87\x1f\x9f\x60\xe4\xe3\xdb\xcb\x04\xde\x4e\x1b\x08\x40\x32\x86\x85\x2d\x87\x77\x61\x43\xf8\x55\x6a\x36\x04\x7f\x07\xa3\x53\x2b\x66\xbb\xcf\x7b\x82\x23\x53\x78\x4d\x4e\x54\x49\x5b\x28\xb7\x76\xd4\x12\x3b\xca\xc1\x80\x1c\x4e\x34\x67\x8c\x72\xf5\xae\xef\x92\x0a\xff\x2b\x24\x2a\x88\x76\xd3\xa5\xcb\x9d\xcf\x28\x18\x63\x88\xd1\x77\x09\x67\xaa\x39\x89\x0a\x93\xad\x7a\x37\x6a\x87\xd8\xd5\x32\xf3\xed\x1e\x3e\xf4\x8b\x1a\xed\x09\xc5\xfb\x31\x84\x48\xf1\xf0\xb7\xaf\xe8\xa0\xc7\x92\xc7\x33\x6a\x5b\xef\xc5\xe9\xb4\xac\x5d\x62\x31\xa6\x8a\xb3\x05\xd4\x32\x64\x7b\x42\x89\x3f\x3e\x7f\xc4\x12\x13\xc4\x39\x00\xd8\x03\x6b\x4e\x47\x8e\x9f\x29\x21\x88\x1f\xbe\xe0\x09\x43\x41\x63\x9d\x6e\x9f\xef\xc8\xe3\x40\x7a\x2c\x04\xbf\x2a\x34\x24\x6c\x75\xcf\xb2\x34\x49\x97\xb9\x72\x5f\x28\x0c\x03\xd8\x6e\x6f\xbb\x22\xe2\xd5\x08\x61\xb7\xed\x35\xaf\x05\xa7\x12\xa9\xb6\xd2\x6b\x42\x40\xae\x0d\x1d\xab\xa1\x7e\x0e\x6f\x31\x6f\xd7\x35\xfc\xd8\xb9\x22\xe5\xb8\x75\x82\xbf\x55\x5c\x90\x5e\x9f\xf6\x76\x36\x9b\x5c\x81\xb6\x97\x39\xd7\x8b\x8f\x8b\xf6\xda\xfd\x85\xe8\xfd\x85\xe8\x9f\xf8\x42\x54\x3f\x15\x45\x2a\xeb\x9b\xbc\x17\x15\xc0\x2b\xdc\x64\xfa\x82\xbf\x35\x7e\x62\x9a\x4c\xa2\xa9\x17\x8e\x67\x49\xc0\xc3\x51\x60\x05\x75\x89\x46\x41\xe2\x09\xd4\x02\xda\x64\x1e\x69\x8a\xdb\x48\xf3\xcb\xcc\x51\x34\x15\x1e\x0c\x2c\x2b\x46\x0e\xf4\x3c\x9a\x5a\x4a\x7d\x6c\xcd\xc8\x35\xce\x57\x1c\xe2\x4a\xc1\x5e\x9b\x5e\xab\x74\x3a\xb6\xc4\x05\x3d\x63\x49\x1b\x86\x54\xc4\x7b\xe7\x7d\x86\x56\xa4\xaa\xac\x04\xdb\x55\x4a\xa0\x28\x7f\x97\x51\x71\x0d\x8a\x6e\x27\x8c\xba\x47\x3a\xdd\x6a\x60\x84\x4b\xb0\x83\x84\x70\x7f\x4f\xae\xae\xdc\x3c\x71\x36\xf5\x67\xd2\x20\x8b\x23\x56\x14\x75\x2d\x59\x2c\x8b\x17\x74\x12\x2c\x63\xef\xc5\x49\x5d\x1f\xd9\x8e\x6c\xb7\xa3\xae\x7c\xbd\xe1\x5b\x18\xc9\xf4\x43\xd4\xa2\xc7\xf7\x54\xf9\x3d\x0e\xee\x82\x35\x8a\xdf\xa2\xfb\xf6\x8b\x2e\x2e\xa0\xb0\x5a\x4a\xe6\xd8\x68\xd4\x53\x21\xca\xf6\xe0\x41\xd2\xd6\x2b\x7a\xe1\x19\xb9\x58\x55\x7c\xb0\x39\x32\x8a\x4c\x27\x24\x30\x7c\x03\x82\xe3\x49\x65\x47\xa0\xec\x02\xd8\xba\x7b\xf5\xf2\x9f\xd6\x72\x83\x3a\x98\x5c\xec\x5d\x68\x52\x97\x6f\xf8\xd8\x75\x0c\xdf\xe5\x15\xb9\xd4\xf6\xbb\x75\x7a\
x23\x7f\x7f\x31\x2e\x8f\xe1\xfa\x0f\x5d\xc1\xc2\xe7\xd5\x95\x45\x43\xfb\x63\x88\xbb\x80\x1c\x9f\x61\x78\x8f\xc7\x2d\x59\x2d\xf4\x49\xb8\xa1\xf2\x5f\x3d\x9a\x72\x10\xae\xba\x48\x45\xc0\xe8\xa8\x20\xf3\x68\x3a\xe3\x82\xa3\xf2\x5e\x2c\x94\x54\x4e\xcb\x45\x5a\xdb\x6e\x91\x9a\xad\x9e\xb4\xe7\xc1\xc5\x8f\x94\xbe\xa3\xd9\x4f\x41\xde\xee\x11\xf6\xfd\x2e\x8b\xd2\x2c\x2a\x2e\x8d\xf4\x69\x90\xbf\xcb\xa2\x31\x15\xbf\xd9\x7f\x30\xcd\xec\x47\x92\x26\x63\xea\x7b\xc5\xf8\x99\x5e\x56\xbc\x63\xfc\x4c\x2f\x9b\xbe\x64\x84\x9a\x1c\x5c\xf3\x1a\xf6\x90\xdd\xc5\x0b\x3a\x8e\xe6\x41\xdc\xc1\x00\xee\x4b\x32\xf3\xb2\xf5\x6b\x13\x3b\xf2\xb9\x79\xd7\x34\xef\xab\xfa\xee\x49\xff\xa6\xd4\x7d\x4f\xd7\x7f\x44\xba\x16\x42\x91\x43\xd8\x70\xff\x2a\x83\x09\x09\xaa\xf6\x8a\x4a\x8d\xe9\xf9\xc2\x14\x8f\x44\xfa\x9a\x21\x13\xd5\x52\x70\x71\xd1\xfd\xa2\x34\x83\x17\x7d\xbc\x9d\xae\xcb\xd3\xb9\xd6\x88\x99\x00\xca\x43\x46\x2a\xf1\x67\x02\xa8\x37\x20\x2c\x1d\xe1\x02\x5e\x9b\xf9\xab\x77\xa0\xbc\x6d\xd8\x50\x52\xf9\x77\xd1\x07\x92\xf2\x17\x82\x2c\x0d\x39\x0d\x72\x3f\xdc\x34\xc8\x0d\x28\x20\x5f\x04\xaa\x45\x45\x94\x6f\x0c\x15\xaf\x0d\x93\x50\x35\x55\xdb\x60\x25\xf5\x63\x18\xc3\xcf\xa7\x6a\xc9\x59\x75\xd5\x2d\xba\xe0\xe5\x2d\x3b\xb0\x46\x0f\x8a\x8b\xbe\x34\xfc\xf3\x56\x80\x9f\x19\x4b\x2d\xc4\xc5\xca\xcb\x46\x86\xe8\xb9\xc9\xf2\x11\xd1\x82\x2a\x57\x91\x0a\x5e\xb5\xca\x52\xb2\x2b\xb6\x5c\xd6\xe0\xa8\x43\x3a\xca\x50\xcd\xda\xf2\x41\xb9\xf4\xe9\x81\xd2\xa4\x27\x33\x1b\x2c\xb5\x52\xd0\xf2\x26\x4b\x16\x9d\x0a\x86\xb3\x9c\x2f\xe3\xa0\x88\xce\xe8\x4f\x41\x7e\x9c\xc3\x9b\xbe\xb2\xaa\x1c\x58\xab\xae\x69\x6d\x0d\x53\xa3\x1c\x1a\x3b\x9d\x4c\xe8\x58\xd4\xcc\x57\x6e\xc9\x72\x28\x2f\xe0\xa3\xe7\x52\x68\x7b\x51\x9a\xd6\x22\xb2\x58\x9c\x4e\x6d\xe3\x4b\x9d\x81\x22\x0a\x39\xda\x41\x50\xf1\x79\x75\x83\x9e\x07\xca\x0c\xb6\x4e\x11\x28\x5a\x6a\xb4\x0e\x81\xc8\x9a\xaf\x3c\x38\x38\x55\x2e\x36\xa8\xb0\xc1\x52\x33\x6b\xc2\x26\x46\x50\x83\xf6\xd9\x4d\x94\x73\x26\xf0\x52\x28\xf4\x0f\xf8\x68\xd8\x1f\x05\x39\xad\xe5\x8d\x3e\x50\x1f\x19\x78\xe0\x0c\x02\xe0\xf9\xd3\x20\x7f\x1d\xcd\xa3\xc2\x43\xbf\x26\x80\x28\xab\x12\x4b\x88\xde\xc8\x37\xca\xe4\xd1\xaf\xbe\xdd\x4e\x67\x1a\xd0\x45\x34\xa7\x79\x11\xcc\x17\xa5\x45\x14\x84\x5e\x58\x3c\x23\x29\x63\x5b\x46\x76\x59\xb5\x4a\xa7\x82\x3a\x13\x46\x93\x49\x34\x5e\xc6\xf0\xae\xa7\x0c\xd3\x1a\xc8\x1c\x48\x5a\x04\xf1\x8b\x26\x15\x58\x90\x58\x6a\x36\x57\xaa\x00\xd7\x3c\xce\x5c\xaf\x6e\xb6\x2b\x6b\x46\x05\x9d\x77\xed\x17\x7d\x8e\x59\x25\x40\xb9\x17\xd8\xc6\xaa\xf6\x49\x6d\xbc\x60\xdd\xf2\x1e\x71\x9d\x4c\x83\xc5\x1d\xa7\x53\xef\x2a\xc6\x1c\xc5\xb7\x86\xe3\x74\xaa\xd5\x6f\xee\x42\x86\x7a\x8d\xc5\x8c\x2b\xc4\x4b\x19\x5d\x7b\x44\x13\xf6\x65\x6c\x6a\x6a\x9c\x56\x86\x87\xc6\xec\xa2\xbb\xb8\x4e\x67\xd7\x32\x2a\x6e\xb0\xfd\x79\x2b\x31\x9a\x88\xd3\xa9\xa7\x6a\x99\x5a\x52\xa5\x2a\x64\x9e\x2e\xe0\x2a\xa7\xfe\xc4\x7c\x3e\x8b\x72\xc6\x8d\x17\x69\x5e\xdc\xe0\xc8\xfc\x2e\xcd\xab\xa5\x22\x37\x06\x53\x25\xd7\x76\x2b\xc5\x13\xcd\x3a\xa9\xcc\x42\x07\x03\xe8\x74\x7f\x11\x5c\xc2\xbb\x8a\x3d\x43\x4d\x86\xb3\x04\x92\x21\xa9\x28\x62\xef\x79\x4d\x66\x62\xd8\xf3\x34\xfb\xfc\x31\x7d\x97\xa5\x67\xb4\xbc\x0c\x02\xc2\x65\x17\x42\xe2\x2d\x2f\x28\x21\x50\x68\x81\x09\x8e\x3c\x65\x58\x52\x73\xd6\xc2\x3b\xc9\xdd\xac\x60\x9e\x82\xd2\xc9\x9e\xf1\xf5\x8c\x9c\xa0\xcf\x53\x32\x54\x66\x0c\xd7\xba\x55\xae\x83\xe7\xea\xf8\x38\x4e\xcf\xe1\x59\x89\xd4\x6b\x54\x55\x5f\xfd\x0c\x82\xc7\x4e\x64\xc4\x44\xd2\x24\xbe\xe4\x01\x21\x0a\xe3\x75\x86\x7c\x21\xc1\x5f\x42\xf8\x1e\xf6\xc8\x67\x12\x64\x68\x3f\xda\xc1\x0f\x24\xec\xa3\x35\xeb\x63\x23\xde\xa5\xee\x83\x80\xfe\x85\x95\xaa\x97\x9b\xd5\x51\xba
\x9f\xac\xcd\x7b\x85\x6a\xc2\x16\x74\x0d\xf8\xa5\x17\x8b\x28\xbb\xf4\xac\x78\x94\x8b\xc9\x2d\xe7\xee\x63\xbc\xd0\x2c\xaf\x6c\x09\x58\xa0\x9e\x05\x00\x94\xed\x93\x25\x2c\x88\xee\xae\x6f\x55\xbe\x0f\xce\x25\xc9\x88\x14\x2f\x18\xaa\x7e\x3f\x1f\x47\x91\xbd\x7c\x65\x19\xbc\xdb\xfe\x3d\x17\x88\x53\x70\x48\x9a\xd3\xeb\x50\x35\x00\xfe\x94\x21\x0a\x9a\x8f\x39\x0c\x06\xab\xac\x08\x58\x9b\x78\x35\x96\x2e\x46\xbd\xdc\x6e\xb1\x92\xac\x6b\x00\x8e\xa2\x66\xf4\xaf\x98\xaa\xad\x91\xf3\x85\x4b\xc1\x66\x3e\x11\xbf\xc6\x4b\xe8\x39\xdc\xe8\x75\xcc\x60\xda\x70\xd5\x31\x0a\x92\x7e\x94\xff\x23\x88\xa3\xb0\x03\xb1\x2e\x44\xca\x8b\x28\xa3\xe3\xa2\xe3\xbb\xe7\x10\x2e\xc5\x00\x50\xd4\xd8\xe9\x3a\x97\x28\x58\xd0\xd1\x31\x88\x64\x0f\x3c\xd5\x1a\x5e\xeb\x3c\x15\x35\xa8\x42\xf4\xcc\xac\x89\xab\x27\x6c\x1b\x12\xe1\x57\x5c\xc2\xb6\x65\x50\x70\xbd\xd0\x3f\x5c\x26\xe3\x28\xf1\x4b\x2b\xc2\x91\x37\xba\x79\x5a\x37\x93\x88\xeb\xd7\xc8\x10\xcf\xc0\x0b\x12\x18\x21\x46\xc9\x14\x0e\x38\xde\xe3\xad\x0b\x66\xfa\x92\x12\x6e\x9d\x6a\x2a\xc0\x50\x66\xf9\x59\x34\x9d\xd1\xbc\xae\x3c\x86\x42\xb4\x23\x72\x3f\x27\xe9\x79\xf2\xa1\x08\x0a\xea\xf3\x2b\x88\x72\xcb\x1b\xc0\x55\xec\xda\x35\x2c\x96\x71\x4c\xc3\xba\x2a\x30\x54\xc9\x51\x57\xbb\x97\x2a\x89\x20\x50\x77\x81\x3a\xac\x85\xe8\xe9\x7a\x2a\x2a\xa8\x29\x69\xdc\xad\x0d\x3d\x69\x08\xd6\x77\x36\x18\x96\x67\xa1\x92\x36\x2f\x19\xfa\x93\x51\x09\x63\xe7\x1b\x7a\xd2\x38\x6c\xd9\xf5\xfb\xb0\x34\x07\x97\xf3\x0f\xa8\x3c\xaf\xa4\xac\xad\x93\xf3\x54\x61\x83\x18\xbd\x37\x54\x0e\x43\x6f\x2a\x86\xc7\xa7\x9a\xa1\x27\x0d\xc3\x5a\x68\xf4\x24\x62\x68\x9b\x0b\x0d\x4b\xd2\x39\xf7\x32\x6c\xb6\xf8\x05\x53\x6b\xb8\xf5\xb4\xcc\x13\x11\x63\xe1\xad\xe1\xce\xce\xf5\x69\x6f\x67\xeb\xde\x8b\xc5\xbd\xd1\xd6\x7f\x8d\xd1\x96\xa0\xf4\xbb\x08\x47\xb3\x9a\xef\xfe\x86\x96\x5a\x3c\x5a\x8e\x69\x82\xc5\xd3\xbe\x42\x10\x80\xe6\x6e\xfb\x83\x38\x1e\x58\x91\x2d\xe1\x3d\xae\x1d\x16\xc7\x75\xe6\x2f\x8d\xca\xdd\x08\x60\x15\x4e\xfc\x7d\x21\xc0\x3e\xf1\xcd\x4d\x38\x99\xc7\xc1\x6f\x57\x77\x00\xaf\x2b\x15\xbb\x03\xae\x95\x27\xdd\xae\x5a\x88\x9b\x17\xc0\xb1\x0e\xea\x94\xdf\x18\x46\x06\xc7\x15\x20\xe2\x13\x43\xdc\x49\x00\x02\xb6\x3f\xd8\x93\x61\xb8\xaa\x04\xbb\x00\xfd\x84\x0b\x9f\x2c\xb2\x69\x8e\x65\xdd\x1b\x84\x7c\x96\x32\xbe\x0e\x8f\x07\x7e\x20\x80\xd7\xf3\x47\x47\xd9\x34\xe7\x51\x06\xd6\x85\xf0\xd6\xac\xc3\x58\x18\xab\xec\x34\xee\xde\x0f\x0e\x29\xc9\x1c\x1c\xec\x4f\xbc\x6e\x74\x07\xe7\x1f\x9b\xed\xbb\xa0\x42\x4c\xec\x68\x3c\x34\x44\x44\x55\x48\x3f\x1c\x85\xd8\x17\xb7\x2a\xca\xc9\x38\xcd\x32\xd7\xa5\x24\x9c\x80\x82\x82\xee\x67\xd3\xdc\x17\xe5\x4f\xc7\x19\x7f\x48\xfe\x06\x27\xa8\x9c\x7c\x81\xf3\xd3\x35\x6b\x2f\x2a\xc4\x9b\x0e\xc3\xeb\xa4\x67\xaa\x70\x3b\xa5\x73\xa4\x0f\xb9\x1c\x0a\x50\xe4\xd8\x82\x04\x1a\xf1\xec\x8c\xcf\x1f\x02\x80\x42\xc8\x70\xe7\x02\x9b\x27\x38\x11\xd4\xd1\xbb\xd8\x56\x1b\xc0\xd3\xbf\x2c\xb8\xcc\xd7\x8c\xb9\x5b\xef\x38\xc1\x1f\x83\xae\x72\x49\xce\xce\xc5\x8e\x8a\xde\xba\x31\x10\xe0\xdc\xbd\x70\x25\xbc\xbe\x94\x30\xca\x58\x05\xac\x57\xb4\x70\x56\x97\xd8\x91\x84\xb8\xbe\xb7\x57\x46\xc8\xe6\xcb\x25\x76\xf6\x15\xf1\xd7\x2a\xa2\xa4\x75\x1c\x87\x02\x55\x2e\x78\xa5\x52\x06\x6c\x94\x30\xa9\x18\x41\x24\xd2\x77\x1c\xcc\x43\x5e\xce\xa6\xa1\x5d\x6f\x97\xb8\xe3\x0d\x62\xd5\xaa\x36\xd9\xaa\xa4\x3c\xd5\x7e\x25\xd9\x19\x61\x47\x57\x67\x18\xab\xf2\x0b\x33\x5a\x68\x49\x38\xd2\x6b\xcd\xcd\xf1\xf2\xe9\x78\x62\x83\x16\xa9\xdf\xf3\xbf\x11\x3b\x74\x8f\x94\x78\xf5\xf7\x39\x87\x17\xef\x56\xd0\x70\x8d\xa0\xa4\x15\xb6\x4f\x25\x51\x6b\x24\xea\x6f\x16\x9d\xc3\x5b\xbc\x72\xde\x6f\x14\xa3\x43\xb8\x17\xdf\xec\x91\xa7\x52\x1b\x54\xd1\xc4\x32\x59\x04\xe3\xcf\x4
7\x5c\x0f\x6d\x58\x01\x42\x92\xa1\x1b\x32\x93\x74\x17\x4c\x37\x4b\xb2\x2a\xfe\x43\x91\xde\x1e\xd9\x26\xcf\x64\xa2\xf4\x80\x4e\xe4\x39\x50\xbb\x04\x50\x7e\xcb\xcb\x1c\xa0\x63\x21\xa7\x27\x8a\x9b\x33\x2a\x74\x29\xd8\x7d\xb3\x8a\x7d\x77\xb2\x79\x4a\x86\x3e\x27\xdd\x07\x10\xfb\x39\x40\xe1\xb6\x25\xb2\xec\x80\xde\x41\x1c\xe3\xc5\xdd\xef\xf7\xe5\xfa\x3e\xb0\xcb\x5a\x9b\x8f\xe3\x1e\xe7\x90\x6f\x77\x10\xd6\x57\x82\xb2\xdd\x28\x50\x35\xf4\xcc\xb8\xfd\x32\x99\xfb\x82\x83\xb7\x87\xf2\xd0\x15\x18\xaf\xc3\x82\x24\x34\x7d\xa8\x48\x30\x1e\xf8\x9a\x9f\x8c\x58\x1d\x3c\x68\x20\x03\x17\x68\xf3\xd2\xae\x98\x55\x88\xdb\x5b\x47\xb5\xd0\xab\xb2\xd8\xc8\xab\x04\x3e\xf6\xef\x9b\x52\x06\xb3\xac\x25\xd5\x1e\x03\x07\x19\x2d\xff\x09\x97\xc9\x86\x58\x88\xd9\x0f\xb8\x7d\x36\xa5\x2f\x5c\x04\x8b\x3f\x76\x31\xad\xd0\xe7\xae\x9a\x25\x97\x96\x70\xfa\x5a\x7e\xdd\xf7\x14\x57\xeb\x68\x15\xe3\xa3\xc5\x8c\x23\x41\x54\xdd\x33\xba\xe6\x3e\xbc\x83\x52\x78\x09\x77\x8c\xf5\x80\x9c\x8d\x3b\x4f\x65\x9b\x34\xd8\x73\x1d\xdc\xb8\x3c\x00\xb9\xb7\x91\x2f\x2a\x0c\x27\x08\x3d\x6e\x5c\xb1\x6b\x3a\x15\xe6\x9d\xa6\xa1\xe3\x40\xbd\xc8\x2e\xad\x77\x83\x08\x14\x9e\x0a\x96\x8f\x97\x18\x6f\x1b\xc7\xf0\xb8\xbc\xe3\xb8\xa8\xe1\x14\xbf\x47\xa8\xd7\x39\x8e\xdd\x79\xd9\x3a\x92\x64\x2a\x37\x8a\x26\xe7\x4a\x7b\xdb\x30\x8b\xd4\xee\x0a\x56\x0b\x7f\xaa\xa5\x56\xbb\x66\x24\x49\x09\x40\x61\x8e\xf9\x03\xd9\x84\x43\x8d\x71\xd6\x74\xa5\x43\x1c\x31\x34\x48\xf8\x3b\xed\x24\x14\xbe\x1c\x21\xe4\x6b\xf2\x48\x1e\x54\x9d\xd8\xb7\x35\xcb\xd5\x88\xf7\xc6\xd6\x8d\x35\x0f\x1d\xf3\x16\x4f\x54\x57\x0b\xde\xdc\xd1\x3e\xcd\x8b\x68\x1e\x14\xf4\xa7\x00\x14\x88\x75\x54\x85\xc0\xeb\x28\x0a\xd7\x7c\x17\xd4\xf4\xf5\xa9\xa3\xd9\x0c\xa1\x71\xd5\xcd\x8e\x07\xb4\x6c\x66\xde\xcb\x66\xa8\x0c\x1d\x06\x21\x50\xa4\x2e\x50\xc8\x07\x78\x2a\xa6\xb4\x78\x61\x87\xf6\x91\x3b\xab\x5d\x4d\xdd\x5c\x89\xba\xee\x78\x9e\x1a\x21\x5e\xde\xe8\x8a\x95\x59\x11\xa9\xdf\x2b\x35\xdf\x22\x00\x21\x2e\x2a\xf1\x8c\xc8\xbe\x12\x61\xbf\x6d\x34\x42\x55\xff\x8d\x02\x12\xaa\x42\xab\x0e\xf2\x6b\x46\x27\xd4\x3a\x1a\x36\xc0\x6c\x31\x96\x6e\xaf\x72\x7e\x6a\xae\x63\x44\x02\xba\xdc\x2a\xa5\x62\x5c\xa2\xec\x1f\x9b\x2b\x11\x23\x4a\x8b\x04\xc3\x62\x8a\x11\x6c\x04\xcf\x89\xeb\x27\xce\xd2\xb8\x3e\x03\xe7\xb3\x9f\x58\x8f\xdb\x64\xc8\x3f\xac\x9d\xa4\xdd\x73\x84\x97\xa1\xf6\xcf\xa6\xf2\x94\x67\x39\x31\x9c\x53\x9d\xc5\x3b\x2e\xfd\x96\x72\x06\x59\x4b\x0c\x32\xac\x4a\xd9\xf6\xa3\x02\x16\x55\x6f\x3d\x9e\xd8\x42\x78\x82\x0b\x43\xd0\x59\x37\xb1\xa3\xad\x71\x60\x9b\x2f\xb0\x0c\x25\x7d\x73\xe8\xb4\xb2\xad\xc2\x42\x67\x3f\x58\x2c\xe2\x4b\xe1\x29\xa8\x11\x61\x75\x6d\x33\x36\xbe\x05\x58\xcd\xb0\xc4\x1b\xd5\x5d\x33\x0f\x22\xfe\x8e\x66\x3c\x3a\x04\xcf\xad\x63\xef\x78\x26\xec\x6b\x85\xdf\x91\xe9\x7a\xc5\x63\x57\x49\xa5\xe0\xe2\xb0\xa9\x31\x5c\x06\xe8\x4a\xcd\xde\xc9\x2f\x2b\x6e\x8a\x48\x7c\x24\x3a\xa9\xb4\x98\xde\xad\xa5\x8b\x1f\xf6\xf9\xa7\x8c\x3d\x24\xcb\x02\x81\x47\xd9\x78\x19\x07\xd9\xfa\xfa\xfa\x7a\x75\xc4\x21\x49\x41\xbb\x77\x12\x73\x88\x6b\x7f\x5b\xc3\xed\x27\x7e\x07\x2e\xdb\xf7\xb7\xff\xf7\xb7\xff\x7f\xed\xdb\x7f\x71\xf5\xcf\x60\x65\x4c\x28\x7f\x24\x8b\xdf\x2d\x46\x85\xcf\xb2\xa0\xda\x10\x60\x6d\x30\x80\x98\x57\x41\xc6\x48\x99\xed\x60\xcb\xdc\x1c\x22\x23\xb8\x30\x9a\x4c\x68\x46\x93\x82\xd0\xe4\x2c\x87\x42\xa3\x2c\x3d\xcf\x69\xb6\x86\x1c\x7a\x9e\x47\x49\x98\x9e\x83\xc6\x02\x45\x7a\x20\x0f\x1e\x88\x9c\xfe\x3f\xdf\xbc\x7e\x55\x14\x0b\xe1\x2b\x96\x73\x4d\x33\x8d\xec\xf9\x61\x81\xf5\x89\x40\x05\xd1\x34\x49\x19\x23\x88\xa3\x84\xb2\x9e\x24\x69\x48\xd7\x90\x77\x30\xa7\x46\x35\xf0\x8b\x79\xcc\x46\x26\x36\xb6\x76\xb7\x69\x23\xd7\x1c\x93\xff\x7c\xf5\x7e\x
db\xa8\x6e\x96\x6d\xb7\xbb\xa5\xa5\xa4\xe4\xc0\x5a\x78\x27\x91\xe9\x9a\x44\x80\xfc\xc4\x44\x7b\x70\x8f\xc9\x9d\x69\xb3\x5e\x2a\x03\x08\xa3\x3c\xde\xf2\x67\x69\x5e\xf4\x48\x11\xcd\x69\xba\x2c\x7a\xac\xc2\xac\x07\x4a\xe6\xf3\x34\x13\x8f\xd1\x60\x33\x61\x70\x64\x8f\xc0\x7f\x57\x57\xa4\x2d\x88\x3d\x4e\xc7\x41\xcc\x12\x87\x4f\xbf\x79\xfc\x0d\x04\x96\xe5\x7b\x0f\xaf\x90\xed\x84\xe2\xd7\xd5\x15\xd9\x54\xd9\xac\x19\xb2\x07\xad\xa9\x34\xd9\x28\xd9\x53\xed\xd7\x0a\x4f\x8b\x8c\x2e\x20\x52\x1b\x3d\xb7\xa6\xcc\x92\x9d\x04\xe0\x7b\x74\x96\x11\x92\xd3\xf3\x34\x8d\x69\x90\x5c\xc3\x1d\x2b\xdb\x9f\xa5\x04\xa3\xb1\x2c\xdc\x32\xa2\x03\x9f\xd9\x96\xe1\xfb\x07\x63\x1a\xc9\x5d\x66\x07\xcc\x8b\x40\x56\x3d\x47\x35\xbf\x41\xe1\x84\xc4\x78\x18\xdc\x00\xea\x6c\x42\xb4\x78\x05\x43\x7e\xf5\x7e\x5b\xc7\x75\xe5\x92\x16\xc2\x3c\x9a\x08\x06\x63\x38\xbf\xb3\x2a\x32\xc6\xc3\xab\x04\x79\x58\xd6\x9a\x2e\x68\xd2\x69\xbf\x3b\xfa\xf0\x51\x86\xa2\xe4\x84\xc3\x3b\xb7\xbb\x86\x3c\xe9\xc1\xdc\x3e\x78\x60\x4e\xaa\x71\xe8\x5b\x82\x41\x4d\xfb\x79\x90\x47\x63\xd2\x26\x1b\xd0\x85\xe7\x4b\xc6\x1e\x50\x15\x1b\xa4\x3d\x54\x57\x85\xaa\x9e\x7e\x91\x8a\x47\x6b\xed\x51\x90\xd3\x27\x8f\xdb\xd6\xf8\xb5\x1f\xe9\x57\x34\x08\x69\xd6\x69\xef\x03\x5f\x8d\x7e\x0d\xf8\x69\x0b\xda\xe7\x23\xac\x28\xc4\xe4\x63\x9a\x14\x8f\xd8\x41\xbb\xdd\x23\x6d\x26\xf9\x47\x63\xa8\x62\xf0\x4b\x2e\xd5\x8e\xea\xc6\x4a\x4c\x59\x0d\xb9\xf2\x88\x23\x97\xc9\x18\x1d\xaa\x6d\x4d\xb2\xef\xe2\x79\x81\xae\xaf\xfd\xb1\xa5\xab\x48\x2f\xb7\x63\x0d\x4a\x5d\x9a\x4d\x72\x92\x66\x4c\x5a\x15\xc1\x8a\x81\x1e\xb5\x76\x5f\x63\x2e\x09\x3b\xf0\x20\x82\x47\x91\x89\x26\x97\xaa\x7e\x81\x64\xa9\xc8\xc7\x6e\xa2\x7d\xd6\x00\x07\x69\x92\x50\xf1\x6c\x41\x52\x98\xa6\x44\xe3\x72\x51\xb6\x2e\x03\x36\x7c\xa4\x17\x85\xd3\x41\x01\x8b\x5e\x6b\x08\xeb\x78\xb3\x5b\x55\x5d\x7a\x2f\xea\xef\xf8\x1a\xc4\xab\xa4\x79\xec\x60\xa0\x81\xa0\x86\x08\xf6\x15\xc7\xa9\xa0\x04\x91\xf5\xa3\x13\xad\x83\x14\x59\x34\x9d\xd2\x8c\x87\x18\x62\xb3\x0f\x62\x8b\xf2\x17\xca\x70\x50\x47\x30\xd0\x03\x1f\xd5\x98\x91\x82\x9b\xd0\x0f\x18\xaf\xec\x1a\xdc\x24\x01\xdf\xce\x79\x11\x14\x74\x3c\x0b\x92\xa9\x5f\x81\xc0\xcd\xfb\x25\xe2\x83\xf0\x12\x0c\xeb\xe1\x46\xf8\x31\xe3\x30\x36\xcb\x5b\x37\x23\xfd\x36\xa0\x18\x0d\x28\x6f\x95\x50\x08\x29\xfb\x32\xab\x86\xa2\xe0\x4c\xe6\xbd\xb5\x52\x37\x56\x2b\xd2\x16\xc1\x57\x5b\xf6\xc5\x96\xd1\x32\x3b\x0b\x5e\x5b\x28\xd6\x1b\x81\x8b\x59\xb3\xb2\xbc\xaf\x97\xde\x47\x5e\xaa\x83\x37\x0f\xb1\x90\xef\x96\x03\xd8\x5d\xa8\x62\x02\x62\xa5\xe1\x75\xa5\x2f\xcb\xe3\x4b\x46\xef\xfc\x6d\x29\x2c\x2e\x46\xd5\x25\x6b\x2b\xca\x45\xfd\xd4\x64\xa6\x4a\x08\x90\x0a\x4e\x5b\x18\x60\xe7\x87\xa4\x5d\x90\x49\x10\xc5\x34\xec\x93\x23\x76\x4e\x3b\x8f\xd8\xd9\x23\x80\xa8\x60\xe5\xab\x09\xb5\xe9\x99\x0b\x8d\x4f\xa5\xcf\x50\xd1\x27\xa2\x70\x48\xbe\x53\x7f\x52\xdf\xc7\x76\x9f\x6c\x31\x1e\x92\xf6\x76\x7f\x53\x29\x0f\xa5\xfe\xb1\x9d\xd0\xe2\x53\x1c\xe5\x05\x4d\xa2\x64\xaa\xb2\x95\xf6\xf0\xd4\x30\xe8\x92\x0a\xae\x8c\x87\x38\x73\xc9\x57\x5a\x15\xb2\x41\xea\x49\x70\xd4\x05\x78\xe8\x52\x55\x60\x9c\xf6\x99\x98\xdb\x1a\x3e\x65\xbf\x0c\xf9\xb9\x35\xdc\xfa\x96\x9d\xfc\x77\xee\x4f\xfe\xf7\x27\xff\xbf\xf8\xc9\x5f\x1b\xfe\xc3\xa3\xc5\x3b\x32\xfa\x57\x86\x9c\xf8\x54\x39\x8a\xa6\xdc\x06\xb7\xff\x0b\x3f\xa1\xf3\x7b\x90\xf0\x35\x9d\x98\x1b\x82\x8a\xf5\x78\x89\x1e\xce\x19\x1b\x27\x87\xe0\xec\xe2\x7c\xc6\x7a\xdf\x31\x0d\xb4\xbe\xe7\x85\xc9\x43\xb2\xed\xbe\xbc\x03\x8b\x3f\x26\xc5\x9b\xef\x0f\x89\xff\x45\x9c\x60\xee\xef\xc4\xa9\x2e\x48\xc8\xe1\xf3\xfd\xb7\x62\x92\x43\xf2\xdd\xb7\x64\x9c\xce\x17\x4b\x11\x67\x65\x74\x49\xe6\xe9\x59\x94\x4c\x51\x34\xb1\xc7\x64\x3c\x0b\
[remainder of the SECURITY.md PGP public key block diff elided: the base64 armor was corrupted into raw byte escapes during extraction and the original key data is not recoverable]
0\x4c\x66\x2e\x70\x04\xc6\xbc\xc5\xd0\x44\x94\x72\x07\x30\x1a\x3a\xbb\x18\x96\xa9\x33\x00\x15\x96\x24\x24\x6d\xa2\x4e\x0b\x4c\x8e\xe1\x03\xb7\x9f\xdd\x1b\xa2\x78\x12\x12\x19\xc1\x43\x3e\xfd\x74\x15\x8e\xc7\xa8\x8f\x45\x83\x01\x4a\xfc\x28\x88\x27\xe3\x9b\x07\x3a\xdc\x53\xab\x09\x36\x4c\x1e\xda\xfb\xc5\x83\x29\x25\x8d\x7f\x03\x2e\x44\x27\x39\x30\x59\x90\x44\x8d\x2b\xf8\x1a\x0f\x66\x19\xae\xbc\xe0\xd1\xa8\x5e\x78\x2c\x71\x87\xc7\xcc\xb7\x1c\x62\xd1\x03\x41\xf7\xd0\x0b\x32\x1c\xe4\xff\x5f\xb8\xcf\xcc\x14\x8c\xcc\xdd\x38\x35\x7b\x9c\x44\x3d\x46\x5d\x54\xb1\x69\x37\xea\xa7\xd3\xcc\x66\xd9\xa1\xa8\xfe\xc1\x79\x95\x64\x28\x91\x29\x9c\x4a\xa7\xb5\x6a\xa4\x35\xb7\xb8\xd5\xd1\xa5\x2d\xad\x6b\x53\x5a\xa1\xf1\x66\x69\xe2\x81\x5c\x81\x2b\x62\xdc\xe5\x69\x90\xd9\x42\xba\xad\xae\xb0\x44\xde\xd2\x78\x00\xfe\xd6\x80\xb5\x84\x36\xb3\x62\x0c\xc0\x6e\xda\x50\x93\x8b\x64\xd0\x4c\x41\xce\x93\xc9\xf2\x31\x47\x2f\x4d\x7d\xb6\x92\x1a\x3a\x4f\xe1\x6c\x77\x96\x3a\x62\xa2\xd4\x82\x87\xf1\xfc\x48\x2d\xa4\xe8\xbb\x69\xb5\x6d\x9a\x01\x45\xc5\x1d\x30\xbe\xcc\x59\x9e\xc6\x92\x3d\x01\xcb\x21\x7e\xdd\x5d\x1f\x6e\x89\x12\x27\x14\xe2\xf6\x6f\x36\x0d\xd7\x23\xea\xc7\xdf\x6f\xed\xdc\x22\xb2\x7d\x72\x0b\x4a\xdb\x2e\x9c\x4b\x79\x9c\xd9\x16\x6f\x71\x0b\x69\xc5\x2d\x1d\x76\x3b\x3f\x7c\x0e\x86\x5d\x69\x7b\x96\x28\x64\x41\xf5\x38\x73\xa9\x5a\x64\x5f\xfe\x3e\xf4\xe5\x85\xd2\xc1\x77\xa0\x8e\xf8\x9b\xa8\xcd\x2d\x8b\xaf\x94\x26\xf9\x05\x1f\x6a\x57\x58\xd9\xc7\x6f\xd8\x43\x7f\x3e\xb2\x06\x3b\xdf\x8e\xbe\x91\xc2\x41\xdb\x5d\xe3\xcc\xa5\xdc\xb5\xc9\x2e\x04\x3c\x11\x5b\xb8\xb8\x22\x61\x4f\x87\x57\xc8\x18\xec\x99\x6e\x7b\x2e\xef\x4e\x2a\xc6\xd2\xbe\x19\x5d\x5a\x81\x2d\x56\xc1\x60\xc5\x1a\x92\xc0\xa9\x98\x57\xf4\x25\xee\xeb\x0c\x39\x00\x84\x31\x3f\x6a\xfb\x92\x1e\xdf\x40\x63\x3f\xbc\xa6\xc9\x40\xa0\x82\x75\x48\xa5\xb3\x35\x35\xcc\x54\xa0\xbb\xf4\x26\xd6\x13\xdf\x3d\xf4\xc1\x7f\x01\x3f\x7e\x60\x05\xf1\xf7\xce\x98\xbf\x47\x3d\xb1\x8d\x19\x2e\xaa\x28\xbe\x17\x63\x7c\x70\x14\x4d\x45\xf1\x43\x31\xee\x92\x7a\xe2\x6f\xce\xbb\xbf\xb9\xb2\xf8\xdb\x6f\x15\x9e\x62\xdb\xe3\x38\xa1\x3d\xdc\xde\x51\x4a\x1f\xee\xbe\xbf\xb0\x6d\x1d\xf2\xf8\x96\xdc\x3d\x8a\x14\xe4\xb9\x2a\x4f\x64\xba\x94\x53\x5a\xb2\xfc\x95\xb7\x67\x5e\xbb\xf9\xbd\x26\xa5\x7c\xf0\x1c\x94\x8b\xe6\x9e\x54\x72\x4e\x1a\x88\x99\xe9\x27\xb5\xb4\x93\xbc\xa2\x23\xf1\x24\xe8\x47\x73\xe0\xe2\xa7\x9a\x7c\x72\xdf\xcf\x46\x1e\xb2\xa4\xa0\xcc\x8f\xd7\x1f\xe2\x81\x3f\x46\xd3\x78\x7c\x33\x0c\xc7\x28\x1e\x22\xba\x69\xb1\x53\xbc\xe5\xc8\xcb\x62\xdb\x6f\xa8\x05\xb5\x86\x15\xc6\x24\x5e\xef\x90\xf7\xb7\xaf\xcd\xd8\x41\x92\xad\x65\xff\x0f\x83\xa9\x81\x8d\xe0\xac\x4f\x66\x50\x27\xe2\x9d\x95\x69\x12\x67\x31\xf9\x84\x36\xc8\xe9\x43\x2f\xc0\xea\xa1\x0d\x14\xe1\x2b\x82\x40\x31\x84\x68\x36\x1e\x3b\x16\x8a\xc0\x20\x5f\x26\x52\xbc\x23\x5b\x24\x4f\x3e\x27\xc5\x4a\x6e\xa7\x62\xfb\x43\xd8\x4f\xfc\xe4\x66\x9e\x8e\x5c\xca\x0f\xea\x04\x05\xd9\x42\x99\xd6\x93\x08\x17\xbc\xcb\xfe\x18\x85\xd1\x08\x27\xa1\x12\xc0\x55\x89\xe8\xa0\xe7\x19\x35\x23\x8c\x9a\xd3\x59\x22\xec\x1f\x8f\x31\x0c\xee\x71\xc2\xcf\x60\xe4\x67\x1c\x21\x16\xca\x83\x8a\x41\xc6\xa9\x12\xa1\xa2\x38\x80\x5c\xee\x8a\x2f\x71\x92\x84\x01\x4e\xd1\x21\x55\x88\x84\x38\xa5\x0c\x7c\x7a\x83\xc2\x88\x65\x33\xce\x11\x28\xd1\x82\x9e\xab\xe1\x64\x51\x00\x86\xcc\xe5\x28\xb7\x48\xd4\x40\x32\x51\xfb\x37\x27\x94\x84\x15\xe9\xa6\xc0\x24\x51\xf6\x17\x0b\xf1\x38\xe8\xa2\x17\x90\x29\xeb\x85\x6e\x38\x62\x6f\x93\xfc\x4d\x70\x36\x8a\x83\x42\x1f\x79\xa9\xb4\x1e\x23\xdf\xe6\x78\x86\x90\x19\xce\x90\xa2\xaf\x18\x64\xf3\x79\x75\x06\x31\x9c\xfa\x57\x91\xf9\x45\x62\x24\x44\x58\xc8\xd3\xea\xb9\xcc\x89\x
37\x67\x17\x13\x1c\x59\x4c\x87\xc9\x8e\x52\x8c\x05\xca\x99\x0f\x3b\x77\xe5\xe5\xad\xe9\x1f\xac\x08\x30\x33\x29\xee\xfa\x15\x0a\xc7\xd2\xc4\x8e\xd3\x0f\xbc\xc9\x91\x9f\x1e\x5c\x45\x8c\xec\x6f\x2a\x2f\x48\xcd\x17\x55\xe1\xf3\x44\x1e\x61\x13\xe4\xe5\xc9\x8b\xb9\xfd\xa0\xb5\x0a\xa7\xdb\x52\xeb\xff\x49\x67\x53\x22\x6a\x45\x61\xb6\xe2\x13\xe1\x94\x6d\x7d\x7e\x72\x31\x23\xa3\x6b\x1d\x0f\x64\xc9\xa0\x50\x30\x4e\xb9\xc7\x6d\xf2\x22\x45\x39\x47\x0f\xa9\x52\x98\x4f\x3a\x5d\xa5\x26\x04\xb9\x83\xca\x7e\xe0\xd8\x76\x10\x57\x8c\x0f\x71\x82\xa3\x01\x69\x00\xc6\x79\xaa\xaf\x57\x63\x18\x98\x5c\x6c\x03\xe8\xdc\x67\x90\x2d\x35\x86\x8d\xa9\x6e\xc3\x4a\x49\x65\xa6\x49\x55\xde\xb3\x88\x8e\x03\x4c\x20\x5d\xb5\x66\x08\xd4\x4d\x3e\x1f\x79\x06\x9b\x4a\x55\x5c\xc3\x11\x51\x1a\x42\xca\x01\x90\x4a\xf5\xef\xcc\x2b\x79\xc4\x72\xb4\xc1\xd8\x26\xbf\xb3\x98\xcb\x8b\x68\xb9\x62\x8e\x67\x36\x02\x4b\x2e\x8f\x93\x6d\xae\x5c\x1e\x41\x5d\x5a\x23\xfc\x9d\xba\x4e\x9c\x54\xc3\x8b\xdf\x85\x6c\x8a\xdc\xd5\x1d\x73\x85\x0e\x18\x33\x63\x49\x02\x80\xa4\xc0\x84\x3e\x08\x50\x1a\x4f\x30\x4d\x3d\x85\xae\x46\x38\x42\x37\xf1\x2c\x11\x66\xf6\x3e\x11\x67\x29\xf0\x07\x8e\x9d\x7b\xdf\x5d\x50\x77\x74\x2e\xda\xcb\x10\x65\x00\x2b\x2b\xe6\xc8\x88\xa1\xbf\xe3\x76\x37\x17\x8d\x52\x73\xda\x8b\xa7\x44\xd8\x99\xe6\x72\x0f\x93\x77\xee\x21\x4e\x49\xc0\x40\xc3\xa4\xc8\x54\x13\xd0\x44\x3e\xf0\x94\xb2\xd5\x49\xf7\xcf\xb2\xf2\xcb\x1d\xc7\x1d\x1a\x51\x2e\xb1\x45\xff\xac\x6b\x5c\x44\x3c\xe4\x97\x6d\x1f\xfd\x09\x18\x4d\xcc\xa9\x87\xd8\x56\x9d\x17\xd3\x37\x6b\x19\x60\xb5\x70\x8b\x25\xd3\x79\x2a\x17\x3f\x43\x1b\x52\xfb\xea\xa7\x05\x52\x17\x39\x36\xd9\x6d\x74\x15\x47\x2f\x32\x2a\x3f\x73\x77\x47\x29\x78\xe1\x38\x8e\xa7\xc8\xef\xc7\x97\x96\x6d\xb0\xb8\xcb\x2f\x38\xb4\x17\xee\x0e\x03\x17\x15\xad\xca\xfd\x14\x6f\x4b\xe4\xd5\x2a\xb5\x78\xc4\xe1\x04\x7a\x0a\xf6\x2f\x8b\xac\x1b\xdb\xc6\x37\x18\xc7\x11\x7e\x04\x8e\x07\x70\xd1\x46\xbe\x87\xc0\x8b\x12\x3b\x19\x29\x36\x77\x23\x93\x73\x91\xa8\xc2\x11\xe7\xa7\x56\x7b\x32\xfb\x19\xd9\x7a\xbb\x1f\x21\x1f\x3c\x6f\xb5\x58\x84\x85\x91\x85\x8c\x38\xef\xc5\x20\x6c\xe1\x69\x84\xf1\x83\x1a\x0e\x31\x0d\x2f\xa2\x70\x18\x0e\xfc\x28\x63\x01\x25\x43\xda\x7b\x00\x49\xdb\xb1\x1d\x93\x7f\x95\x3c\x88\xe9\x59\x59\x7e\xf3\x00\x61\x63\xcc\xe6\x75\xb2\x70\x84\xc1\x97\x4d\xaf\xe6\x8c\x35\xb2\x9a\x85\x89\x91\xd2\x6e\x30\xe6\x0e\x1a\x7e\xb0\x54\x2f\xb2\x7f\xb6\xb2\xb1\x1b\xb6\x30\x0e\xed\x7f\x79\x00\xa7\xb5\xeb\x5a\xad\x56\xaf\x35\x6a\x4d\x0f\xd5\xae\x6b\xad\x5a\xbb\xd6\xa9\xad\x9d\x3d\x1a\x60\x0f\x75\x4a\x87\x5e\x61\xe1\xeb\xf8\x8c\x18\x2b\xf6\x8a\x39\x04\xc3\x72\xe5\x0f\xf4\xbf\x5f\xbf\x42\xcc\x5e\x4d\xd4\x18\xa2\x8a\x98\xde\x1f\x36\x2c\x8a\x42\xf9\x0f\xa0\x4a\x46\x43\xfc\x67\x69\x63\x52\x1d\x00\x25\x8f\x31\x8e\x2e\xb2\x11\x35\x3d\x72\x72\x91\xf2\x31\x63\xf2\x85\xb2\x58\xa4\x98\xed\x68\x10\x07\x84\xde\x31\xfd\xa1\x93\x3b\xbc\x2e\x8e\xfd\x29\x08\x00\x47\x83\x95\x5d\x7c\xed\x6e\x73\x5e\x00\x99\x52\xab\x7d\xe1\xe0\x2e\x39\xb1\x96\x88\xec\x62\x89\x6b\x30\x2f\xac\x8b\xa5\x8a\x32\x24\x9f\xb2\xe1\xfa\x42\xd1\x5c\xd8\x54\x38\x63\xb9\xf0\xa9\xfa\xfa\x15\xed\xe2\xeb\xc2\xf0\x2d\x73\x08\x68\xe0\x67\x38\x62\x7b\xbe\x4a\x41\x0e\xe6\xef\x26\x24\xe9\x1e\x36\x1f\xf0\x13\xc6\x0d\x25\xca\x84\x34\xbf\x8b\xde\xeb\x96\xc5\xa5\x0c\x6d\x08\xec\xea\x3c\x7e\x86\x78\xd3\x70\xa7\x34\x83\x92\x3a\x53\xa2\x81\x9d\x17\x0b\x47\x42\x06\xf6\x57\x83\x61\x59\x7c\x15\xb3\x91\x2f\x42\x1d\xe4\x24\xe6\x2e\x1d\xa6\xc7\x39\x8f\x51\x78\x8e\x03\xf8\xb1\xca\x92\x28\xfc\xbc\x8e\xd1\xa9\xde\xd8\x9f\x4c\x11\xbe\x86\x48\x92\xfd\x50\xef\x1c\xbd\x57\x25\x65\xcc\xdb\x06\x7a\x9f\x3a\xb0\x05\x49\x51\x10\xff\
x97\x23\x50\x3a\xd4\x27\x22\x69\x84\x61\xab\x45\x7e\x86\x7c\x94\x85\x13\x8b\xc4\x6d\x0b\xc9\x2e\x77\xd7\x9d\x14\x42\x1e\x1c\x52\x14\x6d\x10\xf4\xd8\x2c\x9c\x86\x3c\x2a\x36\xf9\x4f\xa5\xd1\x42\xcb\xa8\x12\x52\x8c\x5f\xa2\xf5\x6a\x55\x44\xcb\x76\x4a\xf1\x14\x8e\xda\xe3\x25\x14\x8a\x70\xdb\x5f\x37\xf2\xa6\xdf\xbc\xe1\x6d\x58\xca\x8b\x46\x4b\x08\xfe\xce\x6d\x49\x1e\x53\xba\xb8\xee\x35\xa6\xee\x28\xf7\x65\xbb\xbf\x81\xcc\xc1\x2e\x93\x31\xd8\xa4\x42\xb1\xd9\x2e\x6d\xa8\x68\xda\x72\xac\xf8\x61\xe4\xf7\xf5\x93\x87\x74\x00\x28\xcb\x4e\x69\x0c\x0e\x22\x04\x2a\x82\x61\x98\xdd\x57\x14\xcc\x17\xa7\x58\x5d\x0e\x26\x45\x3e\x97\x0d\xdd\x6b\x61\x4d\xa6\x1c\x65\x8b\x8b\xe4\x64\x32\x76\x86\x61\x11\xd5\x4e\x05\x0c\x1e\x67\x7e\x03\x96\x0e\xfd\x03\xd2\x6f\x36\x08\xe9\xa7\x0a\x5f\xb0\x10\xbc\x22\x4a\x6d\xa0\x7d\x3f\x1b\xad\x0c\x70\x38\xce\x6b\xae\xa2\x05\x22\x12\xd9\xcf\xbf\xa5\x76\x1e\x87\x39\x92\x71\xfc\xbd\xab\xdd\x27\x3b\xee\xca\xb4\x60\x9c\x77\x55\x5a\x98\x77\xce\x95\xc1\xc2\x49\x8d\xe2\x2a\x47\x3f\x37\x4f\xce\x2b\x26\x8d\x30\xf3\xfb\x9a\xd3\xa4\x8e\xd4\x5b\x7c\x0a\x24\xb1\x61\x18\x8e\xc7\x3c\xec\x2c\x73\x93\x80\xf3\xd6\x7c\xa1\x84\x1f\xe6\x22\xdb\xa1\x57\x06\xe5\x74\xf1\x29\x35\xcb\x0c\x52\x29\x42\x79\x28\xe3\xb3\x12\x47\x30\xe6\x0a\x52\x77\x9f\xb4\x68\x09\x99\x4c\x22\xfb\x11\x4b\x66\x0f\xe6\x81\x8a\x7c\x4d\xd4\x1b\xf2\xc9\xf9\x95\x3b\xca\xfc\xf9\x15\xda\x20\xff\x3a\x12\xa8\x4d\xce\xbf\x90\x6d\xe6\xba\xe9\x07\xb8\xb3\xde\xd7\xc3\xaf\x8b\x62\x7e\xfa\x19\xc9\x9c\xa3\xe0\x9e\xa0\xc4\xdd\x1d\x6d\xb5\x52\xbb\x7e\x55\xeb\xbc\x42\x2f\x49\x17\xbe\xc0\x9e\xbe\xb3\xb3\xb3\x53\x45\x4b\xf4\xc5\xcf\x3f\xa3\xda\x75\xbd\x06\xdb\x3d\x41\xc0\xb1\xdd\xd3\x2e\x56\x6a\xd7\xad\x4e\xbb\x46\x81\x5d\xe9\xc0\xae\xca\x02\x83\xe1\xc5\xe9\x0c\x3c\x7d\x2a\x80\xc6\x9b\x37\xb4\x26\x5a\x42\x30\xd2\x85\xf5\x59\xdd\xd5\x0d\xa8\xc3\xfe\x8a\xcb\x2e\x6d\xa0\xda\x4a\xdb\x59\x06\xc6\x94\x15\x7d\x49\xed\x6d\x38\xb5\x55\xd1\xcf\x68\xa5\x8d\xfe\x03\xd5\x51\x17\x2d\xd7\xcb\x88\x28\x06\xe7\x50\xc5\x0d\x0f\x25\x03\x7f\x30\xc2\x2c\xbb\xce\x7c\x81\x83\xd4\x3c\x27\xf4\x98\x54\x2a\xb4\x2a\x39\x2a\x29\x48\x92\xdd\x44\x1a\x0c\xfb\x15\x13\xad\xba\x81\xce\x93\x0a\x2d\x0f\x04\xb9\xd6\x5f\xb3\xf4\xe9\x2a\xcf\xe1\x53\x11\xe5\x73\xf8\xe8\x2b\xaa\x95\x0c\x6b\x1e\xe1\x2b\xc9\xd9\x09\x6e\x1d\x99\x02\x24\xe2\xe9\x7b\x9e\x69\x23\x69\x77\x3e\x65\x47\xfb\x79\x86\x34\x38\x1a\x80\x21\x0d\xfd\xaf\xdd\x90\x66\x17\x5f\x9b\x9a\x00\x1b\x38\x52\x70\x83\x02\x5d\xa1\xbf\xcb\xc5\xdf\xd4\xd5\x17\x23\x7c\x5d\x5a\x85\x51\xe2\xe4\xb9\x60\x54\xcd\x52\xad\x3f\x14\x23\x1f\xe1\x6b\x33\x84\x26\x1b\x3f\xe9\x68\x3f\x3f\x91\x90\x35\x70\xe6\x5d\x8f\xa9\x57\xa5\x4f\x9e\xe9\xa2\xc7\x48\x3a\xeb\x26\xa0\x11\xbe\xee\x8d\xfc\xa4\x74\x9e\xad\x74\xee\x81\x0e\x72\xa4\x85\xf4\x20\x77\x75\xcf\x43\x1c\xc7\x8e\xad\x71\x00\x4b\x80\xb4\xaa\xb9\xda\xa7\xde\xa9\xda\xf8\x9d\xad\x2a\x69\xa7\x36\x2c\xae\xeb\x60\x10\x02\xdc\x1f\x71\x18\x55\x5e\xbc\xb8\x43\xc4\x4d\x89\xc2\xe9\x7a\x5b\x44\xd3\xc3\x57\x0a\x25\xdc\xf2\x0b\xc6\x21\x3c\xfd\xf5\x52\x13\x5f\x6c\xd4\x66\x5b\xac\xc7\xf2\x91\x32\x69\x95\xc5\x12\xa5\xd0\x3a\x1f\xf8\xd1\x85\x3e\xb2\xa3\xcc\x22\xab\xe6\x6a\x91\xd4\x74\x72\xa3\x6c\x0b\x6d\x14\xe4\xc7\xa4\xab\xa5\x09\x9a\x09\xe8\xf4\x5e\x94\xb1\xce\xae\xa4\xb3\x7e\x9a\x25\x95\xd0\x43\x8d\xaa\x07\x49\xf8\x72\x95\x05\x59\x51\xeb\x55\x9b\x03\xee\xc2\x7b\x9e\x32\x4c\xab\xa8\x51\xd6\x7d\xf6\x83\x9f\x85\x51\xbd\xdc\xa6\xc5\xca\xf2\x7d\x4b\x3c\xde\x6d\xeb\x62\xd5\xff\xba\xdd\xab\x2c\x02\x0f\xb5\xa6\xc6\xd0\x9e\x7d\x0f\xa3\xb8\xfc\x8f\xda\xc6\xe8\x70\x7c\xc7\x3b\x99\x84\x20\xdd\x91\xe8\xd4\xad\x0c\x93\x78\x42\xde\xf6\xe2
\x00\xc3\x26\x55\x76\x43\x92\x01\xde\x63\x4f\x52\xe8\xf6\xee\xdb\x92\x20\xc7\x85\x16\xc3\x77\xbd\x39\xb1\x55\x44\xf7\x27\x79\xb9\x95\xdf\xa2\x44\xad\xc5\x76\x29\x51\x4d\x6c\x54\xe2\xcd\x63\xef\x55\x5a\xd3\xf3\x72\x39\x87\x92\x16\x3d\xef\xed\xca\x80\x11\xf4\x66\x56\x09\xf9\x9a\xd0\xb7\x2a\xbb\x6e\x71\xe1\xad\x4a\x43\xb8\xec\x4e\xf5\xe9\x64\x67\x79\xbd\xdc\x46\xf5\x29\x1b\xae\x8b\x6d\x8a\x3d\xdc\x6d\x93\xa2\x8d\xfe\x75\x7b\x54\xc9\xf6\x1f\x6a\x65\xcd\xb2\xe1\xba\x7d\x83\x22\xa3\xf8\x98\xdb\x53\x96\xdc\x14\x18\x18\x05\x98\x1c\xd1\x3f\x1d\xed\xf5\xb8\xa7\x53\x05\xa7\x03\x7f\x8a\x2b\x05\x1b\xa7\xc9\x96\xd1\xc0\xcf\x06\x23\x54\x31\xd3\x47\x03\x0a\xa3\x24\xbe\x02\xba\x85\x8c\x2b\x95\x17\xfb\xfe\x78\x18\x27\x13\x1c\xb0\x69\x08\xfc\xcc\x37\x53\xd0\x2d\xce\xc0\xe5\x49\xbd\x3b\xff\x66\x73\xb5\x08\x99\x7c\xd7\xcc\x1b\x28\x8c\xb2\xee\x9c\x0c\xcb\x33\x6e\x56\xc7\x65\x0c\xa0\x6c\x0d\xb3\x88\x51\x0f\xb5\x10\x50\xe8\x8a\xc3\xa9\x96\x0e\x40\x23\x52\xf0\x42\x2e\x4c\x1c\xb0\x6c\x66\x92\x17\xba\x33\x13\xaf\x64\x27\x7b\x23\xa5\x44\x9b\xcc\xd2\x0c\xf5\x31\x0a\xc9\x88\x4e\x70\x94\xd1\x3c\x6b\x3e\x5c\xaf\x27\x38\x13\x1e\x0b\xa5\x72\xfb\x6a\x79\x3a\x55\xe5\x3e\xcd\x71\x48\x5d\xab\xf2\x04\xf1\x9f\xf1\x34\x43\xb3\x68\xca\x93\x06\xaa\xd9\x41\x25\x9b\x96\x9a\x85\xfb\xbe\x65\xe3\x00\x99\x06\x37\xc5\x28\x08\x2f\x31\xd7\xe7\x92\x66\x70\x90\xdd\x95\x59\xf3\x68\x23\xfd\x82\x25\xd1\x66\x49\x4c\xb3\x18\x85\x59\xca\xbd\x62\x10\xa1\xe0\xfb\xde\x31\xf5\xad\xc8\xd3\x84\xb8\xee\x4b\xa6\x52\x59\x77\x99\x79\x1f\x02\x2b\x65\x9b\xcd\x00\x64\xe0\x64\x9e\x8a\xda\xce\xaa\x33\x25\x5a\x3e\xda\xf2\x33\x9f\x0b\xeb\xb5\xb2\x92\xe6\x66\x10\xa4\xd0\x06\xcf\x0b\xee\x18\x69\x46\x0b\xe5\x37\x45\x11\x64\xc1\xc8\x3c\xce\x8c\x5d\x10\x5d\xf3\xcc\x09\x80\xf2\x4b\xea\x53\xe2\x4b\x16\x94\xd4\x9e\x18\x38\xde\xe3\x4c\xe6\x39\x45\xa7\xf2\xc2\xe4\xf7\xa5\xea\xcd\xdf\x1b\x59\xc9\x32\xc9\xcc\x4d\xf7\xfa\x3c\x1d\x9d\x1c\x50\x54\x1a\x20\x16\x4c\x54\x05\x25\xfb\x38\x03\x19\xcd\x89\x13\xc9\x68\x4d\x62\xca\x80\xe1\xfc\x48\x69\x9b\xd0\x35\x17\xf9\x72\x53\x22\x1b\x30\x83\x68\x97\x36\xd4\x24\xe9\x65\x29\x98\xe7\x3a\x4d\x91\x7f\xe9\x87\x63\x88\xd8\x45\xf9\x02\x30\x3b\x37\xd5\x9c\x48\xce\x2a\x61\x74\x19\x7f\xc6\xa9\x9e\x64\xb8\xc2\x92\x03\x7b\xe8\x6a\x14\x0e\x46\x56\x56\xdd\xbf\x29\x60\xd5\x66\xab\x7c\xa1\xf4\xe3\x78\x8c\xfd\xe8\x16\x05\xf1\xce\x78\x96\x8e\xd0\xaf\x23\x9c\xd1\x78\x26\x3c\x17\x2d\xb8\x6b\x4d\xfd\x04\x18\x05\x7b\x95\x73\x6d\xc1\xae\xef\x10\x0e\x44\x70\x7a\x18\xf1\xfb\x6f\xf3\x02\xe0\x16\x25\x24\xd7\x9a\xe1\xa9\x72\x5d\x71\x39\x16\x04\x63\xcf\x14\xac\xc6\x5a\xa5\x45\x95\xc5\x47\x07\x7c\x41\x9d\x09\x5b\x22\x39\x71\x5b\xb4\x25\xe4\x35\x37\x4e\x83\x91\x75\xa9\x55\xc8\x47\xc9\xd0\xcc\x45\xf7\xbc\x78\x2e\x2b\x6c\x68\x29\x99\x8b\x0a\x73\xe8\x79\x6d\x7b\x44\xbf\x5e\x3c\x8b\x32\x4e\x5f\x16\x66\x42\x80\x46\x34\x91\xf0\x11\xc4\x2d\xde\x50\xf1\x5f\xd5\x9a\x7c\x6d\xf2\x22\xd7\x90\x33\x0c\x8e\xe2\x59\x14\xa0\xd9\x94\x3a\x14\x0e\xc6\xb3\x00\x6b\x74\x6f\x56\xd3\x30\xca\x8d\x5c\xe4\x0f\xe5\x63\xdb\x0a\x2c\x82\xf8\x2a\x92\xf1\x88\xa3\xf1\x0d\x1a\xce\xc4\xa2\xb4\x44\xd2\x5f\x5d\x45\x63\x9c\x52\xa7\x4a\xbb\xac\x05\x7c\x23\xc1\x13\x3f\x8c\x54\xe1\xaa\x5c\xbf\x26\xfe\x75\x45\xe9\x17\x5c\x9c\xa2\x65\x5b\x66\x76\x6f\xfe\x95\xaa\x98\x73\xaa\x79\x70\x4d\x39\x50\x32\xc7\x43\x69\xfd\x25\x92\x08\xd0\x45\x4f\x40\x1b\x4e\x72\x22\x5f\xd5\x3e\x86\x51\x45\x6e\xf2\x25\x6a\x79\x0a\x9d\xd9\xcc\x27\x79\x06\x6f\x1b\x91\x10\xba\x93\x00\x16\xbb\x6d\x51\x3e\x4f\xd5\x2c\xec\xf7\x1b\x79\x04\xc4\xdb\x25\x69\x3d\x39\x8d\x26\x08\x66\x38\x21\xa7\x49\xb1\x31\x2c\xe7\x07\x04\x70\x86\xb4\x57\x64\xdc\x45\xd
d\x83\x04\x57\xb1\xe5\xaa\x77\xcd\x31\x52\x52\x60\xe5\x0c\x1f\xa6\xdc\x2c\xaa\x70\x5f\x99\x85\xe9\xc9\xb0\xe4\x11\xb5\xa0\xa1\x70\x32\xb4\xbc\x21\xcf\xf4\x7c\xaa\xe4\xb1\x45\x8b\xb0\x75\x2b\x9c\x54\xfc\x3d\xb9\xe9\xfb\x1a\xbb\x95\xce\x42\x59\xe8\xe4\x75\x4f\x2b\x37\xc7\x6e\xf8\x17\x99\xbc\x9d\x1b\x1b\x62\x8e\x89\x75\xc6\x0a\x2d\xde\x54\x1e\x26\x4e\x9a\x8e\x4c\xf4\xfc\x0c\x3e\xf2\x53\xc8\x90\xeb\x3c\x71\xcf\x4d\x45\x9e\xb3\x6b\xd9\x07\x8a\x4e\x3a\x83\x4e\xc3\xae\xe1\x14\xc5\x91\x74\x14\xae\x77\x50\xa5\x5d\x6f\x80\x25\x6b\xd5\x72\x2c\xde\xa5\x95\xf9\x31\x58\x3c\xda\xcf\xc3\x0f\x12\xf5\xb5\x28\x03\x59\x61\xc0\xd4\x22\x57\x33\x3a\x08\x0b\xe4\x24\xbf\x6b\x74\x3b\xd2\x10\xa2\x21\x92\xe7\x05\xb9\x2b\x6d\x43\x22\xe6\x40\x09\xdd\x76\xbc\xbb\xd9\x68\x77\xec\x4e\x62\x45\xa9\xae\xef\x1c\x61\x8d\xc7\x56\x2b\x1f\x66\xed\x18\x8b\xf0\x1e\x6e\x0d\x81\xa9\x86\x98\x63\x89\x9d\x6b\x52\xf8\xc2\x79\x78\x95\x09\xa3\x97\x87\x50\x91\x00\xc2\xb2\x8a\x47\x2d\xe1\x58\x49\x00\x5a\x61\x5e\xa6\xd4\xa0\xef\xcd\x6c\x38\x2c\x1b\x33\xdf\x90\x8f\x16\x1b\xeb\x4f\xd3\x00\x58\x86\x3c\xd8\x34\x2d\x7f\xf9\x8c\x7d\xce\x08\xc2\x14\xb8\x1e\x47\xb8\xb4\x0b\x11\x65\x45\xcc\x7f\x68\xee\xf2\x5e\x60\xce\x67\x80\x57\xe5\x05\x43\xca\xa6\x4b\x51\x4b\xce\x57\x9d\xd0\x82\x32\xa1\x28\x63\xe0\x58\x8f\x0e\x8d\x04\x53\xd8\xa8\x10\x2c\xe4\xc1\xc6\x97\x08\xe9\x04\x5f\x1b\x28\xe9\x1c\x6b\x8a\xbf\x0f\xe6\x3b\xb1\xc3\x92\xdc\xa4\x02\x17\x27\x83\x44\x1f\x63\x40\xd9\xcf\x68\xbe\x78\x56\x33\x8f\x19\x8a\xc2\x14\xe1\xe1\x10\x0f\xb2\xf0\x12\x8f\x6f\x90\x8f\x02\x9c\x66\xc9\x0c\x9e\x3d\x90\xd3\x97\xe3\x68\x80\x4b\x45\x19\x2d\x49\xa1\x4a\xa2\x07\x40\x29\x0f\xc8\x0d\x25\x16\xd7\x5c\x90\x41\x78\xa0\x9d\x01\x6d\x70\x72\x14\xc9\x84\x1c\x6a\x09\x47\xe9\x22\x42\x2f\xa8\x36\x9f\xea\x79\xd1\x85\xe8\x7e\xc7\x32\xbe\xe6\x81\xa8\x18\x0c\x9a\xb7\x56\xe6\x09\xf0\x0b\x70\x56\x69\x84\x38\x93\xdd\x91\xe6\xc1\xba\x78\x48\x79\xd7\xe2\x91\x92\xdf\xb5\xeb\x8d\xd5\x66\xa3\x9c\x98\x9f\x32\x8d\x8f\x12\xff\xde\x67\x93\xf6\x42\x04\x4e\x0a\xa3\x0c\x27\x43\xc9\x5a\x18\x39\x57\x05\xe7\xaf\xac\xeb\x9c\x6a\xe9\x76\xcb\xe2\x23\xfa\x68\x84\xc7\x53\x9c\x10\xf1\xa7\xc4\x22\xd8\x61\xb8\x31\xdf\x60\x1d\xe5\x6f\x70\x8f\x47\x65\x26\xdd\xa9\x82\x76\x75\xe5\x9c\xf6\x6a\x17\xba\x54\xb1\x09\x5b\x6e\xfd\x9c\x5c\x55\x31\x1e\x04\xd0\xae\xfb\x3d\x63\x5d\xd8\x03\xe0\x22\xf5\xbc\xc8\x56\x22\x1c\x16\xd5\x2c\x62\x79\x86\x4b\x95\xc2\x17\x3f\x36\x5a\xe9\x89\xb0\xe4\xdd\xfd\xcd\xde\xc3\xd3\x13\x11\xa1\x79\x50\x0a\xd2\x02\xa3\xab\xbf\x05\x4d\xed\x4e\xfc\x41\x29\xba\x9a\xf8\x83\xfb\xd0\x96\xa8\x7e\x2f\xfa\xfa\x8c\xed\x2a\x24\x89\xbe\x7a\xe7\x80\x16\x99\x07\x4a\x64\xb4\x11\x5a\x77\x31\x62\x2b\x3c\xfe\x0a\x4d\xd2\x1c\x1f\x06\x82\x0d\x38\x31\xb0\x1f\xb9\x17\x03\xcf\xd4\x02\x21\x7d\xf7\xfd\x6c\x44\xc3\xfa\x3e\xe3\xef\xd9\x30\xbf\xce\x23\xfd\xde\x9e\x79\xed\xd6\xf7\x1a\xde\x97\x21\x53\xe1\xe1\x88\xab\x0f\x1e\xef\x97\x43\x5e\x34\xee\xaf\xc0\x50\x8e\xff\xeb\x0a\xfa\x2b\xbe\x43\xf0\x5f\x5b\x00\x5d\xf3\x8a\x82\x47\x8d\xcd\xa7\x4c\x22\x00\x29\x1a\xac\xf4\xbe\x20\x3c\x8d\x52\x5b\x72\x81\x71\x85\x91\xed\xb4\xca\x99\x68\xb1\xb2\xdc\x48\x4b\x3c\xde\xcd\x4c\x8b\x55\xff\xeb\xec\xb4\xca\x22\xf0\x50\x9c\xb2\x0f\xed\xd9\x4d\xb5\x28\x2e\xff\x00\x5b\x62\xa3\xfc\xc4\x9f\x0a\xe1\x70\xe2\x4f\x17\x8f\xbd\x60\x71\x11\x37\x41\xb8\xac\x32\xe9\x98\xdf\xd5\x60\x19\x2d\x6d\xa0\xa6\xdb\x66\xf9\x26\xc3\x75\x8b\xd1\x32\xfd\x73\x99\x2e\xd3\x3f\xa7\x01\x33\x07\xdc\xc8\x01\x57\x42\xb4\x84\xea\x55\x8b\x4d\x34\xff\x52\xc6\x32\x9a\x03\x6e\x6a\x80\x1b\x4e\xc0\x0d\x2b\x60\x3b\xe4\x2c\x09\xa7\x63\xb8\x7a\xa9\xd0\x61\x79\xf3\x06\xfc\x26\xbe\xd2\xe7\x06\x79\x
5e\x27\x8f\x80\x82\x0d\x8a\x98\x8a\x3f\xe8\x54\x54\xfe\x40\x6f\x48\xeb\x3f\xfd\x84\x00\x9b\x3f\xd0\x4b\x54\x5b\x59\x6b\x4b\x33\x54\x7d\x8d\xfe\x28\x08\x77\x21\xcd\x3d\xb5\x05\x9f\xf8\x53\xb0\x99\xdd\xcc\x2a\x15\x8e\x30\x74\xba\x83\x5e\xa2\x4a\x13\x2d\xa3\x3f\xaa\xac\xa7\xcd\xa1\xd5\xdb\xc9\x88\xcf\x60\x2a\x2e\x82\x80\xa7\xfb\x36\xa9\x91\x7d\x20\x28\xa1\x0d\x24\xa1\xd3\x31\x9c\x49\x20\xb6\x5e\x5e\xdc\x6e\x1c\x3c\x0a\xc7\x18\x55\xe4\x7e\xb2\x70\x01\xae\x58\x23\xd6\x61\x91\x9b\x59\xbc\xcf\x8c\xb3\xca\x50\xef\x61\x27\xaf\xf0\xe4\xbb\xdb\x59\x0a\x56\xbb\x10\xa3\xff\xae\x4d\x2d\xd9\x0e\x41\xed\x7a\xe4\xad\xa4\xbc\xb9\xa5\xa8\xb5\xe0\xe6\x20\xea\x09\x43\x79\xf1\x46\x18\xca\xcf\xe7\xfb\x46\x89\x04\x5f\xe2\x24\xc5\xfb\x52\xc1\xfc\x95\x2d\xae\xd9\x0f\xf9\x67\x27\x75\x17\x02\xb5\x6d\x01\xfc\x4f\xe7\x3f\x84\xfd\x90\x15\xca\x3a\x58\xc8\x69\xd4\x86\x4f\xf9\xc2\x66\xb6\xf9\x7f\x54\xcf\xd0\x06\xfa\xa3\x5c\xac\x4e\x0b\x4b\xd9\xbb\x88\xe2\x04\x7f\x33\xae\x22\x81\xdc\x8b\x02\xf0\x73\xce\xa7\x3b\x24\x6f\x0e\x86\xf3\x78\x86\xd4\x0e\x85\xf1\xc3\xc6\x06\x5a\xae\xcf\xe1\x49\x32\x85\xc9\xb5\xef\xc4\x88\xad\x22\x41\x22\xd2\x5e\xa6\xf8\x43\x1c\x4f\xf3\x25\xe1\xe9\x38\x78\xd2\x8c\x2a\x22\x87\x76\xe3\xe9\x4f\xbb\xe8\xc5\xe6\xdb\xde\xd6\xf6\xce\xbb\xdd\xbd\xff\x7a\xff\x61\xff\xe3\xc1\xe1\xff\x3e\x3a\x3e\xf9\xf4\xcb\xaf\xbf\xfd\xfb\xff\xf8\xfd\x41\x80\x87\x17\xa3\xf0\x8f\xcf\xe3\x49\x14\x4f\xff\x3b\x49\xb3\xd9\xe5\xd5\xf5\xcd\x97\x5a\xbd\xd1\x6c\xb5\x3b\x6b\xeb\xaf\x96\x56\x37\x58\x84\x5b\x71\xb4\x13\x8b\x76\x61\x54\xf3\x21\x76\x78\xa5\xe4\x96\x1b\x8a\x85\xa9\x4d\x14\xd2\xda\xb1\xb9\xa9\x90\x99\x0e\x1d\xfb\x0d\x73\xec\x4a\x89\x90\x24\x2d\x8f\x9c\x9a\x64\x07\x16\xb4\x8c\xea\xd5\x33\xf0\x5e\xc9\x05\xa6\x86\x49\x5c\x1c\x68\xa3\x0c\xd0\xea\x19\xdf\xe0\x65\x31\xcc\x02\x95\x0a\x44\x91\x12\xb9\xe7\x2b\x11\x66\x00\xfd\xaf\xb4\x45\xd9\xb7\x26\x2a\x0e\xde\x83\xd8\x10\x2f\x2d\x29\x1f\x04\xd9\x8a\x1f\x8c\x22\x8d\xd8\x92\xd6\xb0\x08\xb7\x79\xee\x1e\xfd\x90\x2f\xed\x11\xaf\x9d\x99\x7d\xda\x4f\x47\xff\xa7\xa3\xbf\x38\xfa\x7f\x3a\xd9\x59\xae\x77\xd0\xdb\xed\xd2\x0e\x5a\xf5\xce\xdb\x6d\xd9\x47\xab\xde\x51\x9f\xe0\xeb\xdd\x9d\xb6\x28\x32\x7f\xad\xe3\x56\x49\x1c\x1e\xd0\x79\xab\xde\x71\x7a\x6f\xd5\x3b\xff\x00\x8d\x40\xf9\xc3\x3a\x0c\xc6\x7d\xce\xea\x76\x7f\x7f\xb0\x8c\x8a\x03\x7c\x18\x87\x51\xe6\x72\x32\xae\x77\x1c\x4e\xc6\xd6\xc3\x74\x8e\xa9\xdb\xcb\x58\x34\x59\xd6\xd5\x58\x02\x7a\x8f\x13\x94\x4e\xc4\xf7\x72\x56\x03\xda\x5c\x74\x6d\x7c\xd7\xc7\x28\xba\xaa\x84\xcb\x1a\x5f\x7c\x0b\xf9\xac\x41\xa5\xc5\x7c\x8d\x79\x2d\x21\xdf\xf2\x17\x8f\xed\x69\xac\x36\x5c\xce\xd1\xb8\x0e\xb2\x8f\xc0\x50\x75\x33\x26\x22\x50\xbe\x58\x1a\x64\xb1\x68\x41\xd8\xdc\x14\xee\x92\x72\xb4\xd1\x79\x59\x3e\x14\x06\x23\xcb\x0f\x25\xf6\x30\x69\x9f\xfa\x70\xef\x7d\xea\xc3\x77\xb0\x4f\x95\xc1\xe1\xa1\xf7\x29\xeb\x72\xfa\xb0\xfd\xb4\x4d\x89\xbf\x07\xdb\xa6\xd2\x2b\x7f\xba\x1d\x05\xa1\x1f\x55\x16\xdd\xb1\x6c\x47\xf2\xef\x7f\xcb\xfa\xf0\x38\x5b\x56\x99\x65\xf2\xfd\x6f\x59\x1f\xb6\xb5\x4d\xeb\x69\xc7\x32\x76\x2c\x69\xc5\x2c\xb4\x79\x7d\xd3\xdd\x4b\xcc\x8b\x84\x2d\x01\xa4\xf4\x91\x47\xc3\x87\x2f\xec\xee\x84\x2e\xee\x5a\x8d\xfc\x3f\x5c\xac\xd0\x8f\xa4\xfb\xec\x2b\xfd\x96\x2f\xff\x79\xea\x02\x20\x2c\xb7\xb6\xa0\x73\x2f\x6d\x01\xcb\x51\xfb\x2d\x95\x06\x1e\x92\x5e\xa5\x23\xbf\xae\xbd\x1a\x4d\xfc\xc1\x23\xaa\x16\x3c\xc4\x9b\x85\x5f\xd0\xda\x3f\x41\xdd\x60\xe4\x8b\xbd\x83\x2a\x42\x31\x62\x91\xbe\xec\x6f\xb5\xa1\x26\x98\xdc\xec\x6f\xb5\x6d\x32\x1e\x98\x38\x7f\xc6\x37\x34\x0b\x36\xb5\x83\x15\x7d\x05\xe7\x5f\x3f\xca\x78\x12\xef\x38\x99\x50\x1b\xed\xed\x5f\x0e\xcf\x61\xd3\x3d\x89\xdf\xe3\
x5c\x18\x44\x57\x57\x57\x2b\xf1\x14\x47\x69\x3a\x5e\x89\x93\x8b\xd5\x20\x1e\xa4\xab\x90\x84\x3b\x5e\xd5\xea\x8c\xb2\xc9\xd8\xa2\x08\xd9\xbe\x9c\xbe\xdf\xda\xc9\xd1\x16\xcf\x25\x83\x21\xcc\xf7\x01\xd1\xf6\x38\xc3\xfb\x85\xa5\x3c\x87\x3d\x8a\x0c\x4c\x4a\x1e\xc2\x88\xbb\xbd\x48\xe1\x9e\x73\x57\x97\x16\xaa\xd4\x1b\xeb\x8a\xa7\x8b\x01\xdf\x61\xa4\x26\x87\xc5\xd0\x13\xa4\xec\x6f\xb5\xe7\x61\x1b\x66\xcc\x16\x59\x0f\x52\x2d\x7d\xc8\x62\x34\xa5\x56\xa7\xb2\x77\x8e\x63\x87\x33\xfc\x62\xb4\xdd\x81\x0d\x4f\x17\xd5\x1b\xeb\x60\x42\xaa\x7c\xa5\x9d\x03\xcc\xb5\x2f\x39\x3e\x4a\xdb\xb7\x77\x76\xbb\x71\x10\xed\x63\xfb\xe1\x60\xa9\xd1\x07\x30\xb3\xfe\x1c\x0c\x0d\xef\x1b\x4a\xf3\x73\x52\x34\xcd\xaf\xf8\x67\x3e\x57\xeb\x5a\x3e\xbf\xbb\x82\xf1\xd4\x69\xac\xd5\x6a\x3a\xe0\x05\xbd\x83\xe6\xfa\xfd\x94\x93\x77\xb7\x20\x85\x3f\xa1\x11\x42\x15\x90\x08\xdb\x87\x0c\xac\x64\xd1\xde\xc5\x4a\x9f\xd7\xa5\xb1\x00\x6c\x80\x0a\x2a\xa7\xfe\x38\x43\x9b\xf0\x9f\xc5\xc5\x62\xa0\x2e\x4a\xde\x0f\x41\x5e\x98\x6c\x1e\x9f\x83\xe1\x0a\x75\x8b\xc0\x15\xde\x19\x0f\xf0\x2b\xc8\x5b\x03\xc5\x95\xfc\x8e\x6a\xcd\x85\x04\x5e\x75\x8a\x2d\xe2\x2d\x59\xe9\x8c\x7b\x98\xb5\x85\x97\x1a\x21\x0f\x66\xa2\x9c\xaf\x0e\x2b\x2c\x97\x5b\x18\x84\x16\xa0\x43\xfc\x1e\xc6\xc6\x96\x12\x6d\x91\x33\x72\x0e\x4c\xf8\x04\x8b\x37\xce\xe3\x32\xdf\x63\x68\x8f\xd8\x93\xa5\x9c\xc4\xc4\x69\xd1\xfc\x85\x05\xcb\x77\x6c\x63\x22\xe0\xd5\x8f\xcc\x98\x45\xc3\x95\x1b\xb4\xbc\xe1\xf8\x58\x8f\x02\x44\x8c\x03\xcf\x01\xe7\x05\xb3\xea\xb2\x44\xcb\xce\xbf\x56\x46\x72\x30\x86\xdc\x09\x84\x41\xe1\xc4\x26\x19\x05\x1b\xf4\xaa\x36\x2f\xfc\xe9\xcc\x12\x84\x26\xc4\xc0\x99\x9f\x95\x83\x52\x9d\x1e\x94\xa4\x81\x2e\x4c\xfb\xa3\x61\x2f\x90\x75\x8e\x82\x0d\x63\xcb\x50\x99\xef\x24\xb2\x62\x31\x63\xac\x6d\x68\xa3\x2c\xd5\x92\x74\x34\x9c\xfe\x2c\xd1\x2e\x44\x80\x39\x5e\xaf\xac\xcd\x75\x29\x1e\x2c\xfb\x1d\xdf\x89\xf7\x2e\xc8\x77\x1f\xd0\xfb\xd6\xe2\x57\x26\xf5\xa6\x3c\x37\x97\x2a\x29\xda\x0d\xe9\xbd\xca\xdd\xf3\x0f\x48\xe1\xea\x62\xd3\xa6\xfb\xb5\x8b\xb3\x2f\x56\xcd\x43\x0e\xb1\xe1\x3e\x60\x0a\xc5\x06\xa1\x42\xce\x65\x7d\xd7\x9e\x63\xba\xb0\xb0\x61\x57\x25\x16\x70\x5c\x29\xde\xef\x6e\x5f\x17\x1c\xdf\x29\x34\xfb\xd9\xdd\xe3\x87\xcf\x6e\x7b\xdd\xe3\x47\xd2\xee\xda\x1a\x39\xd3\xaf\xfd\xad\xcf\xf4\x83\x70\x3a\xc2\xc9\xf2\x23\x9b\x08\xc0\xe9\x5d\x6e\xea\xaf\x39\xc4\x9b\x99\x3b\x1f\xe4\x34\xdf\x83\x8e\x1d\x12\x8e\x93\x8a\x43\xbb\xfc\xd2\x6d\x42\x20\xde\x6b\x99\x30\x94\x1a\xe4\x0c\xe7\x67\x50\x89\xfe\xe4\x8c\x98\x55\xdc\x81\x97\x19\x8b\xaa\x40\x8b\x2c\x90\x4e\x83\x9c\x6e\xe8\xdc\x64\xf8\x3a\x23\xa7\x48\x9f\x3d\xa3\x29\xed\x13\xf3\xcd\xe2\xa9\x36\xfc\x00\x0f\xc2\x89\x3f\x1e\xdf\xb0\x34\xa0\x41\xe9\x9b\x1b\x79\x54\x6e\x59\x2b\x6c\xe0\x4e\x04\x1a\x6a\xb3\x8b\x27\xe3\xb8\x0b\x7e\x8f\x9a\x9e\x23\x9f\x12\xe9\x56\x47\xee\xfc\x62\x17\x3b\x4a\x4d\x87\xa3\x96\x5c\x66\x25\x9f\xdd\x3c\x81\xc4\x2e\xbe\xbe\x63\x26\x08\xcb\xf0\x4a\xe4\x23\xdf\x37\x2c\x38\x9d\xda\xcd\x43\x18\x4d\x67\xd9\x7d\xe6\x94\x93\x87\x4a\x74\x77\xa0\xb3\x87\x22\x8e\x81\xc6\x28\x2c\xf4\x71\xe7\xa4\x12\x30\x5a\xf6\x10\x36\xf9\xe4\x6c\xa0\xbc\x0d\x5a\xe1\xb5\x95\x7a\x7a\x0a\xf5\x70\x8d\x40\x0e\xa8\x2b\x03\xbd\xb5\xeb\xe6\xdd\x3b\x6d\xde\x5d\x6d\xb7\x95\x36\x88\x6e\xbb\xe1\x69\xca\xf3\xf5\x27\x53\xbb\x7f\xba\xee\xdb\xb5\x3b\x1a\x91\xcc\x8b\x34\xe1\xe6\x21\x05\x1c\x80\x85\xc6\xd5\x9a\x88\x8a\x94\xd8\x90\x1d\x55\x1f\x26\x21\x3d\xb8\xbc\xce\xe5\x78\xa5\x95\xc4\x25\x55\x51\x44\x56\x07\xe7\x65\x3c\x48\x70\xf6\x40\x4a\x25\x22\xff\xee\xda\x03\x07\x41\x2f\x19\x9b\xb0\x79\x22\x53\x47\xdf\xb2\x1a\x43\xd9\x39\xd8\x11\x20\xd8\xaa\x33\x12\xfa\x22\xea\xa3\x20\x1e\x75\x0f
\xf7\x02\x6f\xb7\x87\x8c\x2f\x0b\x07\xa6\x39\xe1\x65\xe9\xa1\x4a\x8a\x2e\xab\x8f\x93\xdd\x10\xbf\x40\x31\x45\x3b\xfa\x56\x8a\x8b\xc9\xba\x5e\x14\x19\x53\xab\xc4\xf5\x05\x3a\x2c\x7b\x94\xcc\xcd\xf1\x38\xbe\x42\x7e\xd2\x0f\xb3\xc4\x4f\x6e\x10\x53\x2f\x7d\xc6\x37\x96\xb8\x83\x9f\x65\x8d\xc4\xcf\xd6\x86\x0b\x06\x4a\x57\xb7\x94\x1b\xad\x39\xce\x90\x04\xa5\x02\x37\x48\x88\xff\x06\xba\x8d\x38\x41\x61\x14\xe1\x04\xa2\xcf\xc6\xb3\x0c\x04\x08\x3d\x0a\x1f\xc4\x4c\xa4\x3a\x46\x4a\x86\xec\x81\xb6\x62\x04\xa4\xe3\x1a\x3f\xb9\x46\x68\xa9\xb1\x08\x09\xc4\x92\x56\x32\x2e\xd2\x47\x86\x52\xc1\x50\x2a\x68\x34\xf6\xdb\xc1\x11\xcc\x27\xbd\x06\x9c\xfa\x01\x1a\xc4\x51\x9a\xf9\x91\xde\xbc\x35\x89\x94\x3a\xc7\x6e\xc5\x9a\xc0\xfb\x34\x3c\x43\xbf\x6f\xa0\xda\x75\x7b\x40\xff\x67\x73\x87\x31\x0a\x37\x3b\xf4\x7f\xc5\x9a\xb1\x58\xd3\x89\x85\xda\xb3\x8d\x22\xff\x82\x38\x64\xb0\x03\x3d\x46\x14\x32\xc1\xc4\x1f\x24\x12\x59\x41\xbe\x32\x1b\x33\xb6\x0c\x24\x74\xda\xc6\xc7\x1d\x7a\x52\x55\x5f\x9c\x2f\x98\xbb\x45\x20\x83\x61\xfe\x6e\xe2\x8f\xed\x6f\xf6\x58\xf4\x31\xc0\x2b\x84\x25\x56\x18\x09\x65\xc1\x29\x2f\x13\x88\xcc\x28\xfd\xf0\xc1\xc8\x64\x92\xe0\xad\xcc\x0d\x3e\xf6\x58\xd1\xc3\x60\xa8\xff\xa7\x47\x0f\x9b\x23\xa6\x2e\x22\x22\x12\x1e\x9a\xd3\xd0\xdc\x08\x62\xee\x1a\x73\xa3\x88\xb9\xab\x3e\x52\x24\xb1\xfb\x73\xbb\x1e\x55\x4f\xc3\x78\x5b\xf6\x63\x22\x5d\xec\xda\x83\xa3\x15\x06\x1c\x2b\xe4\x98\xf2\x58\x69\x40\x73\x09\x85\x4b\x1a\xfc\x92\x49\xa0\x52\x75\x86\x1c\x9b\xf8\x03\xfb\x25\x91\x38\xf8\x3b\x8c\xe0\x5e\xfd\xad\x15\xe6\xd7\x9d\xd6\xb2\xe5\xf5\x38\xec\x2f\x13\x54\x02\xb0\x6d\x4d\xb5\xaf\x38\x1a\x2c\x83\x4d\xa3\xe5\x3d\x75\xb3\xd4\x3e\x4c\x82\xf6\x7c\xe3\xbb\x74\xe4\x37\xda\x3a\x48\xf2\xb2\xa1\x83\x4b\x47\x7e\xbb\xde\x30\x5f\x36\xd7\x2d\x25\x9b\xda\xab\x24\x9c\xe2\x49\x50\xef\xd4\xac\xb6\x7f\xca\xab\x69\xff\x73\x30\xd4\xdb\xc1\x97\xd3\xcf\xc1\xb0\xe8\xde\x41\xed\x7a\x1c\xe0\xe5\xc1\xb0\x6f\x7d\x9d\x25\x8e\xd7\xcb\x17\x63\x3f\x98\xf8\x91\xed\x73\x6c\x07\x86\x07\xfa\xeb\xa9\x1f\x2c\xfb\x51\x1a\x5e\xbf\x6a\xe8\x83\x40\x3e\x85\x69\x5c\xaf\xd5\x1b\xfa\x88\xb3\x4f\xaf\xd6\x5e\xad\xe9\x33\x44\x3e\x7d\xc1\x49\xcc\x5c\xaf\x2d\x5f\x23\xc7\x37\xaa\x23\x5b\x1e\xe1\x6b\xed\x83\x8f\x75\xe2\xa2\x71\x37\x02\xe3\x7d\x32\xd0\x27\x37\xf1\xfb\xfd\x30\xb3\xbe\x5c\x1e\xe3\x0b\x7f\x70\xf3\xd8\x77\x40\x62\xf5\xc0\x93\xbe\x68\xe0\x65\xbe\x56\xc4\x23\x5b\x22\xf0\x4c\x56\x86\x66\x16\xca\xd6\x81\xf8\xdd\x68\x89\xdf\x84\xea\xf9\x6f\x42\xec\xe2\x37\xfd\x95\x93\x76\x6e\x5f\x0a\xbf\x18\x21\x53\x0c\x28\xfd\x1a\x77\x58\x14\x1d\x4e\xad\xd2\x53\x96\xa8\x4f\x82\x36\xf3\xb7\xb1\x52\x83\x50\x22\x6d\x56\x26\x40\xf1\x46\xd0\x9d\xfc\x86\x92\x9b\x78\x23\x53\x99\x78\x19\xa9\xaf\x24\x9a\x82\x67\x42\x4a\xf0\x23\xa7\x20\x3a\x2a\x03\x36\x50\x8c\x5e\xa4\xdf\x9c\x4c\x16\x55\x44\x2a\x0a\x48\x99\xd7\x2e\xae\x98\x74\x87\x62\x63\x5d\xea\xb6\xeb\x5e\xb1\x36\xd9\x53\xe9\xaa\xdb\x6e\x79\x0a\xe1\x75\xdb\x6d\x2f\x9f\xf8\x6e\xbb\xe3\xa9\xa3\xd7\x6d\xaf\xe9\x37\xc2\x3a\x29\x77\x3b\x35\x8f\x51\x6b\xb7\x03\xf8\x08\x4a\xe9\x76\x1a\x9e\x4c\x2b\xdd\x4e\xcb\xb3\x51\x4b\xb7\xd3\xf4\x64\x0a\xe9\x76\xda\x9e\x4c\x3f\xdd\x0e\xe0\xa5\xd0\x4c\xb7\xb3\xe6\xe9\x54\xd3\xed\xac\x7b\x3a\xdd\x74\x3b\xaf\x3c\x83\x48\xba\x6b\x35\xcf\x42\x4e\xdd\x35\xc0\x9f\x2d\x89\xee\x1a\x60\xcf\x48\xa3\xbb\xd6\xf2\x0c\xe2\xe8\xae\x01\xe2\x84\x8c\xba\x6b\x80\x73\xbe\xce\xba\x6b\x1d\xf9\x02\xdd\xcb\x97\x6c\x77\x8d\x5f\xad\x93\xc5\xdc\x5d\x7b\xe5\xf1\xa5\xda\x5d\xaf\x79\xf9\x12\xee\xae\xd7\xbd\x7c\x71\x77\xd7\x01\x9d\x9c\x82\xbb\xeb\xd0\xb8\x60\x34\xdd\xf5\xd6\xed\x99\xd7\xa9\x3d\x5d\x1e\xfc\xf5\x97\x07\xbd\x11\x1e\x7c\x26\x9
d\x82\x95\x42\xdd\x80\x68\x9a\xb3\x74\x36\x25\x03\x83\x59\x7c\x6a\xa9\xdf\x20\xc7\xd3\x90\xe6\xe8\x87\x0d\xf4\x82\x43\x7e\x61\xb1\x08\x11\x4e\x1a\x0f\x78\x5d\x51\x68\x8e\x2f\xda\x39\xc2\x43\x9c\x60\x38\xe8\x25\xe1\x05\x9c\xc9\xc2\x28\xcc\x72\x30\xe9\x6c\x8a\x13\x50\x5d\x6f\x68\xe9\x39\x24\x28\x9b\xb3\x8b\x09\x8e\x32\xad\x00\xca\x62\x34\xf2\xa3\x60\x8c\x95\x71\x93\x61\xf7\xad\x90\x15\x9b\x1a\xa8\x6a\xba\x03\x4a\xba\x6f\x1a\x4b\x9e\x9a\x40\x85\x51\xb6\x2e\x69\xe8\x87\x72\x7d\xa1\x98\x50\x67\xc7\x3c\xe6\xe7\x35\xa8\x12\xfe\x13\x81\x0a\x2f\x64\x6c\x94\x43\x84\x15\xb1\x98\xa6\xff\x02\x48\x97\x21\xbe\x72\xa1\xe8\x6c\x5e\x42\x78\x8f\xa3\x80\xbe\x7e\x55\xcb\x73\x82\x03\x2c\x41\x67\xcc\xab\xff\x40\xd6\x9c\xb0\x1d\x81\x45\x67\x07\x6e\x54\xad\x1a\xad\x38\xb1\xaa\x77\xec\x68\xb9\x5b\x5a\xac\xc6\x5e\x94\x35\x1b\x8b\x36\xb1\x58\x8d\x9d\x71\xec\xdf\xa5\x4a\xa7\x05\xef\xf3\xf2\x77\x24\xa5\x15\x4a\xc1\x1e\x92\x5f\xdd\x64\xf8\x00\x92\x03\x19\xaf\x6d\x79\x97\x15\xfa\xdb\xa5\x8b\x2e\x6f\xab\xcc\x8a\xc8\x4b\x2f\xa6\x42\xc8\xa1\xbd\x15\xb8\xa1\x0d\x3b\xce\x16\xcd\xc2\xf6\x35\xcb\xbe\x7a\x93\xd9\x8c\x9f\x17\x72\x17\xb4\xa1\xb2\x48\x3e\xed\xbc\xfe\x69\x78\x76\xa7\xe4\xd9\xb9\x39\x77\xf8\x05\x53\x55\x6d\xee\x38\xaa\x16\x15\x8c\x35\x4f\x6d\xe1\x21\xe6\x46\x68\xeb\x88\x32\xdf\xd6\xac\x67\x64\x34\xc9\x6b\x02\x0f\x45\x44\xea\x93\x99\xb9\xd9\xae\x3f\x9d\x8e\x6f\x58\xc3\x7e\x72\x31\x23\x2c\x3c\x2d\xf2\x57\x64\xfc\x7a\x65\x9a\xc4\x59\x4c\x70\x94\x39\x77\x91\xe1\x84\xbe\xfb\xd8\x15\x2c\x9d\xfa\x93\xac\xf3\xd7\xc8\x3a\x10\x30\xfa\x2f\x88\x4b\x64\xcd\xa9\x54\xc2\x44\x02\xb6\x58\x7a\x8f\x87\xb2\x5c\xb7\x4e\xaa\x9c\x30\x66\x21\x95\xa4\xaa\x4b\xed\xe6\xcf\x26\xe9\xb9\xf8\x4a\xa7\x65\xe7\x22\x27\x84\x4d\x6c\xd0\xe1\x5b\xf1\xfb\x29\xfd\x91\x86\x11\x0b\xc6\x4a\x58\x46\xed\xba\x5e\x63\x7f\x55\xf4\x55\x4d\xe3\xcb\x96\x57\xa5\x6a\xb5\x50\xdf\xdf\x6a\x6b\xd6\x14\x36\x03\x10\xdd\x6b\x12\x6d\xb0\x51\xb5\x18\x80\xf0\xb4\x37\x85\xb7\x63\xb9\x26\xd8\x9e\xab\xf8\xd4\xe4\xa4\xb5\xeb\xce\x5a\xab\xdd\x68\xd6\xea\x1e\xaa\x5d\xe3\xe1\x20\xf0\xfb\xeb\xaf\x2c\x79\x15\x6b\xd7\xaf\xd6\xfb\x7e\x30\x18\x62\x0f\x06\xa6\xd9\x68\xb7\xd6\x3a\x6a\xb9\x33\xe7\x8d\x98\x96\x46\x4f\xee\xc5\xbe\xc8\xa4\x67\xdb\xbb\xae\xfc\x29\xc2\xe0\x5e\x3d\x7f\x0f\xa9\x77\xdc\x3b\x86\xfb\xfa\x9a\xcf\x06\x45\xe2\x9c\xc0\xe3\xe9\x05\x51\xe8\x88\xc0\xbb\x7f\x2e\x95\xde\x3f\xe5\x0f\x67\x36\x97\x10\xe9\x33\x21\x38\xb3\x00\xf9\xab\x54\x2a\x12\x4c\xea\x29\x8e\xbe\x22\xf9\x25\xec\x75\xad\xaa\xe6\x23\x8e\xbe\x96\x04\xd8\x68\x55\x2d\x00\x21\x94\xb1\xe2\x92\x6e\x82\xbb\x9f\x71\xc8\xae\x72\x43\x61\xbf\xee\x57\x86\xb4\x86\xa4\x31\x45\x4b\xa8\xa6\x8b\x0f\x4a\xe9\xba\x56\xba\x5e\x58\xba\xa1\x95\x6e\x14\x96\x6e\x6a\xa5\x9b\x85\xa5\x5b\x5a\xe9\x56\x61\xe9\xb6\x56\xba\x5d\x58\xba\xa3\x95\xee\x14\x96\x5e\xd3\x4a\xaf\x15\x96\x5e\xd7\x4a\xaf\x17\x96\x7e\xa5\x95\x7e\x55\x3c\x3b\x35\x6d\x76\xe6\x4c\x66\x5d\x2b\x5e\x3c\x9b\xf5\x86\x56\xbc\x78\x3a\xeb\x4d\xad\x78\xf1\x7c\xd6\x5b\x5a\xf1\xe2\x09\xad\xb7\xb5\xe2\x6d\x83\x1b\xac\xae\x12\x86\xfc\x39\x8c\x2e\x48\xd5\xd0\x1f\xf7\x6d\x62\xb3\x4f\xb6\x81\x53\xeb\x40\xf5\xe1\x93\x75\x50\x06\xf0\xc9\x3a\x00\x01\x7c\x6a\xda\xd0\xe9\xe5\x77\xd0\xea\x37\x82\xc4\xce\x4e\xc5\xf7\x50\xdf\x43\x03\x0f\x05\x9e\xb4\x40\x3d\x84\xd6\x3c\xb2\x85\xd6\xce\x74\xde\x10\xd0\x7a\x81\x87\x44\xd5\x7c\x84\x3c\x84\xea\x0d\x0f\x9d\x9c\xd6\x8d\x7a\x03\x5a\x8f\xb6\x44\xab\xe6\x8b\x96\xd4\x5b\x23\xf5\x1a\x46\xbd\x3e\xad\x27\x90\xf4\xa5\x7a\x4d\x0f\xa1\x06\xb4\xd7\x34\xea\x15\xf5\xaf\x25\xfa\xd7\x5a\xa8\x7f\x6d\xd1\xbf\xf6\x42\xfd\xeb\x88\xfe\x75\x16\xea\xdf\x9a\xe8\xdf\xda\x42\xfd\x5b\x
17\xfd\x5b\x5f\xa8\x7f\xaf\x44\xff\x5e\x2d\xd4\xbf\x7a\xcd\x63\xfd\xab\x9b\x04\x53\xd4\xc1\x7a\xdd\x63\x1d\xac\x9b\x14\x53\xd4\x43\x82\x25\xed\x61\xdd\x24\x99\x42\x12\x6d\x7a\x9c\x44\x4d\x9a\x29\xec\x63\x4b\xf4\xd1\x24\x9a\xc2\x3e\xb6\x45\x1f\x81\x6a\xcc\x4e\xbe\x7b\xe7\xe8\xa4\x87\x50\x9b\x76\xd2\xa4\x9b\x80\x56\xb4\x76\x92\xd0\xdb\x2b\x5a\xd1\x24\x9c\x01\xad\x68\xef\x64\xdd\x43\xa4\xa3\x27\xa7\x75\x93\x72\xfa\xb4\xa2\xb5\x93\x84\x63\x34\x6a\x50\xd1\x24\x9d\xa2\x3e\xb6\x45\x1f\x1b\x76\x5e\xe3\xea\x23\xa1\x39\xda\xc7\x86\x9d\xd9\x38\xfb\xd8\xe6\x7d\x6c\xd8\xb9\x8d\xab\x8f\x2d\xd1\xc7\x86\x9d\xdd\xb8\xfa\xf8\x2a\xef\xa3\x9d\xdf\x38\xfb\xd8\x12\x7d\xb4\x33\x1c\x57\x1f\x09\x63\x64\x7d\xb4\x73\x1c\x57\x1f\xd7\xf3\x3e\xda\x59\x8e\x93\x56\x9b\x1e\xef\xa3\x9d\xe7\xb8\xfa\xd8\x10\xb4\xda\xb0\x33\x1d\x57\x1f\xd7\x44\x1f\x9b\x76\xa6\xe3\xea\x23\x59\xfe\xb4\x8f\xcd\xba\x7d\x41\xee\xee\xba\x89\xb5\x05\xb8\x36\xed\x5c\x67\x77\xd7\xde\x49\x32\xac\x64\x6d\x9d\x9c\x36\xed\x5c\x67\x77\xb7\x60\x41\x76\xa0\xa2\x9d\xeb\xec\xee\x3a\x3a\xd9\xf2\x50\xa3\x09\x15\x4d\xd2\x29\xea\x63\x3d\xef\xa3\x9d\xe9\xb8\xfa\xd8\xca\xfb\x68\x67\x3a\xae\x3e\xc2\x44\xd2\x3e\xda\x99\x8e\xb3\x8f\x35\xd1\x47\x3b\xd3\x71\xf6\xb1\xe9\xb1\x3e\xb6\xec\x4c\xc7\xd5\xc7\x9a\xe8\x63\xcb\xce\x74\x5c\x7d\x6c\x8a\x3e\xb6\xec\x4c\xc7\xd5\x47\xc2\xca\x69\x1f\x5b\x76\xa6\xe3\xea\xe3\x2b\x31\x8f\x2d\x3b\xd3\x71\xf5\x91\x2c\x0f\xd6\x47\x3b\xd3\x71\xd2\x6a\x9b\xd3\x6a\xcb\xce\x74\x5c\x7d\x6c\xe4\x7d\x5c\xb3\x2f\xc8\xbd\x3d\xb7\xa0\xda\xa1\x9d\xb4\x73\x9d\xbd\x3d\x7b\x27\x81\xe6\x80\x07\xb4\xec\x5c\x67\x6f\xaf\x40\x0c\x68\x83\x08\x68\xe7\x3a\x7b\x7b\xf6\x4e\x12\xde\xd1\x80\x61\x6d\xdb\x45\x1d\x57\x1f\xc9\x7c\xd0\x3e\xb6\xed\x4c\xc7\xd5\xc7\xa6\xe8\x63\xdb\xce\x74\x9c\x7d\xac\x89\x3e\xda\x99\x8e\xab\x8f\xf5\xbc\x8f\x76\xa6\xe3\xea\xe3\xba\x98\xc7\xb6\x9d\xe9\xb8\xfa\x08\x34\x47\xfb\x68\x67\x3a\xae\x3e\x82\x48\x4e\xfb\x68\x67\x3a\xce\x3e\x36\x3d\xde\x47\x3b\xd3\x71\xf5\xb1\x25\xfa\xd8\xb1\x33\x1d\x67\x1f\xeb\xbc\x8f\x1d\x3b\xd3\x71\xf5\xb1\x21\xfa\xd8\xb1\x33\x1d\x57\x1f\x5f\x89\x79\xec\x34\xcd\x05\x09\xd7\x28\x19\x4e\x26\x38\x08\xfd\x8c\x39\x95\x81\xbb\x82\x5a\x8e\x1c\x71\xd1\x06\xaa\xc0\x7f\x97\x90\xaf\x6b\x58\x69\x99\x3a\x2b\x53\x27\x65\xfa\xf6\x32\x0d\x56\xa6\x41\xca\x0c\xec\x65\x9a\xac\x4c\x93\x94\x09\x0c\x6d\xae\xa6\xaa\xdc\xb1\x58\xea\x2e\x18\xd0\x16\x32\xa5\x8b\x6c\xba\x7e\xe6\xdb\x0e\xe6\x7e\xe6\x8b\x50\x3e\x7e\xe6\xbb\x95\x63\xd1\xdb\x30\x4b\x4f\xe2\xcc\x1f\x0b\x98\xd1\x96\x9f\xf9\xd4\x83\xe4\x25\x5a\xb7\x40\x87\x3a\x1f\xf0\x30\xe3\xd0\x85\xc7\x09\x94\x37\x3a\xe3\x4c\x79\x25\xd0\x3c\xcd\x41\xfe\xfc\xf3\xcf\xa8\x0d\x17\x6f\xb5\xeb\xf5\x5a\x7e\xdf\x96\x97\xf8\x17\x6a\x36\x0c\xe2\x50\xfb\xb2\x8b\x36\x10\xa8\xdd\x87\xe3\x38\x4e\x2a\x52\x27\x57\x15\xdd\xbb\xab\x73\x50\xf6\x03\xda\x90\x9e\xf4\x85\x23\x50\xaf\x54\x2a\x39\x6e\x4b\xa8\xd3\xa2\xf9\xd2\x5e\x41\x30\xd1\x56\x95\x2a\x6c\xec\xfa\x59\x5e\x95\xe1\x9c\x2b\x67\xe5\xb7\xe5\xb5\xb3\x26\x38\xa6\x9a\xd5\xc1\xcd\xd3\xcd\x1a\x5c\x62\x91\xce\xb6\xca\x74\xf6\x83\xb5\xb3\x1f\xee\xda\xd9\x0f\xd6\xce\x7e\x28\xdb\x59\xb3\xb7\xb2\x13\x55\x45\x74\x9f\x07\x9b\x82\x9c\x7a\x76\xff\x41\x30\x78\xa7\x6e\x0c\xe0\xa3\x68\xf3\xa4\x2a\xcc\x2b\x3f\xc7\x1b\x52\xd1\x79\x5b\xc8\x77\x97\x19\xc6\x3b\xbd\xdf\x16\xba\xf7\x70\x5c\x71\xa1\xa2\xeb\x7f\x81\x09\x5c\x61\xec\x9e\xda\xef\x2e\x76\xd9\x2d\x59\xa5\xb2\xab\x5c\x4b\xec\x2e\x7c\x1f\x41\x69\x61\x57\xb9\x8b\xd8\x75\x5e\x42\xcc\xbf\x71\x38\x62\xb9\x81\x61\x0e\x59\x04\x9e\x00\xc6\x54\x2d\x5a\x22\x59\x39\xb8\x21\x14\xb2\x7a\x50\xb0\x82\x53\xa6\xb8\xa1\x83\xc7\xfc\xfa\xdf\xd8\x78\
xe1\xf3\xb9\x41\x0b\x2e\xef\x4a\x1e\x41\x83\x7c\xb5\x7b\x38\xd0\x5f\x02\x49\x4d\xf5\x75\xed\xa1\xd4\x43\xea\x15\x1a\xf0\x49\xb4\x81\x7c\xb4\x84\x2a\x95\x3e\xfa\x89\x6e\x8e\x95\xff\x4b\x7e\x06\x55\xc2\x06\xae\xd1\x12\xca\xa4\xf6\x44\xc0\xe2\x88\x4c\x53\x4a\x57\x2a\x8d\x53\xde\x6c\xa0\x65\x94\x56\xa1\x5a\x5f\x33\x7a\x13\x58\x69\xe7\xff\x72\x58\xc1\x76\x5c\x19\xa0\x9f\xd0\xff\x7d\x1c\xac\xb4\x43\xd0\x5c\xac\xfa\xe8\x77\x34\x40\xbf\x13\xc4\x1e\x1e\x19\x4d\x00\x9c\x8b\x0c\x41\xa4\xd2\x47\x5f\x1f\x78\x70\xe4\xdb\xea\x63\x57\x9a\xf4\xb9\x89\xf7\xcb\x04\x59\xe3\x7e\x62\x9a\x8b\x22\xac\x06\x13\x8c\xc3\x59\xcc\x51\xfa\xae\x61\xcd\xd8\xba\x14\x46\x2e\xfb\x5b\x6d\x8b\xef\x57\x71\x79\xd3\xe1\x2b\x8f\x2f\xa6\x5c\xe6\xab\x19\xf9\xf7\xb7\xda\x56\x93\x01\xe7\x24\xcc\xc9\x55\xff\x50\x53\x70\xa7\xd0\x0e\xf3\x27\x4e\xf6\xf2\x7b\x88\x89\xa3\x4e\x65\x62\x22\x76\x27\xfe\x80\x4c\x86\x92\x19\xde\x9c\x0f\x56\xcc\x9c\x93\x3c\x9b\x3d\x9d\x97\xc2\x0c\xec\x2c\xb2\xb5\xc3\x02\xaa\xf1\xb7\x76\x31\xfb\xe7\xc7\x64\xa3\x8b\xed\x25\x8b\x33\x84\x76\x30\x0e\xfa\xfe\xe0\x33\x8b\xab\x39\x89\x03\x58\x52\x84\x66\xc4\x7c\xc3\xcb\xde\xce\x5b\x22\x02\x59\xc4\x03\x30\x73\x82\xaf\x8a\xb5\x1c\x58\xb8\xd0\x56\xf6\x09\x00\x66\xcc\x23\x56\x7d\x6f\xe7\xed\xca\x76\x44\x63\x95\x83\x01\xd5\xce\x5b\x8b\xc1\xcf\xd4\x61\x2e\xc3\xcc\x0c\x0b\x4c\x66\xdc\xa2\x29\x0b\x41\xc5\x05\x12\xfa\x68\xbb\x67\x96\x42\x79\xd0\x42\x72\x28\x0f\xb5\x3c\x8f\x51\xfe\x1e\xdf\xa4\x59\x82\xfd\xc9\x66\x14\xb0\xde\x59\xac\x23\x63\x66\x16\x2b\xc0\x79\xac\x01\x9b\x90\x7d\x84\x27\x18\x82\x8c\x83\x31\x26\x9d\x27\x16\x2b\x13\xfc\xe7\x23\x7c\x9d\xd1\xd7\x76\xf1\x1d\x5f\xbe\x65\x31\x53\xa1\xf5\x95\x74\x1c\x0e\x70\x85\xa3\x20\x6e\xea\x05\x2e\x36\xfb\x49\x65\xd6\xb6\xf0\x3f\x65\xd6\xee\x31\xba\x60\x38\x3c\x0a\xd3\x85\xc7\xf6\x9b\xd1\xcd\x49\xde\xa1\x3e\x1e\xc4\x13\xe6\x75\x4f\x08\x22\x8c\x67\x69\x39\x92\x11\x5d\x2c\x25\x8e\x17\xf4\xa6\x32\xb7\x0b\x9a\x6f\x84\x79\x60\x83\xf3\xde\x65\x1e\xac\xe5\xf2\xb5\x6a\x34\x2e\x87\x63\xa6\xcd\xe7\x9f\x21\xb3\xeb\xa5\xf5\x48\x23\x4a\xa3\x0d\x14\x5e\xb2\x29\xac\x39\x56\x62\x7c\x89\xd1\xde\x2f\x70\xfe\x4c\x67\xfd\x14\xff\xf7\x0c\x47\x59\xc1\xe9\x19\xf0\x15\x0e\x0c\x73\x0d\xa0\x75\x7c\xb4\x09\x31\x27\x81\xfc\x31\x2a\xc7\x74\xa0\xa1\x60\x45\x00\xf1\x90\xda\x95\xd5\x55\xc4\x66\x24\x7f\x67\xcd\x96\x5b\x1c\x35\x86\x9a\x9e\xe7\x16\x82\x10\x09\x46\x34\x0a\xe7\x68\x83\x5e\x18\x16\x5c\x9c\xd8\x79\x5b\x64\x70\xcd\x37\x9d\x45\xe2\xd4\x75\x9a\x4f\xc2\xc7\xf7\x2e\x7c\xa0\xff\x9c\x26\x38\xc5\xc9\x25\xa6\x62\x48\x3c\x23\xa2\xbc\x24\x7e\x80\x1a\xc3\xcf\xc2\xfe\x98\x71\x60\xb4\x95\xa0\xb7\x49\xe8\x47\xe8\x1d\x75\xcf\x44\xc3\x70\x8c\x71\x34\x58\x19\x00\x08\x1e\xf2\x19\x22\x60\x6b\xf4\x73\x72\x04\x45\xfe\xcb\x8f\xd0\x6e\x32\xeb\xdf\xa0\x3f\x46\xe4\x3f\x2b\x57\xb8\xff\x9f\x17\x13\x3f\x1c\xaf\x0c\xe2\x89\x5d\xde\x39\x39\xe2\xcd\x15\x88\x3d\x72\xa1\xd2\xd2\xcf\xb3\x3c\xdf\x4b\x34\x20\x07\x05\x9a\x32\xe9\xf9\xb3\x67\x64\xd0\x81\xf4\x44\x3a\x24\x50\x12\x51\xa5\x50\x15\x66\x9d\xfe\xfa\x13\xad\xae\xc6\x97\x38\x19\x8e\xe3\x2b\x52\x07\x36\xbe\x3a\x4f\x07\x4a\xea\xd5\x3b\xd5\x9f\x48\xd9\xd7\xe2\x73\x43\xfe\xbc\xae\x7f\x6d\xb2\x3d\x8c\x35\x06\x78\x02\x2a\x04\xac\x68\x77\x75\x15\xf1\x66\x51\xbf\x4e\x8a\x00\xca\xd0\x74\xed\xb5\xa8\xd2\xc8\xab\x88\x32\xcf\x00\x01\x5a\x88\x96\x6a\xaa\xa5\x58\xb1\x67\x80\x0a\x2b\x77\x0b\xff\x12\x82\x94\x4b\x2c\x2d\xf5\x9b\xd2\x77\xf8\x87\x97\xa1\x45\x96\x96\xfa\x8d\xd7\xcf\xdd\x05\x96\x96\xfa\x75\xf6\x9d\xfc\x0b\x1d\xe7\x8d\xc2\xc3\xd2\x06\xf4\xfc\xcd\x1b\x96\x0f\x52\x7e\xdd\xa0\x2a\x40\xe5\x2d\x43\xc8\x6c\x49\x54\xab\x5d\xd7\xea\x4c\xeb\x97\x17\x65
\x5c\x8f\x14\x22\x2f\x6f\x75\xea\x60\xcb\xa3\x32\xa0\xff\x55\x69\x84\xbd\xa4\x37\x48\x9c\x94\xf2\x97\x55\x46\x30\xd2\x14\xac\xae\x22\xb2\x4b\xc0\x4d\x0c\x0a\xa5\x85\x44\x17\x8f\xb1\xd2\x5e\xa4\x08\xe0\xa5\x28\x8e\xc6\x37\x74\x39\x6e\xfd\x7a\x70\xb4\x85\xfe\x40\x6f\xd0\x3a\xc0\xe4\x0d\xd6\x6d\x58\xd0\xbb\x38\xb5\xb3\xec\x1b\xef\x2f\x5f\x4b\xca\x59\x40\xac\xab\x15\xc7\xeb\xbf\x50\xe6\x5c\x54\xe4\x34\x8a\x6b\x32\x8c\xd9\x2a\xe3\x89\xa2\x59\x3e\x60\x06\xea\x45\x12\x0f\x72\x4b\x3d\x20\x34\xd8\x1b\x29\x96\x81\xd0\x1d\xe4\x20\x34\x5f\x16\xe2\xd2\x01\x21\x6c\x93\xe6\x29\x2b\x7a\xa6\x8b\x46\xec\xb3\x84\xab\xaa\x7a\x5e\x44\x28\x42\x0e\xc1\x08\xdd\x4d\x38\x42\x0b\x0a\x48\x48\x95\xe7\xcc\x43\x57\x4e\xf7\xf2\xd9\x4b\x2c\x8d\xd7\x9a\x64\x25\x8a\x4b\x02\x96\x53\xc4\x92\x0a\x2f\x20\x69\xb5\x9e\x24\xad\xef\x5d\xd2\x72\xc8\x57\x0e\xf5\xce\xc9\x51\xb1\x9c\xb3\xa8\x7a\xc7\xc2\xd2\x75\x5e\xfe\xc4\xc4\xff\x79\x4c\xbc\xf0\x34\xfb\x08\x2c\x7b\x2f\x1a\x24\x18\x22\x37\x30\xe0\x1a\x48\x26\x87\xe4\x93\xbb\x8c\xa8\x31\x8d\xe3\x0b\xdc\x96\x7f\x45\xb5\xbf\xd5\xe6\x50\x76\x57\x98\x7f\xde\x26\x65\x16\xd8\x05\xda\x4f\xbb\xc0\xdf\x62\x17\xd8\x1e\xe3\x41\x96\xc4\x51\x38\x40\xbd\x38\xc0\xfd\x38\x9e\xaf\xf0\xdf\xee\x15\x29\xfc\xe9\xd7\x85\x76\x84\xed\x9e\xaa\xf0\x27\xcf\x0f\xb5\x03\xc8\xac\x5d\x65\x20\x6a\xbd\x22\x2d\x26\xc1\x47\x59\x48\x8f\x85\x5f\x80\xef\x84\x1f\x4f\xbd\xd4\x9b\xaf\x37\x83\x32\x0b\xac\xe3\xbf\x77\x72\xe4\xff\x39\xeb\xf8\x60\x96\x4d\x67\x59\xf9\x4b\xbb\x83\xc2\x4b\xbb\x83\xc5\x2f\xed\x74\xa9\xee\x40\xbb\xc4\x3b\xf8\x6b\xaf\x83\x1e\x5d\xaa\x33\x75\xf3\xe2\xcd\xc3\x4a\x76\x05\x0d\x7d\x2f\xd2\xdd\x3f\xe9\x84\x7d\xa0\x5d\x6b\xba\x84\xa8\x83\x12\x97\x16\x07\x0b\x5e\x5a\x3c\x65\xb1\xfb\x7b\x30\xdf\xcd\x8f\xc7\x7b\xe8\xb7\x95\x57\x8d\x26\x37\x10\x47\x69\x46\x96\xf7\xc5\x8d\xc1\x7d\xa7\x7e\xb0\xb2\x19\xa5\xe1\x6f\xa4\xb4\xc8\x05\x37\xf5\x03\x99\xfd\x05\x7e\xe6\x4b\x17\xa1\xae\x0b\xd0\x54\xbd\x01\x25\xb5\x8e\x73\x83\x5f\xc5\x00\xf8\xb5\x5a\xb4\xaf\xa7\x15\xe9\xbb\x12\x8a\x00\x51\xcc\xa2\x4c\xf4\x4c\x0b\x66\x05\xb6\x78\x87\xf4\x9b\x01\x8c\xbe\x58\x56\x31\xfb\x97\xf6\xdd\x68\x8d\xc6\xb4\x19\xfb\x29\x8d\x9c\x85\xa6\x71\x1a\xaa\x1e\xf8\xa4\x51\xf2\x9d\xd4\x3f\x8c\x79\x67\x45\x0b\x4b\x1a\x46\xcb\xa8\xae\x35\x72\xe8\x07\xf9\x33\x0c\x94\xc8\x36\xa2\xbe\xa6\xac\x44\x6e\x2b\x0f\xa9\xa5\x36\x92\x87\xd4\x92\x4b\xdb\x82\x6b\xa9\x96\xd9\x4b\x1a\x20\x6e\x87\xc8\x2d\x70\x67\x91\x85\x38\x74\x8a\x78\x87\x33\x29\xe1\xbc\x32\x55\x54\x81\x2f\x46\xb3\x78\xe6\xa4\x3e\x57\x54\x34\x97\xc9\xf1\x97\xf5\x3d\xbf\x08\x92\x50\x60\xfb\x8a\xe1\x21\xa1\x81\x71\xf4\xf6\xf9\xb3\x5b\x2b\xdf\xe4\xcb\xe5\xfa\x55\xa3\xb9\x10\xef\xbc\x5f\x62\xb2\x27\xde\xf9\xad\x78\xe7\xde\xf1\x01\x82\x90\xb8\xe5\x58\xe7\x1e\x0b\xa0\x7b\x5f\xd6\xf9\x97\xb3\xc3\x7c\x49\xcc\xe1\x87\x16\x56\x45\xd3\x01\xd8\x23\xd0\xad\x24\x7e\x14\xc4\x93\x8a\xc1\x01\xab\xd5\x15\x4d\x52\x2a\x86\xc3\x52\x87\x9d\x1a\x5c\xae\xd1\x3a\xf3\x08\xb8\x27\x46\xa5\x33\x2a\x4e\x9c\x0b\x31\xaa\xbf\x77\xe6\x85\xff\x51\x8c\x6a\x75\x6f\xbb\x87\x5e\xad\xbd\x5a\x5b\xae\x23\x46\x1b\x68\x1f\x67\xa3\x38\x40\x0d\x17\xb7\x82\xd0\xde\x77\xe5\x56\x9b\x41\x40\xfd\x07\xd5\x05\x51\x82\x0b\xf0\xd5\x4b\x6a\xd3\x3f\xbe\x68\x95\x06\xfe\x0f\x4e\x62\xc8\x1d\x96\x8d\x30\x4a\x70\x2a\xf1\x45\xa5\x23\xa4\x1c\xeb\x31\x79\x36\xf0\xbe\x13\x2f\x60\x0b\xf1\x0b\xc3\x41\x5d\x8d\xce\xe6\x01\x34\x85\x67\x5f\xd8\x71\x84\xd1\x24\x4e\x30\x15\x1e\x97\x97\xa1\x6f\xae\x51\xe4\xeb\x7d\x79\xb9\xe4\x02\x87\xf9\x5c\x64\x81\xaf\xdd\x2f\xca\xf9\xd3\x02\xff\x66\xa7\x38\x14\xc5\xf1\xb4\x9c\x18\xf2\x91\x93\xa3\x73\x65\x0b\x62\x77\xaf\x89\xbc\x48\x11\xcd\x8
9\xa6\x16\x22\xba\xfb\x85\x9b\x7d\x22\xba\x6f\x45\x74\xff\x47\x62\x7e\xc5\x24\x27\xf1\xc0\xbf\x50\xf8\x2d\x7d\x70\x96\xcf\xb7\x86\x00\x5c\xa9\x14\x8b\xc0\x55\xf4\xf5\xab\xfe\xea\x4e\x5b\x8c\xbd\xc7\xf3\xe3\x0a\xac\xae\xa2\x4f\x04\xbe\x5a\x2f\x34\x22\x05\x80\x66\x41\x94\xb9\x1a\x85\x63\x8c\x2a\x3f\x54\x72\x5f\xeb\x3c\x06\x37\x78\x1c\x1a\x31\xb7\x85\x09\xa7\xa1\xc8\x0c\xc5\x96\x84\x54\x15\xa5\xee\xd8\x0d\xf1\x78\xcb\xec\x5e\x12\x05\x2d\xc4\x4b\xfe\xde\x8e\x5b\x96\x1c\x5d\x34\x49\xd6\xe3\xf2\x95\x3c\x13\x12\xb4\xf6\xd7\xe7\xf9\x78\xdc\x24\xe1\xe5\x62\x62\x1b\x31\xaf\xc5\x97\xe3\xdd\xcd\x7a\x1e\xeb\x99\x3c\x49\x1f\xcd\x44\xe0\x36\x07\xd1\x43\x3f\x4d\xc9\x42\x5e\x26\xa8\x05\xe8\x3d\xbe\x41\x5b\x38\x09\x2f\x69\x4e\xc8\x1d\x3e\x28\x8d\xe2\x98\xd3\x87\x6f\xdf\x6f\xed\x34\xf2\xd6\xc4\x73\xc9\xc4\xe3\xbd\x38\x1a\x86\x17\x33\x96\x89\x32\x86\xac\x90\x69\x51\x7e\xc9\x24\x9e\xe2\x24\xbb\x41\x7f\xd2\x63\x31\x78\x93\x02\xf3\x3d\x19\xd1\x1c\xc7\x29\x79\x08\x23\x96\x2e\x20\x8b\x85\x2f\xcd\x0a\xda\xc2\x43\x7f\x36\xce\xba\xa8\x85\x2a\xf5\xc6\x3a\x24\x52\xae\xba\xe0\x3b\x12\x9a\xe3\x84\x27\x32\xcf\xc1\x91\xf1\x9f\x87\x66\x98\xb1\xe4\x99\x29\x80\xca\x0f\xf5\xd2\x87\x2c\x46\x53\x9c\x0c\xe3\x64\x22\x01\x57\x20\x4b\xe9\x1f\x07\xc3\x8b\xae\x6b\x94\x11\xbd\xf8\x3a\x86\x98\x33\xf5\xc6\xfa\x6a\xb3\xa1\x85\xe0\xa6\x5d\xa1\xa8\x6b\x9f\x72\x84\x94\xc6\x6f\xab\x45\x09\x49\x8b\x12\xc8\x93\x59\x09\x72\xd2\xe2\xeb\x6d\x7e\x16\xd1\x03\xe0\x73\xb7\xa4\xab\x72\xc6\x50\x32\x7e\x03\x1b\xdd\x70\x7f\xb3\x61\x9c\xc0\x29\x26\x6f\xf4\x01\x12\x83\x7e\x0e\x86\x46\xd2\x78\x4a\xed\xfc\xf4\xa8\x98\x61\x2d\x52\xf1\xcf\x7c\xb2\xd6\x69\xfa\xc9\x7b\x83\xf1\xd4\x69\xac\xd5\x6a\x3a\xe0\x82\xec\xf5\x83\xe1\x85\xdd\xf0\x82\x4c\xc4\x86\xf8\xc9\x09\x8f\x14\x77\x05\xc3\x30\xd7\x3b\x5c\x57\x50\x0f\xba\xb2\x2c\xe8\x2e\xf9\x66\xa7\x0c\x36\x50\x0b\x7f\x58\x29\x59\x39\xf5\xc7\x19\xda\x84\xff\x2c\x9e\x88\x96\xbb\xd1\x48\x7e\xed\xf7\x21\x3b\x9a\x48\x3d\x18\xae\xb0\xa8\x24\x15\xde\x19\x0f\xf0\x73\x4e\x2a\x2b\x2e\xcf\xab\x56\x73\xa1\xdc\x2e\xea\xd4\x5b\x0d\x08\xc3\xcc\x91\x14\x96\x79\xd9\x83\xef\x3e\xa3\x55\x42\x3e\x94\x07\x79\x62\x76\xec\x66\x89\xee\x04\xe5\x20\x9b\xd2\xc1\xa6\xe9\xe6\x0d\x7d\x8e\x2d\xd4\x13\xc8\xc9\x7b\x51\x80\xaf\x6d\x35\x4e\x6b\xd7\x4c\x01\x64\x89\xd6\x39\x27\x44\x97\x40\x45\x08\xcb\xe2\x8d\x33\x7f\x7d\x8e\x0d\xaf\x94\xbf\x71\x56\xe2\x5b\xde\x06\x99\x95\x15\xf6\x64\x33\xc2\xc8\xb7\x16\x5a\x34\x7f\x31\xc7\xc8\x42\xfd\xc8\x04\x75\xad\x83\x3c\x2e\xd2\x1b\x8e\x8f\xd5\xb8\x40\x74\x92\xe5\x39\xe6\xc9\xb2\x81\x02\xf3\x34\xbe\x79\xaf\xf5\x39\x43\x2c\xa3\x77\x9e\x1a\xd8\xfc\x3e\x3f\x1b\x03\xc0\x57\x86\xd8\x3a\xba\x66\x71\x91\xc5\x28\x7f\xc5\x3a\xee\x40\x64\x4f\x8c\xb1\x1d\x74\x28\x47\xb3\x63\x60\x2d\x58\x28\xb6\x1c\x75\x6a\xcb\x21\x4d\x9f\xd3\x98\x03\x01\x3f\x57\x9a\x80\xd1\x13\x23\x2d\x7f\xb4\x8d\x75\x99\xf1\x46\xf3\x42\x41\xd9\x3a\xcb\x47\x5f\x7e\x67\x0f\x58\x25\x35\xf1\xdb\xc1\x91\xda\x1d\x70\x9d\xb2\x78\x5c\x1b\xe3\xf6\x07\xb5\x81\xf9\x83\xdb\xc0\x48\xb3\xf9\x1a\xfd\x51\x30\x7a\xe4\x2f\xaf\x71\xfa\x07\x98\xc3\x18\x1d\x39\xfd\x43\x37\x8b\xe1\x7f\xb7\xe6\x6b\x3d\xe0\x14\xf9\x93\x98\x03\xd3\x4d\x43\xa3\xb6\x29\xd1\x98\xc4\x69\xed\x6c\x69\xa9\xd8\xa4\x48\x02\x2e\x1d\x7d\x39\xdf\xb0\x04\x31\x63\x7b\x59\x5e\xaf\xc8\x80\x52\x3e\x46\xdc\x6b\x43\x2f\x13\x6c\xa6\x70\x23\x5f\x70\x13\x7f\x28\xd1\x32\x4c\x6d\xe9\xf6\xe7\x47\xaf\xb1\x88\x06\x0f\x10\xc4\x86\x8a\x08\x42\x32\xa4\x42\xa1\x4b\x4c\x58\xac\x9a\x87\x1c\xb2\xe9\x7d\xc0\x14\xca\xa6\x79\x90\x1d\x71\x94\x74\x09\x30\x1e\xd2\x05\x55\x36\xec\xaa\x58\x4c\x0a\xcd\x11\x9e\x6e\x8b\x6c\xd1\x28\x34\x7b\xa0\x1e\x
3d\x85\x2e\xcf\x09\x7b\x7b\xe6\xad\xfd\xbd\x7d\xe8\x17\x48\xeb\x3e\x3f\x39\xfa\xe3\xea\x8e\x9c\xe9\xb5\x5d\x59\xaf\xff\x09\xda\xa5\x63\x30\xce\xec\x71\xe3\x5d\xaa\x44\x92\x5f\x16\xe9\x91\x04\x1e\x47\x78\x96\xfa\xfd\x31\x66\xe1\xc0\x24\x74\x8e\x91\x9c\x6a\x91\x42\xd1\xdf\xbc\x43\x6a\x86\x35\x69\x5b\x38\x82\x6c\xca\x88\x19\xda\x32\x1b\x63\x53\x93\x24\xca\x43\x8c\x95\x30\x45\x3e\xa2\x09\x98\xd1\x25\x4e\x52\x88\x5a\x36\xf2\x33\x14\xe1\x8b\x31\x1e\x64\x38\x20\x6c\x78\xc0\x52\xaa\x66\x4c\xe1\x93\xc5\x68\x1c\x66\xd9\x18\x2f\xd3\x00\x97\x2b\x2a\x50\x9c\x24\x71\x82\x82\x18\xa7\xd1\x8b\x0c\xf9\xc3\x21\x1e\xd0\xba\x14\xa9\x17\x29\x4a\xf1\x60\x96\x84\xd9\x8d\x27\x2a\xf6\x67\x19\x0a\x33\xa8\xc4\x6b\x84\x59\x2a\x02\x2a\x84\xe3\x30\x63\x4e\xdc\x34\xaf\x6b\x48\xf8\xf3\x04\x47\x74\x3f\x48\x6d\x8a\x32\x3a\x20\x1f\x68\xe7\x84\xba\x4c\x7b\x2b\xcf\xdf\x5d\x93\xb6\x15\x1f\x52\xde\xcb\x66\xd0\xce\x03\x46\x6e\xbd\x0d\xa7\x86\xcb\xa2\xd3\x42\xc8\x4e\x68\x64\xf7\xc2\xce\x73\xda\x6f\xa2\x5d\xf2\xcb\x92\x38\xee\xfd\x69\xed\xcc\x43\x95\xf7\xa7\xcd\x33\x16\x2c\x00\x7d\x25\x8f\xec\x2a\xa0\xde\xa9\x5a\x92\xc8\xbd\x3f\xad\xd3\x4a\x35\xb5\x52\xb3\xb8\x52\x83\x56\xaa\xab\x95\x6a\xc5\x95\x9a\xb4\x52\x43\xad\x54\x17\x95\xd4\x3a\xb6\xec\x48\xc6\x90\x71\x2f\x43\xd7\xa0\xf5\xc4\xa0\xf5\xec\x83\x66\xe2\x23\x0d\x17\xeb\x13\xbd\x30\x19\x0e\x79\xda\x41\x8a\x34\x0d\xb2\x5a\xab\x91\x2f\xb6\xfe\x9a\x13\xd1\x54\x21\xd7\xad\x90\x1b\xa5\x20\xd7\x9c\x03\x2f\xc1\xd0\x20\x37\x4b\x41\xae\xbb\x66\xc7\x93\x60\x68\x90\x6b\x1a\xe4\xf9\x13\xd9\xf3\x93\xe4\x06\xf5\xf5\x74\xaa\x74\xaa\xfa\x34\xfe\x85\xa9\xc9\xc8\xe8\xe4\x13\xd6\x93\xde\xa4\x19\x9e\xa0\x61\x3c\x4b\x50\x16\x4e\xf4\xb9\x5f\x30\x28\x6f\x84\xaf\xb3\x63\xb2\xfa\xdc\xf1\x63\x2d\x11\x6f\xf7\xe3\x20\x1c\xde\x50\x4e\x48\xe9\xb0\x04\x16\xeb\x6e\x2c\x7a\xa7\xd4\x71\xe0\xb7\x53\x48\x79\x09\xd1\x56\x8c\x4c\x71\xb6\x24\xb9\xbf\xa0\x14\x67\xb3\xa9\xfa\xa1\xc0\xa3\x63\xfe\x61\x7f\xef\x17\xea\xda\x51\x74\xc2\xdf\xfb\xe5\xbc\x86\x36\xd0\xde\x2f\x66\x6a\x34\xa9\x48\x9d\x16\xa9\x5b\xa3\x19\xcb\x4b\x1a\xa6\x32\x9d\xf5\x2f\x31\x11\x15\x5c\x47\xff\x1a\x0d\x7e\x0c\x6d\xd3\xe8\xc7\x5f\x11\x7d\x72\x45\x3f\x96\x8b\xb3\x30\xc7\xa2\x7c\x7e\x1d\x6a\x0f\x73\x2c\x9a\x6d\x88\x66\xeb\x4a\xb3\xf5\x79\xcd\xd6\xd5\x66\xeb\x8b\x35\x0b\x61\x74\xc2\x1a\x5f\x82\x04\x48\xd8\x50\x57\xa0\xab\x6a\x13\xaa\x36\xf8\x62\x86\xaa\x35\x75\x99\x3a\x66\x84\x91\x75\x11\x6b\x45\x40\xad\x35\x7a\xae\xd7\x63\xfb\xd3\x8f\x75\xfa\xb1\x6e\xfd\xd8\xa0\x1f\x1b\xd6\x8f\x4d\xfa\xb1\x69\xfd\xd8\x2a\x6a\xb3\x5d\xd4\x66\xa7\xa8\xcd\x35\xd1\x66\x81\x46\xaa\x14\xe7\x41\x8b\x73\x1f\x54\x8e\x03\x21\x53\x49\x21\xfb\x11\x3d\x48\x72\x57\xa7\xf2\x5a\x92\x3e\x4a\x71\x66\xb5\x88\xbd\x77\xee\xed\x1d\x06\x37\xf7\x32\x03\x2e\xa4\x96\x3e\xa6\xa1\x86\x7e\x03\x22\x44\x95\xdf\xc8\xdc\xf3\x55\x02\xcf\x62\xef\x7d\xad\x57\xac\xd3\x8a\x0d\x56\x71\x4d\xab\xd8\x76\x56\x6c\xd0\x8a\x2d\x56\xb1\xae\x55\x5c\x73\x56\x6c\xd2\x8a\x9d\x33\x81\x9a\x52\xb1\x9e\x57\xbc\xd7\x2e\x56\x14\xa5\x9e\x22\xc2\x63\xc7\x1f\xb3\x94\xec\x2c\x78\x3c\x3c\xde\x25\x7a\x3c\x87\xc3\x18\x9c\x80\x63\x8b\x1f\x6f\xc5\xd7\xea\x84\x87\xa4\x1c\xbd\xc2\x9b\xee\xb8\xd8\x8b\x4e\xa6\x7e\x61\xc7\x93\xdf\xdc\xe6\x1f\xc3\x4b\xfa\xa5\xd3\x5a\x6d\x36\x74\xb5\x9c\x58\x26\x82\x60\x2b\x25\x5d\xa1\x94\xf5\xa1\x7c\x91\x44\x50\xcd\xe0\xe7\xd8\xbf\xc4\x28\x1e\x07\x4e\x56\xbb\x80\xfc\xd0\x3b\xa7\x93\xdb\xd3\xe3\x1d\x2a\x2d\xf6\xfc\xf1\x60\x36\x26\x2b\x2c\xc2\x57\xce\x66\x7b\x2c\x11\x4c\x8f\x26\x82\xa9\x5d\xb7\x82\x26\xfc\x1f\x5a\xe2\x12\x9a\x9e\xaf\xa5\xc7\xf2\xc2\xf4\x68\x5e\x98\xda\x35\xab\xd1\x84\x98\xf2\x3d\x2e\xa0\xd6\xaa\
4\xb9\xa8\xca\xc5\x25\x1c\x9e\x05\xed\xb7\xa1\x48\x70\xf2\x03\xf4\xfd\x9b\x63\x60\xeb\x44\x24\x16\x86\xd1\xe0\xde\x0f\x8b\x0f\x6c\x5e\xe6\x00\x24\xa5\x7d\x4f\x75\xab\x91\x77\x97\x0a\x92\x08\x61\xa2\xe0\x4d\x43\xe7\xad\x72\x13\xd1\xb4\xf9\x71\x7f\x5f\x24\xe3\x75\x84\xea\xa1\xb9\x2b\xc3\x88\x4c\x04\x45\x94\xfc\xbb\x16\x0c\x8d\xd0\xfe\xe3\x86\xb1\xa3\x23\xf4\x43\x81\xae\x38\x12\xf9\xda\xe5\x05\x12\x99\xea\x14\x95\xd5\xff\xfb\x3f\xff\xb7\x1e\x96\x74\x14\xc0\xf1\x1d\x4b\xcf\x07\x80\xf7\x06\xc1\x5f\x5a\xef\x2b\xf0\x82\x49\x6b\xe5\x02\x18\xeb\x66\x48\xf4\x2f\xbe\xfe\x25\x30\x98\xef\x50\x57\x9f\xa0\xaa\x2e\xa6\xe3\xa1\xd6\x95\x64\x0b\x36\x87\xc3\x0f\x8d\x1c\x5f\x72\x96\xa3\xa2\x5c\xad\xab\x5a\x4a\xd0\xad\xdd\xd5\x3c\x1c\xdd\xd0\x64\xb1\x1c\x8a\x77\xbd\x57\xdb\x84\x24\x74\x57\xe9\x5f\x45\x56\x8d\xd7\x46\xbe\x35\xaf\xc3\x31\xac\x87\xe7\x51\x6d\x50\x27\x35\x2a\x50\x0b\x3a\xb6\x38\xcc\x83\x7e\x3c\xd0\x91\x61\xf9\x9a\x01\x35\x77\x1a\xed\x9a\x12\xb0\xc6\x7a\x5b\xf3\xd5\x62\x54\x37\x81\xdf\xc1\x04\xeb\xb4\x5e\xf6\xdd\xef\xcb\xf6\x9c\x5d\xa3\x72\x91\xcd\x2f\xe1\x25\x44\xbc\x5c\xe8\xaf\x34\x26\x29\x3f\xae\xa5\xf3\x68\x07\xe9\x80\x29\xdf\x4c\x80\x9e\x7a\x4f\x23\xb0\x37\x49\xd2\xd2\x05\xea\xdb\x04\xea\x41\xf2\x22\x05\x36\x96\x1f\x7c\x4e\x99\x0f\x47\xf8\xbe\x44\xa9\x92\xe8\xe3\xdb\x95\x28\x84\x8c\x1b\x0a\x3d\x06\xa1\x7b\x9b\xbe\xd8\xbd\x8d\x77\xb2\x87\x7e\x05\x89\x4c\x24\x0f\xf2\xd7\x46\x1f\x81\x55\x1f\xf0\x46\x65\x78\xc7\xc0\x9e\xfe\x0a\x66\xd6\x44\x2d\x4f\xa3\x16\xfe\x7c\xfa\xf8\x80\xa2\x1c\x66\xca\x78\xde\x44\xde\x3a\x6c\xaa\x13\x58\xcd\x77\x08\x68\xda\x77\x88\x3f\x0f\x7a\x39\x89\xca\x35\xda\xd1\x58\xf2\xd7\xe0\xeb\xa6\x24\x1a\x58\x1d\xd5\x80\x8a\x1e\x00\xb5\xa4\x44\x8b\xb1\xed\xec\x4f\x27\xdd\x69\xe7\x89\xaa\xf3\x0b\x2d\x1b\x99\x54\xe7\x17\xe8\xb8\x37\x96\xec\xa1\x3f\x1c\x1f\xcb\xa0\xdc\xcf\x4e\xd4\x22\x46\x75\x7e\xd1\xcf\x33\xb4\x17\xf4\x16\x7a\xef\x73\x4e\xbe\x09\xb1\xa2\x63\x60\xf0\xde\x07\xbe\x5a\x97\xcb\xc5\xbd\xfb\xe8\x1e\x4c\xfa\xde\x9b\x8a\x5f\x25\x3f\xf7\xee\x6b\x59\x21\xfc\x2e\xbb\xab\x7e\x97\x5f\xee\x7c\xf5\x51\x4d\xd2\xbd\x5a\x9e\x73\xf4\xed\xb3\xef\x51\x7a\x59\xce\x73\xb4\xbc\xa8\xca\xf3\xf2\x6f\x7c\xb5\x9e\xa2\x79\xf9\x9e\xa3\xd5\xe1\x2f\xeb\xa9\x7c\x25\x86\x99\xf6\xf5\x05\xcf\xca\xa2\xcc\x84\xf3\xe6\x25\x28\xfc\x82\x55\x15\x5f\x2d\xd6\x80\x0f\x1a\x55\x33\x8e\x8a\xe5\x7c\xbe\xbc\x2a\x17\x67\xf7\xe5\x9c\xa7\x30\xbf\xde\xb9\x48\x74\xaf\x36\x9a\x7b\x72\x72\xb7\x03\x70\xc8\xce\xf3\xde\x2c\x6a\x73\x44\x52\x3c\xbb\xf3\x95\x54\x97\x3a\x34\xd9\x4c\x73\x77\x07\x30\xd1\x67\xd0\x1d\x28\xa7\x7d\xbb\xe8\xcd\x1a\xff\x41\xfb\x7e\xb8\x58\xe6\xfc\xf4\xfa\x82\xb7\xc9\x5c\x3b\x57\xad\x5e\x3c\xca\x85\x3e\x6f\xfc\xb2\x5c\x9c\x2d\xff\xd7\x2b\xf4\xc1\x3b\xa4\x87\x1e\xbc\x9e\xb7\x2d\xb4\xb3\xa4\x0d\x33\x2a\x34\xd6\x98\xd8\xea\x6a\xc6\xe6\x3d\x4c\xf1\xa1\x77\x20\x27\x62\x56\xf5\xde\x28\x79\x8a\x51\xfd\x36\x63\xeb\xe7\x57\x8b\x17\xf5\x16\x98\x63\x05\x74\xd8\xfd\x1d\xc0\x9b\x25\x12\xa8\x1a\x27\x85\x52\x47\x8c\x2e\xb8\x5c\x1f\x12\xcf\xe1\x20\xf1\x9e\x90\x8d\x2e\xab\x37\xef\x65\x01\x43\x01\x01\x9f\x3b\x93\x5f\xbd\x7e\xbd\x9c\x95\x8b\xa5\xe8\x15\x43\x57\x3c\x45\xea\xa0\xaa\x9a\xb5\x3e\x54\x06\xad\x64\xf2\xf1\x8e\x3a\xa2\x0a\xcb\x26\x1f\xa7\x7f\xff\xf8\x76\x4a\xa3\x6d\x96\x44\x06\x27\x76\x5f\x3f\x7b\xfa\xa4\xaa\x2e\x5e\x8a\x21\x63\x5d\x35\xd8\xfe\x98\x96\x67\x72\x33\xcb\xe1\x2f\xeb\x3f\x6e\x83\xf9\xde\xe5\x9a\xc3\x0b\x5b\x56\xdd\x7b\x70\x67\x48\xe8\xbb\xf2\xec\x27\x40\xf8\x40\x74\xf8\x97\xf5\x4c\x04\xe5\xf2\x6c\xb1\x5c\xf1\xfb\xf3\x72\xc1\xef\x34\xa4\xaf\x78\xea\x6f\x45\x52\x28\xe9\x67\x9e\xca\xb1\x49\x1e\x33\xbe\x77\x78\x
34\x2f\xd3\x23\x81\x42\x04\xe7\x3b\x47\x47\x28\x5f\x2e\xee\x55\x68\xf9\x81\xaf\x56\x65\xce\xeb\x15\x87\x7a\x81\xe3\x8e\x76\x06\x59\x2d\x1d\x88\x08\x77\xaf\xd9\xd1\x00\x0b\x12\x1d\x80\x43\x49\xb3\x0b\x25\x2c\x04\xd6\xc9\x74\x10\xe0\xee\xc1\x9d\x8f\x06\x71\xc8\x27\x6a\x65\xab\x66\xf9\x8f\xf7\x09\xf9\xf8\x56\x88\x61\xfa\x46\x8a\xe1\xed\xde\x9d\x3b\xff\x3f\x00\x00\xff\xff\x30\x1c\x67\x1d\x64\x21\x06\x00") +var _web3Js = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xfd\x6b\x7b\x13\x39\xd2\x38\x0e\xbf\xcf\xa7\x50\xfc\xdc\x0f\xb6\x89\xb1\x73\x60\x18\xc6\x99\x0c\x1b\x02\x33\x64\x6f\x20\x5c\x40\x76\x76\xef\x6c\x96\xab\xed\x96\xed\x1e\xda\xdd\xfe\x75\xb7\x73\x18\x92\xef\xfe\xbf\x54\x3a\x95\x0e\x7d\x70\x12\xe6\xb4\xc9\x0b\x70\x4b\xa5\x53\xa9\x54\x2a\x95\x4a\x55\x19\xfd\x7f\xcb\x28\xa3\x7b\x9d\xc9\x32\x19\x17\x51\x9a\x10\xda\x29\x7a\x49\x2f\xeb\x7e\x51\x29\x79\x27\xed\x2d\xbb\x5f\xa2\x49\x67\x3d\x39\x49\x4f\xf9\xaf\x02\x7e\x9d\x05\x19\x09\xf6\x8a\xcb\x05\x4d\x27\x44\xd6\xb5\xd7\x92\x45\x5b\x0f\x1e\x88\xc4\x5d\x56\x66\xf9\xe0\x41\xd0\xcd\x68\xb1\xcc\x12\x12\x74\xd2\xde\xfa\x66\x97\xa5\x47\x32\x2d\x12\x69\xac\xd6\xc9\x5e\x42\xcf\xc9\xcb\x2c\x4b\xb3\x4e\xeb\x20\x48\x92\xb4\x20\x93\x28\x09\xc9\x3c\x0d\x97\x31\x25\xed\xd6\x46\xba\xd1\x6a\xb7\xba\xbb\xc5\x2c\x4b\xcf\xc9\xa4\x3f\x4e\x43\xba\xd7\x7a\x73\xf4\xe2\xf8\xf5\xcb\x4f\x6f\x8f\x3e\x7e\xfa\xf1\xe8\xf8\xed\x8b\x56\x6f\x72\xcd\xea\x8b\xf7\x58\xdf\xf7\xbe\xd0\x8b\x45\x9a\x15\xf9\xf0\xcb\xf5\xf5\x2e\x1b\xc3\xc9\xe6\x69\x7f\x1c\xc4\x71\x27\xee\x8b\xac\x9e\xec\x7d\x87\xf2\x01\x26\x7b\x00\xb8\x75\x7a\x42\x4f\x77\x45\x57\xf3\x4e\xf2\x2c\x19\xd2\xee\x75\x2f\xee\xe9\x92\xb4\xc7\x71\x77\x2d\xa0\x58\x93\x32\x13\x7a\x11\x35\xc2\xd5\x24\xcd\x3a\x0c\x3a\xdd\xdb\xdc\x4d\xbf\xcf\xfa\x31\x4d\xa6\xc5\x6c\x37\xdd\xd8\xe8\xe6\x9d\x8c\x21\x5e\x75\xe3\xba\xdb\xf9\xb2\x35\x3c\x51\x5d\x16\x55\xf4\x38\x96\x7a\xa2\xed\xee\x97\x35\x9e\x20\x3b\xb3\x77\xb2\x46\xc8\x97\x35\x42\x08\x69\x8d\xd3\x24\x2f\x82\xa4\x68\x0d\x49\x91\x2d\x69\x8f\xa7\x46\xc9\x62\x59\xe4\xad\x21\x39\x81\x6f\x09\x0d\x79\x49\x30\xa7\xad\x21\x69\x7d\x4a\xcf\x13\x9a\xb5\x7a\x3a\x87\x8d\x8e\xe5\x04\x61\x98\xd1\x3c\x6f\x89\x9c\x6b\xf8\xff\x54\x54\x2d\x8b\xc3\xff\x22\x2d\x5d\x16\xf5\xed\xa5\x9f\x50\x11\xa3\xbd\xd1\x65\x41\xf3\x9d\x6d\x7f\x7b\x12\x48\x61\x7a\x8d\x90\xeb\xde\x9d\x20\xe0\x46\xfd\x51\xc3\x41\xd8\x6b\x86\x80\x95\x51\xfd\x47\x1d\xfa\x38\x4d\x0a\x9a\x14\xb7\x1e\xfc\x9f\x72\xde\xd9\x8c\xfd\x61\xa6\x7d\x12\xc4\xf9\x6f\x37\xf4\x8c\xe6\x34\x3b\xf3\xad\xfa\x3f\xfa\xa4\xe5\xcb\xd1\x7b\x3a\x8d\xf2\x22\x0b\xfe\x0b\x26\xaf\x57\x55\x07\x3d\x3f\xba\x15\xdf\x2f\xb2\x20\xc9\x27\x5e\xd6\xf7\x67\xc1\x41\x66\x91\xc2\xea\x48\xc8\x69\xf1\xa1\x9a\xa4\xee\x0c\x17\x76\xd3\xbf\x49\xa3\x5f\x79\x02\x82\x26\x88\xaf\xaa\x60\x91\x45\xf3\x20\xbb\xf4\xf6\x23\x4d\xe3\xda\xc9\xdb\x17\x6d\xfd\x79\x51\x68\xee\xc1\x95\xd5\x94\x21\xe1\xa0\x74\x1b\xff\x23\x21\xc1\xdb\xfb\x30\xca\xd3\xf3\xe4\x16\x3d\x0f\x92\x34\xb9\x9c\xa7\xcb\x7c\x85\xae\x47\x49\x48\x2f\x68\x68\xec\x5d\x77\x36\xb1\xba\x72\xd4\x1d\xb3\xf6\xf3\x28\xb9\x0d\xe3\xde\x5f\x02\x26\x5e\x26\x21\x0d\x5b\x16\x9a\xe8\x19\x23\x84\xbf\x00\x8e\x46\x51\x18\x36\xc3\xd1\xcd\xea\x3f\x0b\xe2\xa5\xb7\xfb\xcb\x28\x29\xb6\xbf\x79\x52\x3d\x05\x6f\xe9\xf9\xf3\xe8\x77\x44\xfe\xad\xd6\xdc\xc1\x2c\x48\xa6\xbf\x27\xe9\xdc\x09\xe5\x94\xd4\x8d\xa4\xfa\x4a\xaa\xf1\x62\xe6\x1d\xdf\x8d\x6a\x11\xb4\x76\xba\xb6\x76\xdd\xfb\x72\x7d\xda\xdb\xfe\xdd\x0e\xfd\x7f\xa1\x33\xef\xef\x24\x3b\x4e\x96\x49\x78\x63\x52\xb9\xf5\xc6\x75\x7f\xec\xfd\x73\x1f\x7b\xef\x0f\x7d\x7f\xe4\x33\x87\x77\xf0\xe2\xbc\xf0\x47\x93\x36\xb
f\xee\x66\xae\xf7\xaa\x9d\x3b\xdb\xab\x56\x9d\xf7\x49\x96\xce\x6f\x39\xed\x45\x7a\xcb\xa3\xe6\xed\x04\xbe\xdf\x77\xdd\xfc\x11\xf0\x17\x25\x61\x94\xd1\x71\x71\xe8\xdd\x33\x57\xe8\xc9\xed\x26\x22\x1a\x07\x8b\x8f\xbf\xeb\x64\xf8\x31\xd9\xec\xb4\x4b\x17\x69\x1e\x55\x1d\xd4\x17\xc1\x65\x30\x8a\xa9\x29\x14\xfc\x2e\x5c\xa9\x8c\xe6\xee\xe4\xf8\x75\x3b\x1a\xd8\x97\xe3\x7d\x61\xe2\xf3\xb7\x3f\xc9\xdc\x09\x92\x4a\xea\x6e\x46\x67\xbf\x03\xfa\xff\xb0\x58\xbf\x8b\xf3\xe3\x8d\xf9\xe4\xd7\xc6\xba\xcd\xf4\xee\xd1\xde\x10\xed\xb7\xde\xb8\xbe\xf6\xcc\x1e\x7a\xb6\xb4\x2a\x39\xee\x71\x13\x39\x0e\x8c\x37\xc8\x9e\xb4\x70\xe8\xb4\xfb\x83\x49\x9a\xcd\x83\xa2\xa0\x59\xde\xee\xee\x02\xc0\x87\x34\x8e\xc2\xa8\xb8\xfc\x78\xb9\xa0\x26\x2c\x6b\x9f\x41\xad\x0d\x1e\x3e\x5c\x23\x0f\x0d\x48\xa1\x73\x27\x51\x4e\x02\xb2\xc8\xd2\x94\x01\x93\x62\x16\x14\x24\xa3\x0b\x76\xc8\x4a\x8a\x9c\x88\xb9\x23\x2c\x93\xd5\x70\x58\x90\x79\x50\x8c\x67\x34\x1f\xb2\x4f\x91\x8d\x7e\x9e\x9c\xe2\x8f\xc7\xc6\xd7\xa9\x99\xb9\x63\x7d\x9f\x9e\x3c\x39\x3d\x39\xed\x91\x7e\xbf\xbf\x46\x1e\x0e\x9c\xb1\xc9\x1e\xef\x11\x65\x4d\xd3\xe9\x8a\x29\x2e\x66\x51\xde\xff\x04\x0b\xe3\x47\x89\x20\x06\xd8\xe7\xe8\x3a\x64\x19\x87\x49\xb1\x8b\x80\xf9\xbe\xed\x83\x3e\x82\x1c\xd1\xdc\xee\xda\xf5\xee\xda\x9a\xa7\x1f\xfd\x45\x96\x16\x1c\x6b\x7b\x24\xa1\xe7\x46\x5f\x3b\x5f\xae\xbb\xbb\xd5\xa5\xfa\x20\xbd\x64\xcb\x71\x91\xb2\xc6\x3d\xb0\x75\xed\xf6\xa3\x5c\xcc\xb9\x46\x08\x23\x47\x89\x14\x61\xd7\xb2\xbe\xce\x12\xfb\x30\x6f\x9d\x81\xc0\x76\xe7\xdf\x27\x9d\x93\xcd\x47\xdf\x9d\x3e\xec\xfe\xfb\xb4\xfb\x6c\xd0\xe5\xe3\x34\x0f\x0e\xa5\xdd\xba\xee\x7d\x69\x61\x52\x6c\x0d\xbf\xeb\xb5\x38\xbd\xb5\x86\x5b\x8f\xaf\x4f\x7b\xdf\xfc\xce\xe4\xfd\x3c\x4d\xe3\x1a\xda\x1e\x31\x90\x12\xc2\x66\x79\xf2\x7f\x4e\xa5\xf0\xeb\xb1\xfe\x79\x8a\x92\x77\xf0\x47\x1d\x19\x43\xcf\x6e\x4a\xc3\xac\xf0\x2a\x44\xcc\xe1\x6d\x0a\x66\xa9\x2b\x92\xaf\x59\xa4\x82\x76\x79\x8b\x55\x65\x6f\x42\xb5\xff\x61\xa8\x35\x69\xf6\xe1\xff\x34\x22\x5a\xd1\x9f\x7a\x8a\x7d\xf2\x7b\x53\x2c\xdb\xc3\x14\xc9\x16\x7e\x9a\x2d\x66\x94\xc0\x66\x07\x84\xdb\xf7\x51\x2e\xcb\x55\x3f\x04\x5d\xc2\xcf\xc7\xe8\xf7\x29\xce\xd8\x31\xbe\x4c\xfa\x25\x62\x6b\x55\x3f\x9f\x1a\xf5\x88\xa2\x1e\x2a\x87\x4e\xde\x98\xcc\x59\xe9\x95\xe8\x9c\x17\x70\x08\x9d\x25\xaf\x4a\xe9\x66\x99\x2a\x52\xe7\x8d\x56\x96\xbe\x19\xb1\xb3\x4a\x38\xa9\x7f\xd9\xea\x5d\x77\x6f\x46\xf8\xa2\x77\xf5\x94\xff\x6d\x13\xca\x1f\x3c\x84\x0e\x7f\x9c\x45\x39\x99\x44\x31\x65\x94\xba\x08\xb2\x82\xa4\x13\x72\x4e\x47\x3b\xfd\x5f\xf2\xfe\x1a\x80\x88\x2f\x06\x30\xc9\x28\x25\x79\x3a\x29\xce\x83\x8c\x0e\xc9\x65\xba\x24\xe3\x20\x21\x19\x0d\xa3\xbc\xc8\xa2\xd1\xb2\xa0\x24\x2a\x48\x90\x84\x83\x34\x23\xf3\x34\x8c\x26\x97\x50\x47\x54\x90\x65\x12\xd2\x0c\x08\xbe\xa0\xd9\x3c\x67\xed\xb0\x8f\x9f\xde\x1e\x93\xd7\x34\xcf\x69\x46\x7e\xa2\x09\xcd\x82\x98\xbc\x5b\x8e\xe2\x68\x4c\x5e\x47\x63\x9a\xe4\x94\x04\x39\x59\xb0\x94\x7c\x46\x43\x32\xba\x14\x54\x44\xc9\x8f\xac\x33\x1f\x44\x67\xc8\x8f\xe9\x32\x09\x03\x36\xe6\x1e\xa1\x51\x31\xa3\x19\x39\xa3\x59\xce\x66\x68\x47\xb6\x25\x6a\xec\x91\x34\x83\x5a\x3a\x41\xc1\xc6\x90\x91\x74\xc1\x0a\x76\x49\x90\x5c\x92\x38\x28\x74\x59\x17\x05\x7a\xa4\x21\x89\x12\xa8\x76\x96\xca\x95\x1d\x15\xe4\x3c\x8a\x63\x32\xa2\x64\x99\xd3\xc9\x32\xe6\x82\xe3\x68\x59\x90\x9f\x0f\x3f\xbe\x3a\x3a\xfe\x48\xf6\xdf\xfe\x8b\xfc\xbc\xff\xfe\xfd\xfe\xdb\x8f\xff\xda\x25\xe7\x51\x31\x4b\x97\x05\x61\x12\x25\xd4\x15\xcd\x17\x71\x44\x43\x72\x1e\x64\x59\x90\x14\x97\x24\x9d\x40\x15\x6f\x5e\xbe\x3f\x78\xb5\xff\xf6\xe3\xfe\xf3\xc3\xd7\x87\x1f\xff\x45\xd2\x8c\xfc\x78\xf8\xf1\xed\xcb\x0f\x1f\xc8\x8f\x47\xef\xc9\x3e\x79\x
b7\xff\xfe\xe3\xe1\xc1\xf1\xeb\xfd\xf7\xe4\xdd\xf1\xfb\x77\x47\x1f\x5e\xf6\x09\xf9\x40\x59\xc7\x28\xd4\x50\x8f\xe8\x09\xcc\x59\x46\x49\x48\x8b\x20\x8a\xe5\xfc\xff\x2b\x5d\x92\x7c\x96\x2e\xe3\x90\xcc\x82\x33\x4a\x32\x3a\xa6\xd1\x19\x0d\x49\x40\xc6\xe9\xe2\xb2\xf1\x44\x42\x65\x41\x9c\x26\x53\x18\xb6\xa2\x32\x42\x0e\x27\x24\x49\x8b\x1e\xc9\x29\x25\xdf\xcf\x8a\x62\x31\x1c\x0c\xce\xcf\xcf\xfb\xd3\x64\xd9\x4f\xb3\xe9\x20\xe6\x15\xe4\x83\x1f\xfa\x6b\x0f\x07\x92\xd9\xfe\x0d\xc8\x76\x9c\x86\x34\xeb\xff\x02\x2c\xf2\x6f\xc1\xb2\x98\xa5\x19\x79\x13\x64\xf4\x33\xf9\xdf\xb4\xa0\xe7\xd1\xf8\x57\xf2\xfd\x9c\x7d\xff\x8d\x16\xb3\x90\x9e\xf5\xc7\xe9\xfc\x07\x00\x0e\x83\x82\x92\xed\xcd\xad\x6f\x80\xe1\xd5\x6f\x05\x15\x02\x2c\x2a\x23\xe4\x31\xdf\xde\x21\x24\x05\x04\xcc\x76\x41\x1f\xe4\x61\x52\x98\x80\x51\x52\xf8\xe0\x8e\x1d\xc0\x65\x09\xe4\x8b\xcb\x24\x98\x47\x63\xc9\xc6\x51\x89\x90\xe7\x00\x8f\xf2\x95\xfc\x50\x64\x51\x32\x35\xcb\xe4\x90\xe6\x83\x7e\x4f\x03\x6b\x8c\x19\x0d\xbc\x63\x3c\x76\x41\x97\x65\xb0\x9e\x6e\xab\xfe\x02\x70\x94\x8b\x01\x1a\x9c\x39\x47\x55\xf4\x60\x87\x15\x7c\x5a\x5a\x88\xa3\xfc\xbe\xaa\x02\xb6\x11\x0e\x7c\x75\xa5\x4e\x8f\xa4\x04\x7a\x3f\xcb\x82\x4b\x0e\xce\x99\xb8\x25\x0a\x1c\x30\xfa\x44\x12\x80\x58\x49\x9c\x43\x84\xa4\x48\x09\x4d\x18\x0d\x0f\x42\xca\xfe\x53\xad\x30\x66\x1c\x70\x36\xc9\xb8\x92\x90\x6b\xcd\x8d\x99\xd7\x8d\x47\xcc\xc0\x72\x73\x67\x86\x24\xb2\x07\x35\xe4\x46\x17\x81\xf7\xcf\x69\x31\x4b\x43\x4f\xb7\xb8\x72\x3d\xcd\xe6\x84\x4b\x2e\xa9\x31\x23\x6b\x84\xaf\x41\x51\xfc\x93\x98\x19\x91\x45\xfe\x06\xbd\x27\x5f\x38\xf1\x5c\x2b\xb1\xfc\x6f\x1c\xf3\x39\xf9\x82\x2b\xbb\x86\x2c\x78\xab\x90\x93\x2f\xf0\xae\xe1\x9a\x88\xcf\x88\xf1\x06\x2e\x11\x31\x32\x84\xbe\xb0\x9d\x88\xb1\x7b\x40\x88\x81\x0c\xb4\x53\xe3\x2e\x39\x38\x92\x28\x62\xd8\xcc\x4d\xf1\x0e\x61\xad\x3f\x89\xe2\x82\x66\x1d\x54\xb6\x8b\x74\x10\x82\x8a\x0a\x21\x14\x48\x22\x00\x9d\x42\xf7\x64\xf3\x74\x97\xf3\xcf\x68\x42\x3a\xeb\xb8\x11\x5c\x07\x7f\xa0\xc1\x9f\x72\xb4\xa3\xe4\x2c\x88\xa3\x50\xd3\x00\xab\x71\x7d\x48\xda\x64\x83\xe0\xca\xd7\xb0\xac\x81\x6b\x36\x29\xb0\x84\xd2\xc8\x22\x0e\xa2\x84\xd3\x97\x35\x8d\x1c\xe0\x9d\xc8\x29\x9f\x45\x91\x7e\x34\xfa\x85\x8e\x8b\x6b\xab\x42\x39\xc9\xba\x1c\xaf\x36\xb4\xe0\xca\xa7\x0e\x75\xc3\x99\xb9\x1e\x2f\x6f\x09\x5c\x30\x69\xa8\x58\xde\x39\x61\xc0\xa7\x3d\x72\x02\xe0\xa7\xdd\x66\xa8\x89\xa3\x1c\x24\x20\xbe\xf8\xca\xb1\x93\x63\x34\x00\x0b\xe0\xd8\xf1\xa5\x2f\x74\x81\x32\xc4\x38\xcd\x36\xc2\x4d\xee\x2e\x7d\x81\x9d\xbc\x8c\xbe\x73\x49\xe0\x53\x5a\xe0\x15\x98\x0b\xce\x21\x48\x96\x15\x13\x7d\x63\x25\x8c\x1a\xfa\xf3\x60\xd1\x29\xe3\xb1\xa0\x95\xf3\xac\x11\x83\x77\xf2\x9a\x3b\xbc\xa7\x27\x50\xe4\x94\xb3\x67\xf9\xa5\x56\x11\xea\x8f\xd8\xa7\x8e\x26\x93\x9c\x16\x4e\xa7\x32\x1a\x2e\xc7\x14\xf5\x2b\x18\x8f\x7b\xa4\xa6\x73\x80\x9d\x22\x28\xa2\xf1\xbb\x20\x2b\x5e\xc3\x4b\x22\xab\xe6\xbe\x9d\xdf\xf1\xf4\x53\xd6\x95\x31\xa6\x44\xc3\x0f\x6e\x95\x6f\x82\x62\xd6\x9f\xc4\x69\x9a\x75\x3a\x4e\x8b\x1b\x64\x67\xab\x4b\x06\x64\x67\xbb\x4b\x1e\x92\x9d\x6d\x31\x68\x84\xbe\x60\x3c\x26\x1b\xa4\xa3\x36\x1d\x03\xeb\x25\x28\x24\xcf\xd0\xde\x45\xc8\xce\x36\x19\x1a\x09\x25\x9d\x95\xa8\xef\x91\x4d\x8c\xfd\x8c\xe6\xcb\xb8\x90\xd4\xc3\x67\xf0\xcd\x32\x2e\xa2\x9f\xa3\x62\xc6\xe7\x44\x52\xa0\xd1\xb7\x9e\xa2\xa3\x9e\x39\x83\xb2\x72\x31\x42\x5e\xbf\x79\xe2\xf3\x93\xbe\xd5\xaa\x6f\x0d\x34\xec\x01\x5a\x23\x6a\x78\xad\xd6\xae\x5e\x38\x34\x9e\x88\x11\x8b\xce\x8a\x5d\x21\xcd\x5e\x06\xe3\x59\xc7\x66\x4c\x11\xa6\x2d\xc6\xf5\x4b\xe7\x4b\xcf\xd5\x69\x17\x17\xe2\x08\x81\xae\x6c\xb8\xda\xce\x8e\xd9\x7d\xb9\x8e\x10\x11\xaa\xb5\xcb\xa8\x98\xc6\x13\x01\
x62\xcf\x11\x74\xc0\xed\x92\xc4\x13\x7c\xd8\x93\x85\x9b\x30\x97\xe2\xc6\x1e\xa1\xe2\x19\x1e\x19\x90\x6d\x0d\x7a\x4d\x68\x9c\x53\x6b\x78\x83\x01\x09\xd3\xa4\x5d\x90\x20\x0c\x89\x28\x55\xa4\x66\x95\x7d\x12\x15\xed\x9c\x04\x71\x46\x83\xf0\x92\x8c\xd3\x65\x52\xd0\xb0\x04\x4b\x5f\x69\x9c\xd7\x7a\x11\x0e\x06\xe4\xe3\xd1\x8b\xa3\x21\x99\x44\xd3\x65\x46\x09\x3b\xb0\x25\x34\x67\x27\x40\x76\x4a\xbb\xcc\x4d\x66\xf5\x5b\x10\xc9\x1f\x67\x92\xcd\xc9\xa0\x18\x81\x12\x2b\x25\xcb\x5c\xa1\x35\xa3\x93\x00\xd4\x31\xe7\xb3\x34\xa6\xbc\x87\x51\x32\x5d\xaf\x61\x04\x15\x3c\xc0\xe6\xfc\x62\xd0\x3d\x92\x3a\x2b\xdf\x58\xe4\x72\x4e\x6a\x45\x7d\xcf\x16\xd7\x71\x55\x63\x88\x80\x78\xc3\xe4\x3c\xd0\x64\x9d\xd3\xc2\x99\x53\x4e\x56\x6f\x83\x39\xb5\xf7\x21\x9d\x83\xe5\x4c\xb7\xac\x67\xf3\xa9\xde\xcf\x74\xc5\x9e\x3a\x15\x5f\x14\x18\xd4\x52\xad\xfc\xab\x18\xb6\xac\x64\x91\xd1\xb3\x28\x5d\xe6\xaa\x43\xdb\xbb\x0c\x25\x51\x42\xa2\xa4\x70\x4a\xd4\xe1\x1f\xf5\xd7\xd7\x20\xfb\x9b\xa4\x19\x81\x47\xc2\x11\xd9\x23\x5b\xbb\x24\x22\xdf\xcb\x01\xc8\xf7\xc2\x24\xda\xd8\x28\x2b\xce\xfe\xac\x3e\x6f\xec\x91\x8d\x8e\xc4\x41\x44\x1e\x91\xad\x53\x26\xe1\x93\xab\x2b\xb2\xb9\x5b\x5a\x49\x05\x2b\x17\xf4\xb0\x41\x22\xf2\xb0\x6c\xe6\x36\xec\x5e\x30\xe1\xa0\x8c\xed\xcb\xbf\x6b\x27\xd5\x4c\xb9\xee\x76\xba\xd6\x14\x0e\x06\x64\x12\x65\x79\x41\x68\x4c\xe7\x34\x29\xd8\xf9\x8a\xa3\xa9\x47\xf2\xcf\xd1\x82\x44\xc5\x2a\x53\x6e\x60\x7f\xd3\x87\x7d\x86\xbf\xca\x19\x80\xa7\xf3\x61\x18\xb1\x46\x82\x58\x2d\x72\x81\x4f\x87\xff\xb8\xf8\xf6\xf3\x45\x4d\x3a\x25\x0c\xe2\x24\x22\x1b\x64\xeb\x54\xf2\x09\xb2\x41\x9c\x6e\x78\xd0\x5e\x8b\x60\x8b\xf9\x79\x20\xc5\x56\xe9\xa1\x7d\x4e\x15\x37\x66\x3d\x7f\x68\xa6\xc2\x84\x2d\x13\x53\xb7\x5c\xfc\x35\x94\x49\xca\x18\xd2\x66\x15\x43\x22\x8d\x68\xba\x96\xa3\x0c\x06\x64\x1c\xc4\xe3\x65\x1c\x14\x54\x0a\x3e\xec\xc8\x27\xfa\x42\xa2\x82\xce\x6f\xc1\x8e\x18\x2b\x3a\xf9\x13\x31\xa5\xae\x0d\x7b\xbd\xd2\xbe\x72\xcb\x09\xf9\xfd\x18\x0c\x66\x2e\x5f\x9d\xb7\x10\x47\x5b\x24\xfa\x51\xa3\x0d\x11\xba\x48\x71\x33\x99\x56\x68\x8c\x38\x64\x63\x8d\x91\x4c\x57\xb7\x9a\x4a\x25\xe2\xd7\x25\x95\xeb\x41\x50\xc3\x1e\xf1\x0f\xea\xf7\xe9\x88\x50\x31\xad\x23\xe2\xd0\x20\xdb\x34\x41\x4b\xa5\x92\xa8\x04\x21\x65\x3a\xa2\x72\x84\x88\x12\x70\xc2\x80\xd6\x34\x62\xaa\x35\x44\x78\x88\xbe\xd3\xb1\x81\x9b\xd5\x15\x44\xb2\x14\xa7\x62\x0c\xcf\x89\x38\xf7\x9e\xc2\xad\xe3\xfe\x1d\x6b\x94\xf8\x90\x3b\x30\x32\xb9\xbe\xb4\x5a\xc4\xd0\x8b\xc8\x1a\xb5\x86\xa9\x4a\xe5\xa0\x47\x55\xab\x67\xc0\x18\xe5\x1c\x88\x95\xb9\xeb\x91\x36\x51\x47\xa9\x93\xa8\x4f\x0e\x16\x5d\x2b\x65\x92\x83\x01\xc9\x97\x73\x7e\x43\xe7\xd9\xa5\x84\x88\xa8\xe0\x45\x75\x27\xd1\x29\xe3\x8a\xea\x0b\xb6\x24\x1f\xff\x91\xcd\x9b\x88\x90\xd2\xa6\x83\x82\xc1\x80\x64\x74\x9e\x9e\xc1\x35\x26\x19\x2f\xb3\x8c\xc9\xa7\x4a\x38\x4d\x21\x59\x74\x33\xca\xa1\xe7\x9e\xde\xe6\xab\x68\xfc\x24\x32\x1b\x6b\xfe\x8c\x91\x91\x47\x4e\xfd\x8d\x29\xed\x83\xb5\x0e\x4b\xae\x75\xbc\xa7\x56\xc9\xe3\x3c\x54\x56\x58\x57\x0e\x92\xac\xd8\x0e\x86\x2f\x49\xcc\xfb\x0b\xde\x5b\xd6\xd6\x58\xdc\x32\x61\x53\x0b\xe8\x7d\x87\xdb\xab\xda\x26\x18\xe2\x5a\xb4\xd3\xed\x79\xb3\x9f\xa7\x69\x5c\x96\xc7\x84\x90\x92\xac\xe3\x8a\x3c\x7c\xb9\x59\xda\x6c\x55\x26\xe7\xc2\x65\xb9\xef\x69\x50\xda\xe3\x63\x9e\xb9\xc6\x08\xc2\xb5\xdf\x00\xd4\x29\x9b\x0d\x69\x38\x3b\x7c\xdc\x6b\xf1\xbb\xdf\xd6\xf0\x1b\xf8\xc9\xfa\xd6\x1a\x3e\x61\xbf\xf1\x75\x6c\x6b\xf8\xb4\xe7\xb3\xf5\x88\x92\xa2\x35\xdc\xda\x64\x3f\x33\x1a\xc4\xad\xe1\xd6\x36\xfb\xcd\x6f\x65\x5b\xc3\xad\x1d\xf6\xb5\xe4\x50\xd0\xc0\x52\x80\x3d\xb9\x3e\xed\x3d\xfd\x2d\xed\xa2\x6a\xae\xa1\x6f\x66\x4d\x84\x2b\x59\xc5\xa8\xc8\x2c
\x67\xdb\x16\xe1\xdc\x15\x4d\x8c\xfc\x45\x2b\x2c\x8d\xcc\x9e\x34\xa9\xeb\x16\x76\x47\x25\xc6\x46\x8d\x1a\x45\x57\xe2\xde\xe9\x92\x6c\x27\x5b\xd2\x06\x26\x4c\xd6\xb0\xeb\x2d\x99\xbe\xbb\xb7\x64\xba\xb7\x64\xfa\x6f\xb1\x64\xd2\x0b\xe1\xae\xcc\x99\x9e\x47\xd3\xb7\xcb\xf9\x08\x58\xa1\xe2\xce\xa3\x68\x9a\x40\x62\xff\x17\xc5\xc9\x97\x45\x14\x9b\xf6\x35\xfd\x01\xa4\xf1\x7f\x25\xd8\xd8\x0b\x32\x4e\x93\x49\xe4\x18\x03\xc9\x93\x19\xda\x15\xe0\xec\x02\xdb\x82\x1c\x38\xe7\xd5\x39\x01\x7e\x4f\xe0\xc1\x06\x3b\x67\x31\xbe\xa5\xad\x64\x61\x29\xb0\xb9\x01\xe5\xcc\x43\x86\x63\x0e\x19\xe5\x24\xa1\xd3\xa0\x88\xce\x68\x4f\x72\x22\xb8\x38\x2a\xce\xd3\x76\x4e\xc6\xe9\x7c\x21\xa5\x55\x28\xc5\xe6\x56\x95\x9c\xc4\x69\x50\x44\xc9\x94\x2c\xd2\x28\x29\x7a\xfc\x3a\x94\x91\x7d\x98\x9e\x27\xd6\x99\xce\x54\x93\xb8\xc7\xb7\x2b\x8e\xe5\x2b\x85\xef\x6b\x39\x16\xb6\x94\x12\x4a\x43\x38\x45\x8f\xf4\x1c\x87\x7e\x63\x18\x40\xda\xb5\xb2\xf3\x31\xdb\x35\x18\x30\xd4\x2f\xb9\xb0\x6a\xb7\xcf\xe7\xa2\x33\xee\xbf\xfc\xf8\xea\xd3\xf3\xc3\x9f\xde\x1e\xbf\x79\xfe\xf2\xfd\xa7\xf7\x47\xc7\x6f\x5f\x1c\xbe\xfd\xe9\xd3\x9b\xa3\x17\x2f\xd1\x19\x4e\x69\xe2\x60\x26\xfb\x8b\x20\x7c\x4d\x27\x45\x87\x7f\x15\xe9\xc7\xf3\x34\x3f\x50\x58\x14\x6d\xf6\x8b\x54\x88\x4b\x5b\x4f\xba\x3d\xf2\xe4\xb1\x79\xc3\x83\x77\x4b\x18\x4e\x87\x37\x62\x1a\x60\x98\x13\x2f\x0f\xbf\x25\x38\x7f\xae\xce\xc6\xe6\xa1\x79\x55\x1c\xba\x52\x87\x81\x45\x0f\x42\x8a\xf4\x15\xbd\x90\xe3\xce\x97\xa3\xbc\xc8\x3a\xdb\x08\x7f\xb1\x75\xb5\xcf\x8b\x4b\x2d\xf7\x06\x79\xb2\xd3\x25\x03\x8c\x22\x1b\xdd\xef\xa3\xe9\xac\x10\xc5\x7a\x24\x26\x0f\xbf\x32\x3e\xc5\x0e\x7c\xa7\x68\x2d\x95\xe9\x6e\x8d\x5d\x79\x3c\x33\xd1\xaa\xb4\x73\xbf\xdb\x0c\x58\x6a\x53\xde\x58\xb7\xcf\xd7\xfc\x06\xa9\x9f\xa0\x3a\x4e\xc7\x25\xf9\xf2\x15\xf1\x41\xe6\xdf\x76\xee\x94\x71\x67\xf3\x59\x9b\x64\xe9\xfc\xb8\x98\x3c\xbd\x9f\x38\xcf\xc4\x89\x77\x46\x65\x8c\x4c\xbc\x42\x92\x93\xc6\xbe\x69\x90\xac\xce\xc8\xec\x27\x47\xe5\x73\xd6\xde\xbc\xdd\x5f\x9b\x6c\x88\xea\xc9\x33\x42\xda\x5b\x6d\x32\x24\xed\xcd\xf6\xed\x79\x54\x1d\x26\xd9\x89\x95\x95\xfa\x07\x83\xcb\x09\x13\x8c\xe7\xcb\xb8\x88\xb8\x50\x39\xba\x24\xdb\xff\x99\x33\xf1\x5c\xd9\xd0\x05\xac\xe6\x82\x4e\x69\x56\xb1\x95\xbc\x17\xb5\xd6\xed\xdf\xab\xce\x88\xb0\x65\x2e\x99\x11\x81\x26\x8b\xfa\x18\xd6\x54\x8b\x6a\x73\x8d\xe6\x34\xb7\xb2\xb6\xbb\xfd\x45\x7a\xde\xd9\xda\x7e\xda\xed\x9a\x28\x3d\x98\xd1\xf1\x67\x12\x4d\x0c\x9c\x22\xb1\xc8\x42\x44\x1e\x4d\x13\x1a\x1e\xe6\x6f\x75\xb6\xa3\x88\x56\x75\xcc\xe8\x85\xe8\xb1\x89\x0c\x49\xb4\x70\xe8\x83\xb6\x0b\x53\x12\x4b\xd9\x91\xe5\x3c\x62\x62\x78\x10\xe7\xda\x6a\xd9\x6e\xbd\x16\x5f\x3e\x0c\x49\x76\xb3\xd9\x23\x5b\xdd\x1e\xd9\x7a\x82\xe4\x91\xed\xae\x91\xdb\x25\x7b\x7b\x7b\x8c\x64\xbd\x54\x98\x31\xf6\xf1\x28\x88\xa1\x53\x84\xab\x0e\xf4\x85\x07\x17\x35\x5d\x22\xe2\x8a\x04\x5b\x08\x34\xc8\xc3\xb1\x83\x65\x38\xd3\x82\x61\x45\xbb\x4a\x38\x84\x65\x11\x4d\x09\x97\xd3\x2d\x7a\x53\x5d\x30\xf0\x67\x18\xc5\x32\x60\x3e\x8f\x7b\xbc\x37\x48\x97\xd9\xe9\x92\xab\x2b\xd2\xda\x6c\x09\x1d\xf1\x60\x40\xc6\x8a\x8a\x98\xf0\x2c\x27\x52\xb5\xce\x81\x60\x96\x95\x98\xed\x4a\xd8\xf2\xf2\xd6\x9a\x64\x31\xb1\x1e\xfd\xa3\x67\x72\xf9\x7c\xce\xa3\x64\x69\x2f\x81\xf6\xe4\x96\x7f\x6d\xa8\x5b\x56\xbe\xa5\xee\xc6\x1a\x74\xe8\x06\xe4\xb3\xac\xa6\x9f\xe3\x4a\x02\xf2\x91\x0e\x5d\x89\x76\x44\xf3\x2e\xd5\x1c\xdf\x05\xd9\x7c\x1d\x94\x09\x7e\x5f\x86\x32\x87\x71\xd7\xa2\x0c\x30\x86\xe4\x61\x13\x45\xa2\x39\x17\x45\x0e\x27\xf7\x99\x9b\x5b\x2b\x51\xc0\xf4\xc3\xe8\x2c\x0a\x69\xf8\xfc\xb2\x82\x81\xdf\x84\x9a\x6a\x70\x73\x7c\xd7\xc8\x59\x96\x62\xe7\x78\x65\xf4\x1c\xdf\x06\x3f\xee\x15\x2c\xaf\x5
a\xa1\xa8\x4c\xdc\xd2\xaf\xa5\x1b\xe3\x45\x6e\x6b\xe6\x5c\x94\xe2\x48\x34\xed\xa2\xc8\x11\xce\x7c\x18\xf2\x2c\x2f\xd8\xac\x6e\x29\xad\x6d\xb5\xc9\x33\xbe\x2f\x0b\xb7\x18\xab\x61\xb3\xf4\xd8\x88\x1e\xe5\x56\x6c\x7c\x31\x9d\x68\xc4\x31\xf1\xa1\xe2\x60\xe3\xc8\x1d\x49\x30\xa7\xfc\x75\x0f\xfb\x65\xc9\x5f\x02\x86\xd5\xa9\x6a\xf0\x60\xde\x39\x81\x42\x1b\x3d\x82\x35\xe5\xac\x90\x78\x5f\x4d\xf6\x48\xd9\x33\xdd\x87\xdd\x01\x3a\xcf\xe4\xd1\xaf\x82\x27\xe6\x70\x45\x25\xca\x9f\x6c\x9d\x9a\x72\x70\x7b\xf3\x82\xc9\xcb\xee\xe4\xf6\xf3\x38\x1a\x53\x26\x96\x6c\x93\x87\x50\xdd\x8a\x74\x5e\x33\x33\xf8\x08\x7e\x67\x13\xb4\x2a\xfa\x4b\xf5\x00\xce\x26\xa3\xce\x87\x16\x1f\xe0\x88\x13\x37\x60\x36\xe6\x9e\x3c\xee\x8a\x3d\xbc\x48\x05\x7c\x97\x3c\x94\x47\x4a\xdf\x0c\x58\x15\x71\xd1\xf0\xc9\xe3\x9e\x68\x7f\xb5\x29\xa8\x38\x92\xf3\xe1\x7b\xce\xe4\x77\x8a\xfd\x20\x1f\x47\x51\x15\xfe\x3d\x67\xf9\xdf\x10\xf3\x52\xa5\x03\xaa\x81\x66\xf8\x5f\x6d\x02\xb4\x6f\x9a\xb2\x19\xd8\xd7\xde\x6b\x4a\xa6\xa0\x94\xb7\x97\xa0\x5c\x55\xe8\x62\xdb\xe7\xbd\x66\x05\x69\xca\xc0\x5d\x6b\xf3\xa2\x45\x36\x88\x38\xe0\x00\xda\xf9\x6f\x65\x53\xf0\x78\xb3\x47\x70\x52\x99\xc3\x80\x2f\xd2\xee\x03\x1d\x34\x87\xd6\x77\xcf\x86\x81\x15\x3b\x74\x52\x1c\x38\xbc\xc0\x87\x65\x19\x4e\x29\x8e\xcc\xa1\x9b\xe4\xf6\x23\x4d\xe3\xa1\x9d\xe0\x40\x31\x09\x64\x68\x27\x60\x28\x25\x96\x0d\xed\x04\x17\xea\xd8\x01\x3b\xf6\xc2\xe1\x46\x75\x8a\xa7\x3e\x17\xf0\xd8\x0f\x89\x07\xab\x53\x3c\x70\x18\xdb\x28\xc9\x85\xf4\x4d\x8f\x9b\xe3\x96\x33\x27\x08\xa7\xb9\xb0\x82\xea\x87\xde\x75\x77\x2d\xef\x74\xcd\x9b\xa1\xd6\x70\xeb\x69\xaf\x65\xde\x28\xb5\x86\xdb\x60\xbe\x00\x0b\xa3\x35\xdc\xda\xea\xb5\xf0\xbd\x54\x6b\x68\x7e\x5e\x9f\xf6\xb6\x36\x7f\x67\x7f\x2e\x87\xdc\x30\xbe\xc2\x01\x51\x94\x14\x65\xfe\x87\xc4\xd5\x55\x94\x14\xdc\x35\x0b\xfb\xf1\x58\xfd\x3a\xd5\x89\x3b\xe8\xb7\xe5\xb9\x25\x4a\x0a\xee\xb7\x25\x4a\x8a\x27\x8f\x15\xd8\x53\x5d\xd1\xf6\x37\x4f\x4a\xea\x62\xf0\x35\x7e\x8c\xec\xa3\xe1\x57\x74\xc5\x05\xe0\xb6\x0d\xc2\x61\x52\xac\x68\x76\x61\x94\xa8\xb0\xb6\x80\xe6\x2a\x4a\xde\xc8\xb6\x22\x4a\x0a\x29\x2a\x3e\xbb\x91\x3f\x17\xde\xab\x7a\x1b\x88\xad\x46\x21\xec\xee\x8d\x20\xee\x8d\x20\xfe\xbc\x46\x10\x44\x5b\x41\x70\x51\xe9\x8e\x0c\x20\x1a\xd8\x35\xd8\xac\x9e\xdb\x2d\xa4\x60\x8d\xae\xdd\x76\xf4\x3d\x12\xea\xf9\x8c\x26\xea\xb1\x62\x8f\x1b\x7e\x33\x01\x5c\x79\x6f\x90\x92\xe5\xc0\x6b\x18\x61\xe9\xbe\xed\xb7\x89\xc0\x49\xa5\xfc\xc8\xff\xbf\xba\x22\xed\x36\xe2\xb3\xa9\x7c\xb6\xc0\x7f\xec\xa2\x77\x86\x51\x22\x5a\x6f\xec\xee\x63\x4a\x0b\x6c\xef\x0b\xd6\xe3\xed\x5c\xbe\x02\x05\x5e\xc2\x2a\x31\x4c\xdd\xb5\x7c\xcf\x2d\x5d\x4d\x29\x5a\xaa\x99\x74\xad\xb8\x32\xd2\x91\x7d\xec\x1a\xd6\xec\x80\x1e\x6c\xcd\x6e\x37\x52\x69\x87\x06\x26\xfe\xc6\xb1\x03\xdf\x3d\x36\x46\xc6\x38\xa3\x8c\x98\xe4\x7a\x30\x7d\xb2\x70\x72\x0f\xa3\xc9\x84\x82\x35\x32\x47\xb9\x75\x2e\x39\x57\x8f\x42\xf0\x71\x44\xa2\x44\xcc\x92\x34\x5c\x4e\xbc\x87\x10\xf3\xe8\xc2\xb6\x43\x5f\x3f\x82\x05\xe7\x30\xaa\x17\xe5\xa8\x3c\xf7\x3f\x98\x35\xe9\xae\xf4\x4a\x4f\x13\xa4\x22\xd5\x55\x30\x9a\xce\x47\x51\xe2\xba\xb7\x29\xd2\x29\x65\xdc\x9d\xd5\x40\xa7\x7d\xbe\xa8\x82\xc5\x82\x26\xb0\x96\x82\x84\x3f\x80\xb0\xb0\x2b\x6a\xab\xbb\x84\x11\x8c\x69\x16\x8d\x19\x7b\x92\xbd\xaa\x2f\x2c\x6e\x4f\xd3\x89\x80\x85\x7d\xa8\x12\xb5\x72\x78\x75\x7a\xbf\x2a\xb4\x2a\xbd\x05\xbf\x32\xd9\x25\xf5\xd8\x1d\x07\x71\x2c\xf0\x2b\xef\x70\xf8\x88\x66\x81\x5e\xba\x79\xf4\xab\xf0\x2c\x08\x77\x75\xb3\x20\xef\xb1\xff\x25\xa1\x81\xef\x5f\xcf\xa5\x1d\xc6\xb7\x32\x04\xf5\xeb\x4c\x2b\x51\xe3\x77\xcd\xe4\x5b\xb8\x62\x55\xac\xef\xed\x81\x74\x31\x89\x12\xeb\xa1\x52\x1d\x12\xb4\xcb\x
22\x51\x95\xb8\x5e\xb6\x95\x06\x3c\x77\x3f\x7f\x5e\x7e\xf4\xe7\x1a\x5f\x57\x43\xd3\x60\x99\x19\xb5\x57\x0d\x7a\x1d\x46\xad\xdf\xff\x77\xc9\x33\xd2\x6e\x93\x61\x33\x6b\x2c\x84\x32\xaf\x4d\xd6\x0a\x78\x63\xbc\x9f\x2b\x27\x94\xcc\xe8\x7b\xeb\xa5\xf5\x17\x7e\x9c\xc9\xbd\x47\x5e\x09\x07\x98\xe1\x07\x73\x4c\x64\x40\xe2\x95\x58\xd4\x8d\x79\x51\x08\x4e\x95\x6c\xfc\xf9\x9c\x33\xa9\xe5\xb5\x4b\xf8\x95\x1f\x29\xa1\x3b\x31\x61\x9d\xd5\x51\x67\x6c\x6b\x25\xb8\x43\x9b\x92\x1f\x79\x32\x21\x90\x37\xf0\x0d\xb0\x48\xe7\x8b\xe2\x12\xab\x04\x1b\x6c\xa2\xb5\xab\xd0\xa4\x47\xc4\x9e\x86\x20\x7d\xac\x80\x1b\xe9\x6e\xaa\xd4\xd1\x94\x17\x13\x95\x03\x11\x55\xd6\x8d\xc1\xb8\x58\xd9\xf0\x88\x05\x37\x19\x87\x7e\x89\x57\xee\x1c\xea\x75\x94\x17\xce\xb3\xbf\x13\x63\x34\xa7\x1e\x8f\x50\x95\xa3\xd7\x35\xbb\xdb\x8b\x7a\x14\x24\xaf\xe9\x97\x8b\x90\x9b\xb5\x8a\x47\x70\x4a\x15\x59\xa4\x05\x7a\xe8\xca\x0b\x4b\xe1\x88\x3b\x1d\x22\xc6\xc3\x3e\xf5\x7e\x50\x80\x9a\x6f\x8a\x8c\xbd\x4d\xad\x47\xbe\x7d\x95\x2c\x48\xfb\xf6\xcb\xf6\x14\x62\x36\x4f\xf6\x70\x8f\x35\x2c\x1e\xc6\xc6\x9e\xab\xe8\x17\x4f\xb5\xdc\xe7\x59\x1c\x52\x8b\x40\x9d\x14\x3f\xb9\x55\x4f\xe6\x06\x03\x39\xdd\xf4\x8c\x66\x97\xc5\x0c\x1c\x91\xa0\x7a\x30\x76\x5c\xaf\x53\xd2\x1c\xcd\xc1\x8f\xf1\x4c\xd7\x7f\x43\xa1\x1c\x2f\xdd\x69\x13\xae\xd2\xf9\xba\x47\xda\x6d\xa9\x7c\xaf\x50\x52\xbc\xe3\xb3\x64\xe9\xf4\x94\xfa\xee\xfa\xb4\xb7\xd5\x28\xd0\xde\x57\xd4\xc9\xc1\x6d\x74\xb5\x52\x2e\x63\x20\x25\x5a\x39\x69\x63\xc6\xfe\xe7\xaa\x32\xf8\xf5\x58\xff\x3c\x45\xc9\x3b\xf8\xc3\xd2\xcd\xb1\x34\xae\x9c\x63\xbf\xa4\x76\x8e\xfd\x7e\x8a\xaa\x43\xfa\x39\xa7\xc6\x06\x1a\x3a\xe7\xee\x7d\x15\x15\x1d\x2b\xbc\x8a\x8e\x8e\xc3\xdb\x4a\x3a\x96\xba\xa2\x96\xce\x2c\x52\xa1\xa6\xe3\x2d\x56\x95\xbd\x89\xa2\x8e\xe1\xb6\x44\x51\xd7\xcc\x4b\xbe\xe8\x56\x03\x45\x5d\xa3\x50\x5e\x5f\xeb\x65\x9d\xe7\xf6\x6f\x15\xf2\xe0\xc5\x57\x21\x10\x59\xc2\x26\x11\x9e\xbe\x22\x91\xd8\x85\x2a\xc8\x44\xb6\x5b\x5d\xfe\x46\x3a\x5d\x2e\x49\x35\x79\x30\xe7\x69\xef\x6e\x9f\xca\xa9\x51\x36\xa0\xbb\xbb\x0f\x3d\x52\xf9\x78\xc7\xc3\x87\x91\x7f\xdb\x28\x6f\xee\xd8\x76\x4c\xb3\x22\x88\x12\xbf\x73\x5b\x07\x91\xfc\x36\xa9\x86\xa8\x39\x50\xdf\x4c\xaf\x26\x6b\x51\xc4\xca\xa8\x75\x05\x51\xd0\x6c\xce\x8e\xfc\xd1\x04\x6a\x36\xfb\x1d\x0a\x97\xb5\x64\x1a\x9d\xd1\x44\x9a\xb4\x98\x47\xea\x32\x5f\xb9\x96\xfd\x0b\x3f\x66\x6b\x73\x5b\xc0\x32\xaf\xdc\x69\xd7\x6f\x7c\x8b\x21\x9a\x2f\x11\xee\x99\xb6\x55\x78\x85\xe3\xf4\x8c\x66\xd9\x79\x16\x15\x05\x05\x73\x2f\xde\xab\x16\xd9\x80\xde\x37\xc6\xdd\x39\x68\xd9\x73\xfc\x8a\x1f\xac\x20\xf4\x51\x34\x4a\x04\x0a\x0b\xd7\xe9\xb0\xfd\xd0\xbe\x11\x32\x5d\xad\xa4\xd5\x9c\xd6\xda\x96\xe0\xcd\xe3\x3f\xc0\x8f\xc1\xc1\x00\x54\xe1\xc1\x9c\xad\x0a\x70\x79\x28\xb4\x59\x6c\xbc\x8c\x13\x50\x7e\xc7\x10\x47\x9f\x29\x09\x48\x1e\x25\xd3\x98\x2a\x27\x5c\x00\xd9\x37\xec\xa1\x81\x82\xb9\x8f\x19\xee\x93\x83\xb7\x76\x75\x45\x4e\xda\x27\x5b\xa7\xed\xd3\xae\x12\x06\x6b\x7c\x00\x88\xee\x99\x78\x67\x5f\xd8\xaf\x61\x89\xe8\xce\x6d\xa0\x38\x2a\xc0\x56\x61\xab\x47\x1e\x81\x31\xf6\x26\xf4\x65\x0b\x7b\xa1\xd1\x1d\x72\x04\x59\xe9\xa5\xa1\x27\xfd\x3a\x94\x9d\x16\xa4\x37\x87\x87\x12\x50\x37\x30\x18\x90\x20\x8e\xc9\x28\xc8\xa3\x31\x77\x7e\x00\x2f\x05\x76\xb6\x85\x02\x27\x4e\xd9\xc9\x58\xf6\xa6\x47\x76\xb6\xeb\x8c\x4e\xcc\x85\x2d\x38\x9a\x3c\x81\x4b\x5d\x24\xa1\x53\x10\x20\x21\x22\xd4\xc9\x69\x8b\xec\xfd\x00\xeb\x53\xa7\x3d\xe6\x89\x95\xca\xb4\x7d\x59\xdb\xaa\x1c\x60\x46\x4b\x7b\x56\xb1\xda\x71\xab\xa5\x34\xab\x7d\x7e\x19\xde\x60\x1c\xa2\xdb\xb5\xb6\x51\x54\xe4\xc1\x03\x82\xbf\x4f\xd0\x6f\xe4\xff\xed\x54\xee\xba\x2a\x2c\xc6\x60\x7a\xa3\xb9\x11\xcb\xb7\
x6a\x6a\xe4\x2c\x98\x73\x23\x26\xcc\x9c\x1a\xe4\x6e\xed\x96\x33\x63\xf5\xab\x62\x62\x50\x9b\x5f\x7b\x5e\xee\x72\x62\x4c\xbf\x27\x9a\x91\xa2\x99\x80\xb3\x51\x0b\x6c\x11\xb6\x39\xd2\xf9\x21\xa9\x25\x8c\x15\xb6\xc4\x54\x6c\x3d\x56\x80\xdb\xa7\x27\x3b\x02\x54\xa6\x71\x10\x05\xb1\x75\x6a\x25\xe8\x6f\x77\x77\x00\xac\xde\x60\x7b\xc0\x63\x11\x43\xac\xdf\x13\x50\x63\x77\x34\x91\xd1\x84\x74\x50\x16\xe2\x90\x36\x3f\xbe\xe1\xc4\x02\xc3\xf6\xbd\x86\xd8\xaa\x98\x72\xb1\x49\xc8\x53\xb5\x6f\x9e\x61\xde\x7c\x53\xdd\x52\xc1\xf7\x9c\x09\x17\x9f\x2d\x63\xde\x8d\x8a\x4e\xcc\xca\xf1\x74\x6b\xd7\x6b\x8d\xe6\x59\x65\xf0\xa1\x88\xfc\xd2\xf9\x35\x5c\x28\x96\xee\xf6\xc2\x55\x51\x1c\xe4\x05\x39\x39\x65\xc2\x04\xaf\xf7\x46\xd3\xbe\xee\x9f\x77\x35\x07\x20\x67\x11\xc7\xc1\x12\x1c\x68\xf4\x33\x28\xf8\x54\x34\xd0\x84\x48\x2a\x8c\x63\xd1\x11\x46\x71\x60\xfb\xa6\x89\x8c\x2e\x49\x48\x27\xc1\x32\x06\x45\x68\xbe\x64\x72\xaa\xda\x98\x5b\xc2\x47\x4d\x4f\xc4\x78\xb4\x67\xd1\x38\x46\xdd\x80\x01\xeb\x1d\x71\x45\x51\xb8\xe1\xe9\xad\xd4\xa8\x5e\x3a\x6a\x97\x3a\x62\xb4\x44\x72\x7b\x8d\x00\xc5\x0b\x52\x3e\x69\x31\x8a\xef\x91\x16\x5b\x04\xec\xbf\xd3\xd6\xa9\xa6\x76\x01\x81\xd2\xa0\x50\xb2\x8c\xed\x67\x0f\x68\x36\x1b\xa1\xcd\xf6\x2e\x67\xf5\xb7\x66\x21\xb8\x1e\xaa\x9c\x95\xc0\xf7\x06\xe1\x29\x8f\xcf\x7a\x0e\x37\xbc\x6c\x38\xc6\x78\xd9\xbf\xb0\xea\x2d\x22\x16\xdc\xaa\xf3\xef\x13\x7e\x1a\xff\xf7\x69\xb7\x5e\x44\x10\xca\x5b\xe5\xea\xa1\xfc\xde\xc1\x8a\x61\x21\xa1\x9b\xb3\x0e\xf9\xf0\xd4\xbd\xcb\xb2\x70\xe6\xb9\xb4\x10\xf7\xe8\xf6\xc6\xe0\x75\x46\x6d\xde\xca\x08\x3f\xa8\xd2\x03\xaa\xcd\x16\x6a\x5c\xc1\x2a\xfb\x6f\x6c\x4c\xbc\x4b\x4a\xff\xfc\x5e\x51\x5d\xa7\xb2\x34\x9e\x60\x67\xb2\x82\x95\x39\x85\xd4\xb3\xe4\x93\x53\x9f\x07\xf1\xfe\x62\x99\xcf\x3a\x8e\x5b\x52\xf9\x4c\x5b\xfa\x18\x75\x6b\x66\x63\x71\x1d\xae\x9f\xf9\xbc\x7f\xe2\x96\x90\x13\xcf\xce\x59\x8f\x60\xe7\xb2\x96\x6f\xd2\x5b\x79\xf4\x15\x13\x88\x3d\xf9\xde\x7a\xfe\xa0\xeb\x8e\xd4\x21\x10\xff\xdb\xcf\x9f\xcf\x1d\x6b\x8d\x1b\xd6\xd2\x89\x60\xb3\x09\x7e\x52\x2b\xe6\x63\xe5\xd9\x58\x73\xee\x08\x2d\xdd\x91\xb1\x24\x91\x3b\xdb\x26\x0e\x41\xf9\xfd\xe8\x24\x4b\xe7\x5e\x73\x03\x0e\xe5\xe3\x2d\x23\xfb\xc1\x8e\x65\x20\x64\x58\x06\xad\xf0\x60\x4a\x32\x35\xde\x72\x03\x16\x25\x06\x82\x59\x94\xe1\x4c\xb3\x86\x55\x7d\x15\x5e\x05\x7b\x13\xbe\xb1\xe4\x82\xae\x78\xe2\x03\xdd\x93\x82\x8e\x40\xd7\x43\xb2\x0d\xc6\x0f\x5d\xe9\xce\x59\x20\xaf\x6c\x11\x55\xd6\x89\x9b\x77\x2a\xf6\xad\x28\x28\xf0\xa1\xe0\x77\xec\xb8\xf4\x06\xd9\xe1\x1e\xef\xf9\x6e\x9b\x33\x90\x9c\x04\x93\x82\x66\x6a\x91\xe0\xfe\xde\x68\xad\xfa\xcb\xf8\x1c\x77\x6b\xce\x51\xe2\xb0\x9b\x54\x62\x4f\xc4\x8d\x79\x5b\x56\x3f\x76\xea\x51\xea\x43\xda\x0e\x78\x53\xc9\x68\x1a\x72\x1a\xf2\xb0\xba\x6f\x0c\x76\x63\xaf\x1a\xa6\x11\xa3\x32\xbd\xcd\xa2\x69\xdf\x20\xd1\xdd\x72\xad\x3f\xc4\x1e\x82\xff\x1a\x52\xbf\x34\x48\x6d\xf8\xf7\x87\x22\xfe\x7b\xda\x47\x7f\xbf\x0b\xed\x13\x2f\xe9\xe3\xe8\x8c\x37\x25\x7d\x3b\x86\xd8\x8a\x9b\x8a\x43\xac\x76\xfd\xcd\x76\x16\xb3\x17\xab\xd4\x2f\xe6\xcf\x4b\x6f\xb1\x43\x5f\xfe\xf5\x57\xbe\x84\x17\xe2\xd6\xcf\x35\x52\xad\xeb\x7e\x87\x6c\x91\x0d\xb3\x77\x5d\xee\x90\x89\x87\x11\xf3\x4c\x3d\x77\x3f\x6c\x5d\xba\x19\x0f\xb6\x2b\x9c\xd9\x1b\xb8\xb6\x2c\xbe\x0c\x2e\xb6\xb6\xe2\xd8\xf0\x9c\xab\x95\xb5\xdd\x35\xd5\xaa\xde\x8b\x44\xab\xeb\xb5\x17\xbc\xe5\x57\xbb\xea\x4d\xdc\xf5\x69\x6f\xeb\xf7\x8e\xbb\x7f\x5c\xff\xec\x6d\x59\xf1\xee\x4d\x78\x22\x81\xff\xb9\xad\xcb\x52\x3f\x7d\x5b\xa2\xb7\x6f\x4b\xfc\x60\x6d\xe9\x79\xfd\xb6\x54\xcf\xdf\x96\xe8\xfd\xdb\x12\x3d\x80\x5b\x9a\x2f\xe0\x9c\x1a\x1b\x58\xd8\x38\xfe\x51\xbe\xe2\x23\xb8\x63\xef\x2b\xb8\xe3
\xd5\x9f\xc1\x1d\x37\x7d\x07\x77\xec\x3e\x84\x3b\xbe\x83\x97\x70\xcb\x5b\x3f\x85\x3b\x6e\xfc\x16\xee\xf7\x0e\xea\x7f\xdc\xc0\xe2\x6c\x59\x65\x72\x26\x5d\xab\xf0\x1f\x82\x38\x91\xd5\xd9\x12\x9b\x9d\x2d\x0d\x2b\xb1\xa5\xcf\xf0\x6c\xa9\x2d\xcf\x96\xd8\xf4\x6c\x89\x6d\xcf\x96\x96\xf1\x99\xa7\xde\x26\x8b\xe3\x37\xb5\x3f\x3b\xf6\x1b\xa0\x1d\xdf\xc0\x02\xed\xb8\xb1\x09\xda\xb1\xc7\x06\xcd\x2e\x7d\xb3\x35\x52\x61\x86\xd6\x74\x91\x34\x37\x44\xfb\xb6\xc9\x2a\x69\x2f\x73\x0a\x8a\xd9\x71\xd1\xe6\xd1\xf8\xa6\x29\xa1\xc9\x19\x09\x53\x0a\xd6\x0a\xf0\x3a\x30\x48\x42\x70\x60\x4b\xfe\xf9\xe6\xf5\xab\xa2\x58\xbc\xa7\xff\x6f\x49\xf3\x62\x0d\x04\xb3\xcb\x05\x4d\x27\x56\x0e\xf7\x63\xa3\xde\x6f\xb4\x25\x5e\x44\xc3\x7d\x1b\x9a\x7c\xb9\xde\x5d\x33\x22\x45\x96\x42\x9a\x09\x20\xa9\xff\x92\xcf\xd8\xee\x13\x4d\x93\x34\xa3\xc3\x38\x4a\xe8\xda\x35\xb7\x58\x65\x78\x68\xe4\xea\xfe\xfe\xe5\xec\xfd\xcb\xd9\x3f\xf1\xcb\x59\xfe\x6a\x56\xd8\xb0\x19\xcf\x66\xf9\x86\x43\x6e\xf6\x7a\x56\xec\x7d\xc7\x45\x14\x43\x9d\x5c\x9f\x09\x6b\x87\x3f\x4f\x72\xc0\xa2\xe2\x52\xb1\x44\x5d\x64\x1c\x07\x79\x4e\x4e\xa0\xc8\xa9\xe8\x26\xcf\xd0\x4c\x98\x57\xb5\x36\x80\x7b\x23\x58\xa5\x42\xb9\xca\x38\x08\xa9\xf0\x64\xdd\xdc\xc9\x39\x40\xb2\x9a\x8e\xdf\x1e\x7e\xfc\xc0\xce\xd6\x30\x09\xed\x73\x1a\xb5\x39\x69\xb6\x3f\xa3\xdf\x6f\xd0\xef\x9f\xd0\xef\xfc\xd7\x60\x94\xca\x8f\x49\x94\x24\xf4\x52\x7d\xd1\x79\x91\xc2\x53\x46\x99\xb2\x88\xc6\x66\x42\x12\x24\x66\xc2\x3c\x1a\x67\x76\x4a\x1c\x47\x4e\x21\x03\xde\x00\x95\x1f\x46\x91\x69\x16\x24\xa1\x1a\x8a\x91\xf5\x93\xf1\xf5\xd1\xf8\x7a\x67\x7c\xbd\x34\xbe\xfe\xcf\xf8\xfa\x97\xf1\xf5\xd6\xf8\x7a\x61\x7c\xfd\xc3\xf8\x3a\xe6\x5f\x6b\xa7\xe5\xae\x6b\xd8\x1c\xbd\xdb\x7f\xc1\xa6\x78\x48\x76\xb6\x7b\x2a\xf1\xc3\xe1\x4f\x6f\xf7\x3f\x1e\xbf\x7f\xf9\xe9\xf5\xcb\xb7\x3f\x7d\x7c\x35\x24\x8f\x75\x26\xcc\xea\x50\xff\xd4\x39\x25\x94\x33\x24\x5f\x88\x95\xa0\x9d\xa8\x43\xc6\xa7\x17\x47\x3f\xbf\x25\xd7\xba\xa6\x77\x47\xaf\x5f\x33\xe8\x8f\x87\x6f\x5e\x1e\x1d\x7f\x1c\x92\xad\xcd\xcd\xcd\x81\xe8\xa1\xb8\xf1\x7e\x1e\xa7\xe3\xcf\x43\xd2\x66\xac\x33\x2f\xda\x46\xde\xfe\x18\xe2\x18\x0f\xf5\xdb\x46\xfe\x00\x83\xed\xe7\x75\xbe\x4f\xee\xe3\x60\xdc\x6f\x64\x7f\xf5\x8d\x6c\x4d\xb9\x80\xc8\x67\xc1\xce\x5d\x79\x80\x38\xc8\x2e\x17\x45\xfa\xf7\x0f\x78\x73\x18\x43\xda\x23\x1d\xfe\x82\x35\xe8\x05\x18\xb0\x9c\xb6\x37\xb4\x93\xeb\xbe\x01\x28\x2e\xc7\x0f\x54\x45\x12\x79\xf0\x40\xe6\xf6\xa5\xbf\x08\x2e\x26\xcf\xe8\x45\xdb\x7e\x45\x67\x78\xfe\xfa\x81\x6c\xb3\xd2\xb6\xeb\xe3\x6d\xe9\x2e\xd2\x2c\x4e\xe4\x65\xb8\xba\xe0\xb7\x9c\xb3\x13\xeb\xb5\x1d\x07\x95\x38\x62\x9d\xeb\xbf\xa2\x17\x7d\xd0\x5e\x0a\xcf\xbd\x3e\x1b\x23\x86\x15\x39\x6c\xdd\x3a\x3f\xd1\x71\xf5\xdb\x90\x6c\x7f\xf3\x84\x97\x44\x8f\x93\xe5\x9b\x33\xc6\xf2\x14\x8e\x5b\xc3\x6f\xbe\xeb\xb5\x4c\x94\xb7\x86\x4f\x37\xaf\x4f\x7b\xdb\x8d\x7c\x3e\xdd\xf3\xbd\x7b\xbe\xf7\xe7\xe5\x7b\x9a\xed\xf1\x77\xfe\x77\xc0\xf7\x2c\xd9\x7d\x75\xd1\xdd\x23\xb9\xcb\x82\x3e\xc1\x7d\xa5\x50\x43\x36\xaf\xed\x0f\x04\xbb\xd7\xb1\x88\x26\x4f\x31\x00\xfb\x56\x22\xfc\x32\x89\x8a\x37\xc1\x42\x89\x8b\x6d\x29\x51\x0f\x39\x0f\x6a\x6f\x4a\x59\x93\x49\xed\x43\xcd\x16\xdb\x5b\x86\x9c\x3f\x44\x19\x9b\x9b\xaa\xd0\xff\x56\xe4\x8d\x82\xd1\x28\x98\x52\xd5\x12\xce\x43\xc2\xff\xd0\xce\x9b\x7b\xea\x44\xd9\x6f\xaa\xb3\xe3\xf4\x8c\xc6\xc1\x58\x36\x6b\x67\xeb\x33\xc6\xd0\x97\x3d\xf5\x57\x8e\x20\x7e\xaa\x85\xc8\x67\x41\x92\xa4\x89\x31\x6e\x13\x42\x9f\x6b\x86\x15\x10\x35\xad\xc0\xc9\x6a\xe8\x81\xc0\xa8\xd4\xe7\xa5\x61\x35\x50\x5d\x4d\xe2\xec\x36\xf4\x02\x19\x95\xa9\xf3\x98\x3d\x36\x0f\xa0\x7f\x88\x26\xa0\x41\xae\x1e\x38\x04\xfa\xd9\x84\xf5\x81\xe2\xb
9\x86\x53\x5f\x65\xc5\xb8\xbf\x8d\xea\xc6\xd5\x37\x2d\x80\xca\x14\x2b\x94\x61\xc5\xfc\xc6\x56\xda\x11\xc3\x22\x08\x85\x29\x29\x98\x7a\x5e\x2c\xe8\x98\x6d\x5e\xca\x3c\x1f\x1b\x5d\x09\xef\x29\x3e\xcb\x29\x5d\xc5\x88\x32\xb8\x50\x84\xe3\xb2\x6c\xb0\xc6\xb3\x20\x0b\xc6\x05\xcd\x72\xa9\xe2\x87\x7b\x79\x51\x1a\xed\x23\xde\x36\xa2\x69\xd2\x43\xb6\xd0\x64\x73\xcd\xef\xf6\x23\x9a\xce\x0a\x22\x3d\xd2\x5a\xde\x7d\xc5\x18\x0c\x69\x93\x83\xf4\xa0\x77\x79\x0f\xda\xf1\xf8\x18\xe2\x16\x22\x00\x03\x11\x69\xe1\xb5\xaa\xba\x21\xde\xea\xf6\x7f\x49\xa3\x04\x82\x35\x90\x67\x50\x07\x19\x92\xd6\x66\xab\x4b\x36\x04\x70\x89\xe1\xdb\x8d\xe7\x02\xa2\xf5\xfc\xd9\x27\x03\x06\xb1\xe2\x6c\x88\x1e\x6e\x70\x8f\xcb\x37\x9d\x97\x32\x43\x44\xd3\x11\x0d\x6c\x9d\x60\x86\x08\x91\x3c\x5c\x1f\xd3\xd6\xbc\x70\x6f\xcd\x15\xb3\x12\x25\xac\x12\x3f\xb2\xb0\x3f\x6a\x8f\xa3\x24\xd6\xb8\x36\x3b\xe4\x1e\x48\x8e\xf9\xd6\xae\x44\xfa\x19\x0f\xf6\x3c\x18\x90\x1f\xa3\x24\x24\xfc\x71\x97\xe8\xa8\x0a\xd6\xcc\x24\x8a\x56\x4b\xdf\xe4\x83\xed\x4b\x0f\xe2\x47\xcd\xe8\x85\x34\x61\x56\x67\x2e\x96\xc6\x4f\x3d\xec\xc4\x51\x7e\x56\x62\xd5\x6c\xe3\x77\x2f\x60\x5c\x23\x6c\x6a\x76\x49\xb4\xb1\xb7\x8d\xc1\x65\x20\x64\x6c\xdb\xa1\x9b\xea\x44\xac\x1d\x11\xfa\x42\xb5\x30\x21\x1d\x5e\x64\x6f\x8f\x6c\x76\x8d\x53\xda\x28\xa3\xc1\x67\x0d\xca\x46\xb9\xb1\x47\xc4\xab\x72\x36\x83\x07\xb3\x20\x3b\x48\x43\x0a\x35\x78\x0f\x61\x6c\xb2\xa5\x39\x4e\x5e\x64\xcd\x28\x84\x4f\xda\x4a\x24\xb2\xcf\x8a\xfc\x76\x34\x02\xcd\xfd\xf7\x10\xc9\x4d\x66\x3e\x2f\xca\x5e\xa7\x9b\x93\xed\xf1\x31\xdf\x59\x64\x74\x12\x5d\xf0\x08\x5a\x9b\x17\x5d\x36\x0b\xc0\x35\xfc\xee\xed\x45\xa8\xb7\xf2\xd9\xf7\xda\x2e\xc3\x11\x34\x88\x81\x9b\x57\x06\x13\xf0\x85\xf8\x34\x7c\xed\x0b\xb7\xeb\xa2\x1b\x98\x2a\x18\xc5\x0b\xcc\xf3\xd9\x87\xe5\x20\xcc\xb6\xf9\x72\x90\x33\xc2\x5a\xd2\xd4\x31\x49\x33\xdb\x84\x2e\x2f\xb2\xb2\x70\xf8\x68\x46\x19\xd4\x58\xcc\xcd\x7e\xd1\x89\x6e\xb6\xd2\xc1\x3a\x51\x44\x06\x37\xbc\xb6\x69\x10\xd6\xdf\x8d\x3d\x92\xc8\x7d\xe1\x7b\xb2\x4d\x9e\xb1\x93\x0d\xd9\x20\x6c\x3f\x48\x7c\x34\x21\x5c\xc8\xcf\xe8\xc5\x5d\x92\x86\x15\x73\xc0\xa6\x8d\x1a\xd6\xf0\x9b\x11\x87\xc3\x33\x10\x75\xfc\x36\x14\xf0\xbb\x4d\xab\xe5\xb1\x74\xb2\x8c\x63\x85\x86\x01\x3d\xa3\x49\xc1\x1f\x0a\x00\xcb\xff\x25\x4f\x13\x12\x8c\x22\x9b\xc7\x4b\xb7\x89\x1f\xd3\x1f\x97\x71\x6c\xbf\xa1\x94\x8f\x09\x58\xe9\x47\xbc\xb4\xfb\x18\x8a\x37\xec\xb4\xab\x19\xbb\xdb\x86\x21\x48\xb1\xca\xb1\xea\x94\x7d\xf7\xc1\x84\x22\x4a\x42\x7a\x71\x34\xe9\xb4\x3b\xed\x2e\xf8\x86\x7c\xb4\xe5\x79\x0e\xa9\xe0\x1d\x3b\xc1\xe2\x72\x41\x45\x73\x00\x04\x54\x64\xfa\x33\xeb\x44\xdd\x2f\x32\x7e\x70\x9f\xc1\xef\x92\x6b\x21\x8a\x99\x96\x7f\xaa\x15\xb2\x41\xda\x1d\x36\x73\xaa\xf6\x0d\xd2\xee\xb6\x1b\xad\xbd\x30\xca\x17\x71\x70\xc9\xe7\x05\x7c\x8c\x26\x05\x93\x6d\x15\x36\xec\x37\x6b\x17\x90\xfd\x82\x17\xab\x7a\xe1\xca\x6a\x33\x27\xdf\xbf\xbc\x8c\x1e\xb0\x2d\xcd\xa2\x18\x3a\xed\xcb\x60\x8b\x97\x1d\x61\x56\xd7\x25\x8f\x7e\x50\x89\x6a\x5a\xdd\xbe\x55\x3e\x7c\x56\x36\x9b\xce\xcc\x1a\x68\x16\x60\x7c\xb2\xc9\x33\xfb\x4d\xab\x78\x0f\xc6\xd6\x8c\x76\x36\x32\x18\xe8\x81\xa6\x67\x34\x8b\xd3\x20\xa4\xa1\x52\x04\x7b\xd6\x04\x1e\xc0\x47\x4d\x24\x65\x6f\x1a\x07\xe4\xe3\xd1\x8b\xa3\x21\x99\x07\x9f\x41\x35\x1c\x25\x67\xcb\x38\xa1\x59\x30\x8a\xe9\x5d\x0e\x50\x9f\x06\xec\xd7\xbb\x5b\xe4\x11\x41\xd9\xdd\x6e\x3f\xa3\x8b\x38\x18\xd3\x4e\x9b\xb4\xc1\xa9\x1b\x3b\x2d\xb4\xcc\x08\x91\x69\x72\x46\xb3\x22\xd7\xf1\x36\x41\xee\x0b\xe9\x38\x9a\x07\xb1\xcd\x64\xa3\xc4\xcf\xec\x8b\xf4\x05\x2f\xe0\x52\x5e\x65\xec\x4c\xd3\xad\x21\x17\xf0\x44\x4d\xb5\xd1\x1f\x8b\xd4\x0d\x8e\xa9\xc2\xcf\x34\x19\x63\xad\x
6c\xcb\x78\xe2\x5d\x8d\x0b\xd5\x55\x1d\x99\x35\x91\x5a\x52\x77\x7c\x9e\xb8\xdc\x42\x7d\x6a\xee\x28\xc6\x61\x9f\x03\xc4\x34\xcf\x3f\xce\x82\xa4\xb3\x09\x4e\x64\x1f\x71\xab\x73\x61\xbd\x2f\x08\x6b\xab\x0b\xb1\x5b\x51\x8e\x81\xc5\xfd\x25\xb8\x69\x16\xa8\x0c\x92\x4b\xe1\x78\x47\xb8\x23\x4d\xca\xd1\xda\x17\x78\xdd\x4f\x42\xae\xfe\xe7\x34\x14\x4d\x2e\x73\xe1\x48\x3d\x27\x23\x3a\x49\x33\xda\x77\xe8\xea\x95\x38\x3a\x54\xe3\xfe\x4a\xec\x41\x35\xa4\xf5\x0a\xf6\x79\x03\xf9\x6a\xfd\x3e\x14\xa6\x62\xf3\xe0\x82\x87\xad\xbc\x88\x8a\xcb\x21\x79\x0a\x2a\x6c\xb9\xeb\x44\xb9\x70\x69\x0c\x45\xbb\xf6\x26\x83\x26\xb9\xb3\xc1\x20\x76\x8d\xa2\x78\x3a\xab\x0b\x5b\x65\x85\x21\xdd\x19\xa3\x1d\x76\x0a\xe1\x48\x6b\x7b\xab\x80\xf8\x4a\x7f\xff\x70\xf4\xb6\xaf\xb0\xcc\xdb\xd3\x0e\x2c\xc1\x75\x6c\x4e\x02\x3b\x94\x67\x8f\x2c\x82\x3c\x67\xbc\xab\x98\x65\xe9\x72\x3a\x33\x57\x80\x1a\x88\xa0\x35\xa8\xd5\xbd\x9c\xd4\x5c\xed\x11\x9c\x96\x3c\x32\x6f\xe9\x88\x25\x80\x78\xdb\x61\x56\x57\x53\xdb\x99\xb4\x1f\x45\x15\x90\xce\x7a\x94\xff\x18\x25\x51\x41\x2d\xa4\x5b\xdd\x00\x09\x11\x75\xc2\x94\xb2\xdc\x8e\xa2\x75\xf1\x5e\x6c\x2a\x7c\x1d\xb0\xf3\x52\x02\xdc\x9f\xfc\x4c\x6d\x41\x6a\x4a\x0b\x08\x57\x7c\x34\x39\x4e\x22\xaf\xb6\x0b\xca\x16\x33\x2a\x7e\xa8\x05\x47\x8a\xb4\xa7\xb4\x53\xca\x21\xba\x37\x6a\xa3\xea\x87\xaa\xa6\xc3\x3b\xd3\x85\x22\xe0\xb6\x2b\x27\x34\xcb\xd2\x4c\xba\xa4\xe1\x3d\xce\x49\x92\x16\x64\x9c\x66\x19\x1d\x17\xc3\x73\xb5\x6e\xcc\x5e\x1b\x0b\x88\x15\x94\x24\xb0\xe4\x99\xf0\xdf\x33\xf8\xaf\x5f\xa4\xaf\xd3\x73\x9a\x1d\x04\x39\xed\x00\x73\xe1\xfa\x5e\xcd\xc7\x18\xd4\x3f\xc4\x2d\xb3\xb8\xba\x39\x61\xff\x9f\xea\xa3\x38\x02\xc1\x7e\xbf\x31\xe1\x71\x4f\x64\x09\x3d\x27\x2f\xd9\xa8\x3a\x6d\xb8\xea\x85\x8e\x80\xad\xea\xbf\xdb\x05\xa1\x17\x51\x5e\xe4\x3d\xb2\x88\x69\x90\x83\x58\x0c\x23\x4f\x13\x85\xaa\x49\x1a\xc7\xe9\x79\x94\x4c\xa1\x64\xce\xb8\xa0\xb5\x8c\x44\x0f\x7b\xe0\x5f\xa1\xa7\x9f\x7d\x54\x44\x89\x55\xbd\x07\xef\x57\xa6\x57\xe1\xe0\x33\x85\x45\xc8\x19\x3e\x5c\x46\x47\x60\x4f\xab\x98\x2c\x27\x01\xc6\x6a\xc1\x57\x05\x9f\x78\x8e\x5a\x41\x59\xef\xd2\x3c\x8f\x46\x31\x9f\x42\x70\xa1\x21\x8c\xfa\x3e\x1c\x32\xf9\x32\x2b\xf8\x4f\x26\x52\x4b\x6c\xbd\x9c\x4c\xa2\xe9\xa5\xf8\x38\x92\xa4\xf4\x88\x7c\x66\xcd\xf3\x3f\x7d\x5d\x05\x9f\xe2\x66\x8b\x83\xcd\x35\x98\xba\x5c\xe2\x9f\xf2\x2a\x8a\xc3\x4d\x35\x9c\xba\xff\xe1\x9f\xe2\xc2\x48\xe7\xf1\x02\x8f\x1e\xa9\x85\xa9\xef\x71\x78\x81\x5f\x83\x51\x6a\xe4\x79\x4a\xc8\x7b\x18\x3e\x00\xb8\xbe\xc1\x79\xbc\x04\xea\x05\x2a\xcc\x3f\x05\x16\x10\x08\xb1\x20\xd0\x07\x5c\xa6\x08\x84\x50\x8d\xc3\x29\xfa\x5d\xc8\xdf\xb6\x48\xc1\xf9\x82\x75\xf2\xbd\x52\x72\x3a\x27\x87\x71\x90\xb0\x93\x41\xa0\x58\xb3\x48\x17\xba\xb2\x34\x23\x01\x79\xf5\xf2\x9f\x70\x08\x97\xd2\xda\x9d\x31\x14\xb5\xcf\xca\xa3\xdd\xcf\x33\x2a\xfd\xec\x05\xe8\x2a\x57\x44\x41\x41\xc1\x02\xd8\x7a\x0a\x72\x72\x4e\xd9\x02\xd1\x0e\x56\xe4\x30\xd6\x90\x34\xf4\x33\x35\x8e\xe4\x72\x9c\x98\xa5\x70\x51\x87\xd5\x2c\x99\x04\x16\x8a\x78\x09\x1c\x35\xd6\xe4\x54\x9c\x3b\x59\xf2\x10\xde\x86\x45\x05\xe4\x99\xd1\xc8\x10\x7f\x21\xc9\xaa\x76\xf9\x06\x1c\xc7\x9e\x15\x7c\x4e\xa3\xfb\x05\xfb\xdf\xb2\xc4\x8b\xb4\x6a\x81\xa3\xf3\xc2\x6f\xb6\xd4\xd9\x6a\xfb\x1d\x17\x3b\x20\xe4\x6e\x96\x7a\x11\xcd\x69\xfe\x7b\x2c\xf3\x44\x28\x17\xd9\xe2\x56\xaa\xaa\x9c\x1f\xf3\xd9\x16\x4d\x94\x29\x8b\x43\x0d\xaa\x23\x8d\x68\x42\x53\x81\xbc\x3a\x64\x53\xaf\x49\xc1\xac\x4d\x39\xb9\xd2\x15\x68\x00\x85\x7e\x6c\x7b\x63\x4d\x42\xcd\xf1\xe7\x1b\x26\x03\xc2\xaa\x97\xe5\xc5\x8f\xab\x2b\xb2\xb9\xeb\x3d\xdb\x88\x7a\x9d\xb3\x09\x4f\x37\x0e\x44\x02\xe5\xb2\x27\x0f\x1e\x10\xf1\xdb\x27\xf3\xb3\x26\xed\x5c\x7c\
xc0\xf0\xb9\x40\x33\x44\x31\x51\x58\xa9\x44\x36\x2f\xda\xbd\x76\x1b\xdf\xb7\x58\x8e\xd2\x7c\xa5\x31\x9d\x94\x8a\x74\x89\x0c\x1d\xeb\xa1\x14\x45\x27\x1c\x4c\x06\xf1\x50\x27\x31\x61\x35\x09\xb0\xc5\x79\xda\xce\xc9\x58\x85\x74\x71\x48\xcb\x8c\xf8\xd2\x84\xbe\x4a\xa8\x06\x9d\x91\xcd\x3a\x4d\x7d\x97\x41\x32\x0c\x7c\x84\x28\xcb\xb7\x5e\xe1\xc5\x77\x07\x39\xad\x53\x05\xb0\x46\xa2\x76\xea\x5a\x93\x5b\xfe\xb5\x60\x96\xfb\x8b\x78\x99\xeb\x2e\x88\x6f\xaf\x77\x43\x05\x64\x2a\x92\x66\x74\xfc\x39\x97\xa7\x26\xce\x22\xe5\x2d\x67\x2e\xde\xca\xc5\x97\xe0\xc6\xd7\x1b\x8c\x98\x93\xfc\xd8\x1b\x88\xd8\x0c\x29\x8c\x1a\x60\xeb\x3f\x40\x05\xb0\x63\x3b\x08\xae\x24\xa6\xce\xaa\xdc\x98\x39\x51\xde\xd2\xa0\x0d\xfe\xb3\x79\x71\xb2\xf9\xe8\xbb\xe0\xd1\xe4\xf4\xcb\xe3\xcd\xeb\xff\x19\x44\xfd\x82\xe6\x85\x02\x5f\x61\xf0\x15\x63\xfe\x4a\xa3\x6d\x30\x4e\x50\x00\x0c\xfe\xd3\xd9\xbc\xe8\x3e\xab\x1c\x28\xa6\xc0\xc1\x40\x07\xcb\xe2\xe1\xb0\xa0\x7b\xdc\x85\xb0\xb0\x3a\x9c\xc3\x43\x5e\xb6\x21\xa3\x61\x9b\x14\x2c\x3c\x01\x12\xd3\x57\x85\xb7\x33\x66\x5f\x18\xa3\x43\x60\xfb\x8f\x7e\xf4\x82\x59\x5d\x86\xd8\x5d\xed\x1c\xbc\x1d\xe7\x73\xf6\xef\x38\x58\xe4\x20\x3c\x88\xdf\x3d\xec\x9e\xd1\xee\x2d\xf7\x3a\x8f\x3a\x6b\x54\x7e\xa4\xf6\x76\x8e\x19\x1a\x8c\x67\x64\x1c\xe4\x4e\x35\x51\xce\xa9\x64\x39\x17\xb3\x83\x48\x89\xaf\xb1\xe6\x04\xc5\xdb\xca\x97\xf3\x39\x0d\x4b\x69\xcb\x6a\xee\xae\x69\xcc\xaa\xbe\x8a\xd6\x06\x03\x3e\x20\x0b\x39\x81\x2a\x29\x7e\x39\x1b\x90\xd6\x86\x08\x88\x57\x41\x0e\xae\x68\x66\xc1\x8e\x6c\xc4\xd4\xa4\x48\x59\xc7\xe7\xee\xe5\xf1\x26\xdc\x50\x12\x8b\x3c\xc0\x75\x77\x31\x23\x31\x85\xc7\xd4\x28\xfe\xde\x62\x41\x33\xd6\x5b\x39\x0f\x09\xc4\x2e\x9c\x46\x3c\xbc\x5d\x90\xd3\x79\xb0\x60\xf3\xb1\x65\xe8\xf9\x3a\xca\x7e\x01\x75\x1a\x9c\xb2\x6d\x3d\xe9\x92\x1f\xc8\xb7\x6c\x37\x17\x59\x27\xd1\x69\xbf\x48\x8f\x59\x43\x42\x13\xb4\xbe\xb7\x87\x32\x81\xe2\xab\x2b\xfc\x7e\xcf\x53\x23\xd6\x2d\x59\x35\x96\x78\x0a\x47\x6b\x52\x73\x7c\x83\xef\xeb\xe8\x0b\x8a\x4c\xdf\x88\x83\x9e\x24\xc7\x12\x5a\x2c\xd2\x3b\xa5\x45\xa9\xbc\x56\xfb\xf2\x0a\xa4\x88\x54\xc6\x8a\xfc\xec\x47\xd7\xa2\x9d\x76\x5b\xd0\x92\x4b\xa7\x06\x82\x6f\x44\xb5\x08\x68\xec\xf4\x9e\x55\x54\x41\xc7\xb2\x17\xe8\xd6\xdd\xa6\x69\x60\x79\x33\x6d\xf9\xc7\xa8\xf4\x3b\x76\xee\x99\x70\xff\xf9\xf2\x22\x4e\x91\xb8\x41\xc1\x75\x04\x6c\x92\x90\xdd\xff\x8d\xbd\x52\xea\x46\xf4\x65\xb3\xd2\xda\x9a\x2a\x69\xd3\x2a\x69\x4a\x9e\x5a\xd2\x34\x18\x69\x91\x32\x89\x32\x0a\xc9\xf6\x26\x77\x19\xf4\x48\xdc\x0f\xf2\x36\xf9\xf3\x84\xcd\x0b\xc2\x6d\x3b\x5c\xdb\xae\x5a\x52\xf6\x5f\xf6\x0b\xe7\x03\x98\x6f\x2b\xfb\xad\x66\xf4\x6b\x49\x33\xde\x6d\x4f\xfa\xd4\x95\xf8\x40\x32\x3c\xdf\x6b\xab\xb6\x59\x4f\x45\xe2\xee\xcb\x57\x9f\x09\x21\x23\x2f\xc2\x8d\x92\xaa\x51\x3f\xa6\xea\x91\xc7\x9b\xfe\x4b\x02\xe9\x87\x58\x1e\xa6\x73\x2d\xe5\xd6\xc7\xd8\xf4\x9e\x24\x7d\x37\x5f\x46\xdc\x4d\xbe\x93\xf9\xce\x80\xa4\xc3\xbb\x61\x89\x85\xb2\x6f\x49\x5e\x04\xc9\x98\x71\x11\x5d\xf8\xea\x4a\x21\x4d\x14\x86\xc7\x6b\xf0\xcb\xf0\x9b\xe1\x4d\xe5\xa6\x11\xc0\x8b\x54\x95\xed\xa6\x88\x92\xe7\xe1\x3a\x2c\x7d\x70\x6c\x8b\x1a\xa2\xc8\x13\x21\xc9\x8b\x1f\xc1\x5a\x45\xcf\x60\x34\xbc\x6f\xed\xbb\x43\x0f\xef\x4b\x63\xdc\xc8\x1e\xd7\x63\xe7\x47\x6d\x43\xb2\x2a\x7e\x64\xd1\x1b\x61\x48\x96\x68\x37\x1c\x11\xeb\x53\x51\x3f\x1c\xde\xf5\x1b\x0c\xe6\x48\xf4\xad\xe1\x62\x60\xf2\x45\xb2\x8c\x63\x08\x92\xd0\x71\x57\x08\xd8\x6d\x83\x0a\xc3\x33\x76\x71\x5d\xdb\x70\xe4\x23\xde\xd9\x06\xec\x80\x03\xde\x84\x19\xf0\xa4\x1b\x4d\xa4\xe8\x5e\xd3\xd1\x80\x07\xc0\xfa\xb1\x38\x01\x35\x1a\x8e\xc4\x0d\x8a\xd1\x90\xa5\x41\xc1\xca\x31\xd8\x07\x12\xbe\x8f\x82\x89\x5c\x2a\xa9\xce
\x1c\xc4\xdf\x73\x73\x5d\x69\x03\x84\xca\x31\xb0\x62\xf6\xa3\x01\xe5\x39\x29\xbb\x74\xf7\xa9\xf5\x75\xb8\x98\xe4\xaf\x70\xb5\x2d\xeb\x35\x19\x43\xd4\xa7\x0e\xf5\xec\x6d\xf8\x38\xba\xca\xa8\x03\x31\xee\x97\x6c\x02\xe9\x72\x4e\x46\x71\x3a\xfe\x4c\x66\x34\x08\x69\xc6\x3e\xd2\xb9\x6d\xb4\x11\xe5\xcf\x59\xb2\x4f\x68\x98\xd1\x0b\xe5\x16\x1d\xca\x92\x49\x14\x17\xb6\x32\xd3\x43\xb0\x00\x6b\x78\x1f\x66\x29\x95\xe7\xfc\x6f\xb6\xb6\xf5\x41\x9f\x83\xd7\xe0\xa5\xfc\x98\xce\xeb\xc2\x55\xf9\x4e\xe9\x2e\x94\x2f\xe0\xb0\x3e\x69\xaf\xb9\xfd\xb8\xc1\xcc\xc4\x29\x13\xf3\x16\xd1\xd8\x9d\x87\x8f\x2c\xb9\x6e\x1e\x0a\x05\x54\x31\x01\x50\x93\x31\x01\x50\xac\x72\x02\x9e\x3c\xd6\xf8\xe7\xd0\x37\xc6\x3f\x54\x85\x6b\xf2\xa1\xdf\x01\xba\x11\xf6\x4b\xfc\x8e\x08\x91\x6f\x28\x7f\xf4\x64\x2a\xbc\xf9\x19\xaa\x5f\x3c\x1d\x04\xc3\x21\xff\x4f\xa6\x08\x03\x92\xa1\xfe\xc9\x73\x90\x71\xc9\x10\x7f\xc8\x72\xc7\xc5\xe4\xe9\x50\xfc\x2f\xd3\xc0\x5c\x65\x28\x7f\xe8\x7a\x38\xac\xfc\xa5\xd3\x05\xbc\xfa\x29\xea\x71\x6d\x6e\x87\xbe\x44\x0e\xed\x9a\x72\x0e\x3d\x69\x06\xac\xb4\x9a\x1c\xda\x09\x72\x1c\x3f\x53\x18\xc5\xcf\x14\x8d\x01\xd2\xc4\x0f\x09\xa7\xa4\xc5\x21\xfe\x90\xb9\xa6\xca\x7a\xe8\xa4\x28\xac\x71\x41\x7d\xa8\x7f\xf2\x1c\x24\x1d\x0f\xf1\x87\xcc\x35\x4e\x22\x43\x3b\x41\x42\xa1\x7c\x2b\xc7\x3a\xba\x0f\xdd\x24\xd9\x43\x07\xd2\x49\x92\x75\x4a\x61\x6c\x88\x7e\xe3\xfe\x26\xd3\xa1\xfa\x25\xd3\xf9\x9e\x3a\x54\xbf\xd4\xe8\xf9\x7a\x1f\xea\x9f\x6a\x4c\x6c\x97\x1c\xca\x1f\x32\x95\x6d\x58\x43\xf1\xbf\xaa\x83\xf1\xbb\xa1\xfc\x21\x53\x81\x6d\x0c\xe5\x8f\x1e\x2c\x30\xee\x9f\x4e\x3c\xea\x6e\x0d\xb7\xbe\xeb\x55\xba\xb7\xe9\xb5\x96\xc5\xe4\x69\x6b\xf8\xf4\x9b\xeb\xd3\xde\xf6\x56\x13\x87\x0f\xe6\x12\xde\xe3\x0b\xb8\x25\xfc\x1c\xb4\x86\xa4\xb5\xd9\xdf\xde\xec\x6f\xb5\xd6\xae\xa5\x27\xb8\xed\x46\x81\x8a\xef\x1d\x49\xdc\x3b\x92\xf8\x2b\x38\x92\x10\xb5\xac\xb9\xae\xe0\xfe\x4e\x27\x93\x8c\x5e\x92\x9f\xa3\x78\xfc\x99\x92\xef\x7f\xa1\x93\x89\xed\x4d\xa2\xa1\xc3\x38\x00\x8b\x82\x84\x1c\x31\x89\x3b\x00\xa8\x28\x48\x5c\xb0\x1f\x83\x11\x03\xfb\x47\x3a\xa5\x71\x5e\xd0\x38\xa6\x19\xf9\x7e\x02\x89\x2e\xf0\x4f\xc1\x19\xf9\x39\x4d\x43\xf2\xfd\xb4\xd4\xcb\xc5\x63\xed\xdd\x47\xb8\x82\x7c\x13\x24\xc1\xd4\x74\x3d\xd1\x1f\x30\x2c\x0c\x32\x0e\x30\xe7\x00\xd2\xc5\xc4\xe1\x08\x0e\x47\x36\x70\x34\x0a\x12\x09\xf2\x12\xac\xf8\x6d\x08\x2e\x79\xe5\x03\x5a\xcc\x24\xe0\x8b\xe7\x15\x70\xe1\x48\xb9\x9b\x9d\x55\xd5\x97\xcf\x54\x7d\x6f\xc1\x31\x79\x19\x60\x42\x0b\x09\xf8\x8e\x66\x39\xbc\xa4\x2a\x87\x5e\x08\x10\xd5\x89\xf3\x20\x9b\x57\x75\x83\xe5\x2b\x60\x5a\x14\x10\xb4\xc9\x85\xcf\x45\x96\x04\x95\x5c\xc5\x80\x94\xec\x82\x9d\xa8\xb4\x6f\x8f\x28\xb6\x2a\x44\x51\xe5\xcb\x5d\x84\x70\x20\xe9\x8c\x49\xbc\xdb\xa0\x49\xe8\xe9\x1b\xcf\x90\x60\xcf\xe1\xc4\xe4\x42\x8d\x58\xba\xc2\x64\x96\x2e\x68\x56\x5c\x7a\xe0\x16\x22\x4b\x82\xbe\x2a\x8a\xc5\xbb\x2c\x3d\x8b\x42\x2f\xb9\xb1\x85\xba\x10\xd9\x8a\xd8\x16\xe3\x8a\x12\xd1\x62\x6c\x17\x68\xe6\xd0\x70\x6d\x4d\xc9\xea\x3f\xd3\xd1\x0e\xe9\xc8\x6a\x4c\xa7\xbc\x99\xbd\x42\x12\x7a\x6e\x2d\x1b\x5d\x12\xf9\xe7\x15\x91\x56\x51\xcf\x25\x14\x02\xa2\xfc\xa9\x0b\x3d\x67\xcb\x05\xfc\xf4\xe3\x2a\xc2\x91\xc8\x7c\xf1\xdc\xc9\xcb\x67\xb2\xe4\x87\x99\x5b\x32\x81\x35\xc0\x72\xdf\xd2\xc2\xc9\x5d\x68\xc2\x67\x20\x72\x1d\x38\x70\xa3\x5f\x7f\x95\x6d\x30\xba\x76\xfb\xa0\x09\x1c\x80\xc4\x67\x07\xc3\x68\xca\xd6\x47\x8d\x60\x11\x0d\xd5\x66\x28\xfe\xe7\x47\x0e\xdc\x49\x81\xad\xdc\x28\x8a\xc9\x67\x68\x7c\xf5\x14\x0c\xa2\x97\x21\xfe\x70\x9a\xf8\xa4\xd6\x00\xff\xe1\x0c\x50\x00\x74\x74\xfb\x82\x9c\x23\x9a\x0f\xd1\xef\x0e\x37\xe6\xb9\xee\xee\x32\x89\x69\x30\x00\x0f\xbc\x39\x25\x7a\x0c\x2
x94\x65\x51\x7c\xfe\xb4\x32\xb6\xd3\x24\xd3\xaf\xae\x24\x2c\x2d\x5f\x2d\x7a\x0a\x54\xae\x77\x44\xf3\x6f\xb9\x8e\x47\x23\x29\x81\x98\x66\x7b\xa1\x9c\xa2\x35\xcb\x88\x56\xe3\xf1\x0c\xfd\x78\x86\xfe\x67\x9f\xa1\x8b\xbb\xae\xfe\x1f\x7f\x68\x77\x5d\x9b\x63\x7c\x85\xde\xe2\x14\x9f\x67\x7f\x04\xd9\x1f\x11\x7a\x13\x8c\xf1\xd5\x7f\xa6\xf9\x30\x5b\x19\xcd\xd4\xe3\x70\x87\x45\x33\x3d\xc2\x43\x9c\xe2\x78\x80\xbb\x88\xb4\x9f\x75\x57\x57\xcf\xa3\x7c\x34\xeb\xaf\x0c\x92\xc9\x2a\x3f\x75\xaf\x9e\x27\x2f\xc5\xef\xfe\x38\xe9\xaf\x66\x97\x41\x3a\x59\x8d\xe2\x1c\xa7\x71\x30\x5e\x25\x5d\xc2\x57\x39\xff\x77\xe5\x3c\xf9\x5f\x1f\x9a\xcd\x07\xbe\x1a\x2b\xee\xbb\x8e\x09\x36\xff\xf0\xc3\x35\xfc\xf8\x5b\x5c\x76\x51\xcb\x57\x9c\x5f\x26\xe9\x97\x23\x0c\xa1\x6a\xcb\x14\xe5\x7a\x71\x53\x5b\xde\xff\xe3\x8f\xcf\x25\xa5\xee\xe2\x9b\x77\x1d\x0f\xb6\xe3\xa0\x3f\xc6\xf3\xb0\x94\x4a\xda\x11\xb4\x17\xb8\x0b\x6e\x97\xc1\xb4\x22\x6e\x45\x49\x07\x6e\xd6\x02\x77\xc0\x2d\x4c\x2e\x63\x16\x85\xb8\x0c\x31\x5e\xcc\x8e\x95\xe5\x6b\x75\x77\x53\x07\x62\xb3\x69\x05\xb4\x68\x21\x3b\x52\xc6\xb7\x3b\xa3\x94\xe2\x3c\x8d\xf0\xc5\xbc\x08\x10\xbc\x98\x1d\x2d\xcb\xd7\xbb\x90\x56\x4e\x76\xbb\x39\x44\x45\xca\x38\xc8\x49\xfb\x74\xe7\x21\x3a\xc7\x15\xdc\x99\xed\xb8\xa8\x1f\xee\x30\x26\x34\x7b\xc3\x9c\x18\xa9\x76\x1c\xd4\x0f\x77\x1e\x0d\x96\xb0\xa5\x1c\x19\x5a\xc8\x8e\x8f\xf1\x8d\xa3\xd4\xaa\x84\x52\xc9\xad\xae\xa1\xe2\xd4\xd9\xb2\x74\xfb\x57\xf0\x43\xe9\x65\xc1\x88\x8a\x97\x9c\x0f\x48\x37\x8e\x53\xf5\x99\x53\xbf\x04\x88\x90\x60\xf1\x78\x8e\xa5\x8b\xc9\xe9\x4c\x7a\x90\x64\xf1\x07\xbd\x66\x1c\x45\x17\x4e\xdf\x18\x32\x27\xf0\xdd\x79\x86\x2c\x87\x6d\x51\xca\x2a\xb0\xe1\xbb\xe3\x78\x65\x39\x5f\x11\x61\xc9\x16\xec\xd6\x7a\x2f\xd9\x7c\x3c\x53\x3d\x9e\xa9\xfe\xd9\x67\x2a\x76\xa0\xe2\x17\x44\xdf\x36\x4a\xfb\x6d\x0c\xab\xb9\x77\x54\x30\x8d\xb8\x30\x4e\x53\xfc\xe5\xa3\x32\x0b\x34\x7a\x5d\x56\x1a\x95\x92\x97\xce\xaf\xa7\x44\x3e\x60\x11\x28\x5f\x3f\x95\x18\x78\x94\x0f\x46\x35\xf2\x5d\xcf\x2d\x32\x08\x32\x8c\x9e\x13\x8a\xcf\xf2\xe7\x5d\xe5\x13\x4c\x56\x7a\x9e\xad\x64\xa3\x68\x98\xd7\xb4\x84\x22\xc8\x48\x0e\x58\x37\x0b\x30\x96\x0c\xee\x6b\x31\xbe\xa4\x11\xa8\xe8\x85\xec\x6b\x0b\x1a\x53\x1c\x87\x51\x7c\xfe\xe0\x78\x1c\xd2\x76\x64\x1b\x22\x1b\x52\x2c\x74\xa0\x89\x8d\x06\xce\xa8\x4c\x13\xac\xdc\x48\xd2\x81\x28\x35\xdf\x92\x90\x41\xd3\x65\x04\x85\x14\x2c\xb2\x93\x45\xaa\x8e\xa3\x38\xcb\x83\xf1\xb8\x52\xcb\x5a\x69\xbb\xaf\xbb\xbb\x50\x09\x1e\xe7\x38\xff\x90\x9c\x57\x08\x28\x46\x4a\x39\x7d\xec\x69\x8b\x5a\x91\x92\x56\xa7\xc9\xdc\x38\x1c\xa4\xc8\x9c\xf6\x7a\xa3\x20\x3e\xc7\x15\x9a\xb4\x09\x1f\x14\x84\x6c\x92\xa5\x8c\x9e\x22\x08\x91\x8e\x49\x8d\x24\xe3\xb1\x2c\x0f\x2c\xcc\x6f\xb2\xd1\x68\x05\x58\xa3\xc1\x6e\xb2\x91\xc9\x6e\xdc\xe2\xd3\x9c\x5b\x1a\x83\x0c\x90\x71\x4b\xa3\x58\x12\xdc\xab\x9a\xde\x4d\x8c\xc8\xa6\xa9\x7f\x38\x44\x4c\xd2\x45\xc6\x35\x05\x6d\x96\xe1\xa0\x17\xbd\x5b\xf3\x1a\x19\xdf\x43\xdb\x32\xe9\x19\x92\x28\xc5\x01\xe7\xa3\x2e\xf9\x0f\x05\x96\x8d\x46\x5d\xf2\x1f\x2a\xbc\xda\x12\x32\xb4\x5a\x8f\x22\xe9\xa3\x48\xfa\x0f\x17\x49\x0b\x3d\x3f\xf7\xb1\xbe\xa7\x6c\xd1\xd4\x3f\xfc\x08\x9f\x93\x79\x0e\xd2\xcd\x7e\xe4\xc8\x4b\x90\xad\xbe\x53\x8b\x42\xa2\x7f\xae\x9e\x8f\x06\xc1\x54\x06\xe2\x82\xb1\xd7\xdb\x3c\x34\x21\x48\x98\x30\x47\x74\x66\xbd\x8c\x36\xd0\xf3\xfa\xd5\xa0\x13\xbe\x0a\x1b\x83\xb0\xd5\x7a\x15\xac\xb5\x5b\x83\xd6\xab\x56\xa3\xd3\xc2\xfe\x7a\xfd\xd5\xa0\x5d\xc7\xcd\x56\xd8\x69\xb5\x3b\x8d\xfe\xf3\x02\x17\x1b\x98\xc0\x0f\x7c\xdf\xef\x0f\xea\x6b\xad\xc1\xab\xc1\x30\x58\x5b\xf7\x87\xf5\x41\x73\x1d\x77\x9a\xfd\xb0\xed\x0f\x5e\xf9\xfd\xf5\x60\x58\xaf\x3f\x77\xf3\x26\x8a
\x63\x57\x92\x74\x83\x7e\xd4\xb5\x0c\x62\xc1\x09\x99\x1b\x7c\xd7\xda\x3f\xba\xd3\xd3\xc2\x04\x6d\x03\xb2\x3e\xae\x16\xb8\x66\x77\x29\x54\x85\x63\x96\xcf\xe2\xb3\xae\xef\x3d\x9b\x33\x4f\xcf\xba\x0d\xc2\x6c\xdb\x8f\xcc\xf6\x91\xd9\xfe\xb3\x99\x6d\xc1\x6b\xb9\xf2\x4b\x63\xb6\x65\x86\xc9\xc3\x34\xf9\x03\x4f\x82\x78\x25\xc4\x3f\x7d\xab\x74\xfe\xfa\x05\xe9\x5d\xd2\xf5\x53\x45\xa2\xf4\x9d\xbe\x50\x32\x12\x68\x25\x32\xbd\xc4\x6d\x72\xef\x2f\x9e\xe1\xbf\x24\x5b\x3f\x1f\x8b\x87\xcf\xd7\x5f\x35\xc7\xf7\x9d\x53\x7c\x5b\xba\x54\x92\xe4\xdb\x16\xba\x45\x1b\xe1\xff\xb0\xbd\xa5\x75\x21\xdd\xf6\x77\x92\x5e\xdb\xd9\xef\x7b\x4a\xb0\xfd\xc3\x06\x25\x1c\xed\x15\xd9\x50\x86\x51\x8c\xc3\xbb\x64\xe0\xe6\x99\x63\xf3\x04\xb1\xb4\xd5\x45\x3a\x6b\x48\xc7\x2d\x0e\xdb\x22\x21\xeb\x0a\xda\x27\x1b\x5b\x84\x33\x46\x49\x30\x4c\xda\x58\x6a\x39\xb0\xe7\x66\xd3\xe7\xe3\xba\xc3\x46\xea\xeb\xc7\xd9\x78\x7c\x23\x19\xbb\x47\x43\x84\xaf\xa2\x0c\x8a\x5b\x87\x5c\x6b\xb1\x34\xa7\x3c\xcf\x60\xc3\x5b\xa3\x39\x6c\xe4\xbc\xfb\x2f\x91\x7f\xb6\xe4\x48\x33\xbf\x32\x4d\xa6\xb5\x25\xc8\x5b\xcd\xee\xbd\x08\xff\x87\xf5\x04\xa3\xf5\x83\x2c\xdc\xa8\xc3\x4d\xed\x1b\x0a\xcc\xf2\xc4\x4e\x8a\xaa\x1d\x84\x8b\x18\xd9\x2b\xde\x0b\x27\x35\x56\x4f\xdf\x0d\x75\x84\x28\x89\x78\x82\x92\x22\x9d\xf7\x3b\x9c\xd7\xa4\xd3\x39\x8e\x67\x13\x9c\x06\xfd\x31\xee\xa2\x3c\x9d\x61\x53\xf5\x17\x4c\x70\x56\x9a\xaa\x5b\xca\xe7\x0d\x85\x41\x79\x8b\xa4\x1c\xde\xd9\x9c\x24\xde\x99\x96\xc5\x3b\x73\xa4\xf1\xd6\x8b\xbc\x56\x34\x62\xa2\x79\x9f\x67\xef\xa7\x9d\xb0\x67\x77\x49\xfa\xbf\x7b\x50\xde\xa3\x43\xc6\xfa\x42\xe0\x07\xd9\x75\x3c\x78\x07\xfb\x0d\x11\x79\xa1\x0b\x4b\x67\x4a\x4e\xf4\x4d\x56\xa4\x26\xb9\x69\x68\xd5\x94\x49\x02\x10\x2a\xcb\x80\xdb\x65\xb4\x0c\x38\xac\x0c\x46\x41\xba\x99\xd7\xea\x4b\x2b\x79\xf2\x69\x3a\xc5\x69\x2f\xc8\x70\x6d\x89\x7f\x86\xac\xca\x35\x7f\xc9\xb9\xf1\xf0\x99\x75\x67\x1e\x2d\x36\xee\x22\x8d\x2a\x8f\x88\xc6\x6b\x9c\x93\x0e\x99\x2b\x46\x08\x28\x4a\x82\x6d\xf1\xd6\x96\x62\x5b\x55\xf4\xf0\xcc\xf6\xa2\x0a\xdd\xee\x25\x8d\x4d\x91\x8a\xbb\xac\x83\x7c\xd4\x17\xeb\x65\x71\xd7\xef\x0e\x02\x86\x0a\x33\x27\x6b\x87\x68\xda\xf3\x05\x7b\x55\x31\x3d\xba\x9a\x12\xdd\x3e\xd8\x66\x52\xf4\x1b\x35\x7b\xf9\x39\xce\x17\x4c\x5e\x7e\x8e\x5d\xdb\xc9\xf7\x9d\xbb\xdc\x42\x1c\xd5\xb3\x97\xeb\x66\x73\x5d\x59\x1e\x35\x95\xe4\xa7\x67\xaa\x7a\x9d\x4c\x13\xab\xa2\x6d\x56\x15\x13\xa1\xcb\x53\xf6\x50\xe9\xd0\xf9\x00\x49\x07\x73\x2d\x66\x0f\x39\x62\x77\x1e\x8f\xd8\x8f\x47\xec\x7f\xf6\x11\x5b\xd2\x67\x32\x0e\x31\x61\x2c\x5d\x3d\x69\xff\x17\x1e\x0e\x53\x7c\x8d\x7e\x89\xc6\x83\x2f\x18\xbd\xf9\x1d\x0f\x87\xae\x68\x3d\x0b\x85\xf6\xd9\x0f\x52\x72\x84\x3f\x08\xe2\x01\x0e\xa0\xac\x2d\xa8\xcf\x2d\xe2\x00\xb1\x2a\xef\x82\x0b\xf4\x4b\x92\x84\xe8\xcd\xb9\xf3\x90\xdf\x2a\x0e\xf9\xff\xc5\xb8\xa9\xe2\x3c\xcc\x58\x6c\x59\x4a\x5b\x4b\xa0\x3a\x3d\x0b\xad\x2d\x05\x2d\x4e\xd3\x44\x0b\x1e\xb4\x4a\xdf\x51\x1b\x04\xba\xed\xec\xe5\xcf\x33\xb2\x31\x4e\x93\x38\x8b\xfa\x63\x4a\x60\xd3\x00\x9c\x48\xd0\x84\xdd\xf9\x90\xbd\x68\x9a\x26\x17\x51\x88\xd3\x4c\xd4\x0a\xc6\x59\x62\x56\x4d\xc6\x63\x52\x95\x50\x1b\xb7\x1e\x47\x71\x12\xd2\xaf\x51\x3c\x48\x26\x32\x64\x02\x8c\xe5\x14\xa0\x57\xae\x79\x34\xc1\x64\xb1\x45\x19\xf2\x51\x86\x07\x49\x1c\xc2\xee\x18\xc5\xe7\x63\x9c\x27\x31\x0c\x27\xe9\x5e\xc9\x41\x9f\xa3\xaa\x1c\xf7\xf9\x4b\xb4\x21\xba\x22\xe9\x19\x48\xdb\xa0\x01\xbe\x91\x5e\x72\x5c\x64\xad\x83\xf3\xf0\x47\x24\x94\x51\x9a\xc4\xc9\x2c\x1b\x5f\x43\x18\x0c\xc7\x3e\x4c\x3e\x59\xce\x23\x28\x0c\xf2\xc0\x79\x42\x56\x7b\xab\xa8\x3c\xe2\x50\xe9\x3c\x01\x23\x9f\xd4\x7e\x50\x7a\xaf\x24\x37\x4c\xe2\x2c\x21\x5b\x17\x21\x8a\x1a\x25\x8
d\x95\xbd\xf8\x22\x18\x47\xe1\x21\x2b\x5f\x93\x65\x1e\xee\x85\x0d\x83\x21\x49\xf8\xea\x1e\xcf\xc8\x7c\x25\x4f\x0e\xe9\x3b\x40\x69\x85\xf6\xde\x83\x6e\x32\x63\x0b\xe9\xfc\xc2\x4e\xe5\x1b\xea\x5c\x51\x61\x96\x81\xe6\x57\xe5\xd0\x29\xde\x48\x94\xfd\x4c\xd0\x3d\xa2\x54\x88\x85\xa0\x26\x75\x33\x1f\xa5\xc9\x25\x52\xbb\xa7\x97\x57\xba\xc3\xba\x49\x3f\xad\x54\x3a\xf9\x07\x0b\xcd\x3e\x48\xb3\xa5\x24\xa0\x9f\x4b\x85\xf4\x33\x9f\x18\x00\xb8\x41\x11\x52\xf0\xdc\x4a\xb4\xc1\x53\x67\x4a\xb2\x71\x19\x75\xdc\x0f\x21\x98\x73\x4f\xe5\x7e\x06\xb2\x82\x3c\x4f\x3a\x85\xd3\x54\x17\xf1\x2d\xbd\x59\xd2\xcd\x6d\xc8\x9f\x02\x67\x11\x1a\x9b\x3f\x64\x46\x6d\xb9\x7d\x43\xc8\x65\xd9\x5e\x15\x12\xd4\x83\x73\xba\x8f\x0d\x36\x6a\x2c\x3b\x19\x90\x02\x6f\xc9\x77\x8b\x92\x89\xd6\xbb\x0f\xc2\x84\x16\xbe\x33\xc2\x04\x9c\x64\xea\xe4\x4c\xe6\x76\xa4\x98\xdd\x03\x2d\xaa\x34\xc8\xf5\x6c\x30\x1b\x35\xde\xca\x9d\x48\x2f\x9b\x47\x7b\x4a\x87\x04\xd1\xa1\x39\xdb\x1f\xce\xc5\xbe\x4a\xa4\x4d\x7e\x26\x64\x22\x9f\x41\x71\x39\x9f\x2a\xbb\x6a\xae\x94\x96\x44\x5d\x75\xd7\x77\x6e\xf7\xf3\x76\xee\x9c\x1c\xa9\x98\xe0\xa2\x23\x4a\xbe\x1d\x8a\x4f\x73\x39\x36\x8d\xfd\x7f\x03\xd0\xf6\xc2\xb9\x4b\xc6\xf2\x55\x98\x25\x71\x4c\xf2\x24\x4c\xd0\x60\x8c\x83\x78\x36\x45\x31\xc0\x27\x03\x2c\x8e\xed\x65\x43\x25\x61\x6f\x59\x79\x14\x49\x39\x20\x8a\x68\x5c\x1d\x4b\x22\x1c\x9d\xd2\xd2\x67\x44\x48\x22\xd5\xbb\x88\x02\x89\xc2\xae\x01\xa8\x6b\x03\xd9\x2d\x7e\xde\xf0\x9c\xd8\xab\xab\xfa\xe8\x2b\x0c\x80\x09\x60\xea\x6e\xce\x10\xaa\x89\x15\x3e\x67\x72\x93\xa9\x10\x4a\x89\x08\xca\xcc\x68\xe1\x74\x73\x1e\x91\x23\x5d\xa4\xeb\x8e\x49\x1d\xcb\x9c\x1b\x73\x5b\x3a\xf2\x02\x84\x4a\xa4\x50\x97\x77\x88\x5a\x96\x59\x06\xf9\xb5\x34\x3c\x05\xfe\x6c\x74\x6a\x4c\xa3\xfa\x05\x5f\x67\xb5\xa2\xee\x12\xd7\xf2\x42\xb2\x78\xf4\xe3\x8f\xc8\x35\x86\x84\x98\xd2\x13\xfa\xbe\xa6\x14\x7a\xad\x8e\xb3\x2e\x00\x97\x8c\x77\xb1\xfb\xa4\x98\xf0\x02\x22\xff\xf3\x61\x9f\xe0\xc1\x28\x88\xa3\x6c\xc2\x8f\xa1\xe5\xcc\x01\x00\x94\x0f\x2f\x6d\x43\x1e\xd8\x2f\x18\x4f\x45\xfe\x00\xde\xd9\xd5\x17\xbf\x67\xa3\x28\x26\x0d\x5d\x0d\x92\xc9\x74\x8c\xaf\xa2\xfc\xba\xdb\x86\x23\x19\x29\x40\x08\xa2\x46\x36\x87\x2f\xf8\x9a\x6a\x0a\xc4\x68\x4a\xe3\xb5\xba\x8a\x52\x3c\x49\x2e\x30\x0a\xc6\x63\xe8\x55\xe6\x21\x7c\x35\xc0\xd3\x1c\xc4\x7e\xf6\x4a\x2e\x9f\x8f\xf0\x35\x8a\x31\x1d\x91\x3e\x66\xf5\x43\xd2\xe3\x59\x30\x1e\x5f\xa3\xfe\x35\x0c\x19\x19\x1e\x96\x0a\x00\x68\xe6\x17\xb2\x21\x45\xf1\x79\x6d\x49\xda\x07\x6a\x3f\x28\xbd\x43\x5f\xbf\x12\x7c\x57\xa2\x38\xc4\x57\x07\xc3\x1a\xb8\x29\x12\x62\xfb\xfc\x7c\x09\x26\xff\xa5\xaf\x6f\x10\x12\x85\x7d\xc1\xd7\x67\x2b\x62\x25\xea\xe6\xd0\x26\x45\x92\xf2\x86\x69\xf2\xdf\x98\x3c\xe1\x94\x49\xe6\x7d\x40\x6d\x73\x51\x12\x57\xe1\x09\xd4\xa4\xb6\x8c\x26\x99\xc5\xb0\xa9\x02\x75\x50\x21\xea\x10\x70\x96\xce\x64\x38\x57\x7a\x4f\x00\x4b\xaa\x48\x0f\x0d\x56\xb6\x4f\x76\x3f\x1f\x1e\x7c\xf8\xb0\xf7\xf1\xdd\xe7\x93\xbd\xfd\xed\x83\x4f\x27\xf2\xf1\xa8\xca\x0c\x98\x42\x95\x22\x31\x3d\xc8\xd1\xd1\x94\xc9\x08\x5e\x5b\x41\x1e\xa0\x0d\x74\x7a\xf6\x5a\x7d\xbf\x07\xee\xc6\xfc\x75\xb5\xa5\x2a\x00\xae\x4c\x67\xd9\xa8\xa6\xd3\x3d\x13\xf1\x94\xd2\x7b\x61\x46\x0b\x7f\xc1\xd7\x4b\xc6\x18\x14\x00\x17\x18\xbc\x4a\xe2\xa6\x80\xcc\x1a\xe5\x4b\x6a\x12\x4c\x15\x26\x19\x01\xd9\x02\x43\x01\x12\x23\xa4\xa9\x0e\xd3\x7e\x30\x95\x54\x17\x92\x5e\x5b\xf5\x14\xa7\x82\x2b\x70\x8d\xa5\x3f\xf5\x31\xd8\x0f\xa6\xa7\x50\x2d\x82\x2d\x9e\x8f\xcc\x29\x14\x3f\x93\x3c\xd2\x45\xe3\x8a\xdf\x3c\x5a\x58\x66\x4e\x54\xa9\x59\x09\x6f\x72\x72\xb0\x75\xd0\xe5\x44\x86\xc6\xc9\xf9\x7f\xe8\x52\x75\xe2\x90\xab\xef\x2a\x49\x57\x50\x16\x64\xd6\xa3\x23\xfb\x
b6\x32\x09\xa6\x35\x97\xb1\x02\xff\x03\xfb\xc5\xb0\x18\x65\x32\xf6\xec\xa8\x17\x85\xb2\xe3\x8d\xa0\x88\x2f\x18\x65\xb3\x14\xf4\xc4\x9c\x59\x45\x19\xca\xf2\x88\xd0\x03\xe5\xe4\x38\x44\xc1\x10\x1c\x84\xd2\x34\xba\x08\xc6\xda\x5e\xab\xc0\x24\x03\x02\x6e\xff\x74\x69\x44\xe1\x99\x8e\x62\xd1\xa5\x95\x41\x61\x0f\xa0\xd6\x11\x5f\x9c\x0e\x33\x5c\x77\x22\x7f\xba\x41\x78\xcc\xf4\xcc\x96\x1a\xc3\x60\x9c\x61\xf9\x96\x8d\xb9\x3d\xcd\x1d\x53\x91\xce\x9f\xb5\x89\x6e\x01\x83\xcc\x0b\xcc\xb8\xb4\x68\x1d\x87\xff\xd7\xc6\x78\xfe\x00\x35\x2b\x8c\x63\x75\xc5\x00\x52\x28\x4c\xea\x25\x54\x54\x47\x49\x5b\xec\xee\x61\x52\x71\x71\xeb\x19\x90\x7c\xc9\xe9\xca\xb8\x74\xa4\x07\xd5\x50\x6f\xbc\xb4\xd4\x4b\x66\xea\x0a\xa6\x90\x7e\xd6\x6d\x40\x68\x1f\xa6\x0c\x7f\xd6\x6d\x82\x1b\xea\x5a\x95\x3b\x32\x16\x73\x13\xe7\x79\x14\x9f\xdb\x3d\x7b\x81\x31\x85\x52\xe6\x5a\xb4\x21\x7c\xd6\x5e\x1b\x25\x8a\x48\xcf\xc2\x3e\xc8\x15\xb4\x88\x35\xca\xfa\x4d\x50\x5e\x7f\xbc\xd6\x7b\xbc\xd6\xfb\x87\x5f\xeb\xb1\x88\xbe\xec\xd4\x72\x9b\xa8\xbe\xf3\xcc\x61\x1d\xb9\x2f\xb4\xd4\x17\x8b\x18\xce\xf2\x25\xed\xb3\xc3\xc1\x66\x18\x66\x30\x74\x62\x77\x0b\x62\x50\x4b\x65\x68\x46\xc5\x2f\xe6\xf4\xe6\x11\xe1\x2b\xca\x21\x52\x1e\x82\xa4\x00\x74\x53\xa5\xbb\xfd\xd3\xa7\xf2\xf9\x80\x9d\xcf\x9e\xea\x4a\x22\xb2\x6d\x3e\x65\xd7\x56\x52\x39\x89\x57\xd1\x38\x3d\xdc\x95\x8e\x94\x4b\x62\xe6\x70\xa5\x70\x34\x26\x37\x91\xb1\xb7\xa8\x1a\x5d\x42\x11\xdd\xb7\x79\x4f\x33\xcb\x66\x61\xb3\xc7\xe1\x7f\xea\xbe\xa5\x6f\x4f\x2e\xdd\xa5\xb0\x10\xe4\x81\x88\x00\xe5\x1f\x7f\x04\xdc\xa9\x62\x2a\x8a\xcf\x81\x1b\x2f\x29\x10\xf9\xf5\xc5\xbc\x94\xa6\x14\xa2\xec\xa5\x7c\xdb\x4e\x0a\x69\x68\x1c\x64\xd0\xcc\x71\x4e\x26\xfb\x87\x8d\x0d\x63\xa0\xf9\x9f\xf1\x62\x75\x95\x66\x6e\x57\x48\x0a\x96\x5a\x9e\xce\x88\xcc\x96\x66\x39\xca\x12\x6a\xe7\x38\x9d\x02\xeb\x86\xb3\x73\x10\x5f\xe7\xe4\xc0\xef\xa1\x3e\x1e\x12\x06\x40\x97\x38\xbf\x42\x85\xd1\xa0\x4a\x46\xed\x2f\x1a\xd6\x7e\xb0\x60\xfd\xe3\x8f\xc8\x36\xf2\x4b\x46\x7d\x64\x5e\x37\x10\x54\x2d\xee\xd1\xce\xce\x26\x94\x6f\xc6\xf8\x2a\x47\xbd\xc3\x4f\x68\x70\x3d\x18\x63\x4f\x74\x13\x86\x5d\x6c\x36\xd0\x13\xe8\x32\xb3\x59\x9a\xa6\xc9\x80\xf0\xac\x8c\x8e\x8e\xd1\x8a\x74\x0c\x16\xcb\xc4\x36\x17\x96\x8e\x30\xd2\xd0\x4b\xdd\x78\xa8\x5e\xa5\x7f\x96\x61\xa5\xa4\xe0\x12\xcd\x24\x63\xb0\xa7\x02\x80\x6e\xc6\x26\xe9\x62\x6b\xa6\x1d\x94\x23\xdd\xaf\x6e\x09\x75\xe3\x15\x42\xf8\x5e\xe8\x15\x6c\x82\xbd\x97\x75\x48\x54\x67\x00\x9c\x85\xac\x13\x6e\x27\xb9\x67\x4d\xcb\xe9\xcc\xb5\x59\x6e\x32\xaf\xc9\x7f\x48\xd6\x35\xed\x11\x39\x5a\x52\x4e\x2d\x53\x2e\xbc\xbc\x2c\x95\x13\xeb\x55\x3a\xe9\xc3\x87\x20\x0c\x85\x6d\x97\x94\xf7\x53\x7c\xd7\xa7\x47\x3a\x38\x48\x2c\x96\x1b\x6f\xc1\x7b\xc9\x56\x9c\x0a\x74\x62\x24\x64\x4b\xdf\xa2\xdd\x52\x8b\xc5\x68\x58\xbc\x52\xb5\x52\x05\x0b\x02\xad\x82\x86\x7c\x25\x24\xe4\x59\x74\x4b\xb4\x06\x81\x09\x95\x73\x4d\x9a\x83\xa5\x92\xd1\xb6\x4a\xb5\x02\x21\xb7\x01\x1b\x91\xd5\xd5\x6c\x17\x44\xf6\x7d\xcc\x51\xfa\x28\xfb\xfe\xd3\x65\xdf\xc2\xa4\x8d\x27\xec\xbd\x2f\x1f\xdd\xbd\x7e\x10\xab\xd2\x6e\xd4\x0f\x84\xeb\x2d\xbe\xa2\xea\xea\x32\xd7\xdd\xe3\x49\x90\xe6\xdb\xac\x60\xe1\x76\xeb\xbc\x1a\x03\xb5\x12\x34\xcb\xfb\xa2\xe9\xbc\xa5\xd7\xe2\x12\xec\x38\x4f\xa3\xf8\xfc\x06\x5c\x5b\x6c\xef\x89\xb4\xdc\x0f\x62\xf9\xd3\xcf\xc1\x78\x86\x6f\xd0\x05\xf9\x87\x5d\x87\x10\xc8\x43\x9c\xe2\x39\x37\xa4\x9e\x6a\x5e\x00\x41\x6a\x18\x4e\xaa\x58\x9c\x8f\x3c\xc0\x88\x48\xeb\x1e\x6d\xc9\xdc\xc2\x40\xed\x46\x47\x19\xb2\x4d\xf7\x83\xb8\x96\x27\x4b\x4c\x55\x04\x3a\x1c\xf2\x99\xab\x7c\x6a\x16\x2b\x22\x52\x0f\xd2\x44\xd4\x9e\x47\x54\x7d\x43\x21\x32\x3f\xdd\xe7\xa6\xfe\x98\x41\xdc\
x8a\x52\x22\x8b\xd9\x1c\x62\x78\x8f\x4e\x12\xe6\xd9\x2b\x77\x07\xaa\x33\xe8\xb5\x25\xb3\x6b\xbc\x3d\x21\xc7\x40\x37\x6c\x92\x2e\xb8\x48\x08\x4f\x69\x9c\x8f\xe4\x94\xe0\xb5\x25\x68\x84\x61\x1b\x67\x79\x94\xcf\xa8\xc0\x65\x9a\x7f\x85\x78\x9a\x64\x51\x2e\x63\xc9\xe0\x0a\xf4\x00\xcc\x60\x1c\xe1\x38\xd7\x2d\x31\x2a\x37\x6c\x98\x58\xf0\x54\xe3\xe6\x08\x2e\x8a\x91\x39\x7e\x5c\x05\x5f\x7a\x95\x2c\x48\x6f\x38\x8b\x43\xb0\x89\x1c\xe0\x34\x0f\x22\x31\xfd\x8e\xe5\x23\x26\x76\xb1\x75\xf4\xe0\x4b\x48\xe0\x75\x8b\xb5\xc4\x46\x9e\xcc\xa6\x96\xf1\x4b\x92\x6d\x85\xf7\x7a\x9e\x14\x12\x2d\x01\xdd\xa5\x0d\x48\xb4\x39\x9e\xe1\x2e\xfd\x87\x8b\xb9\x5a\xb2\x77\xe7\xac\xb0\xc9\x2f\x26\x05\xe2\xda\x47\x03\xc4\x39\x21\xe2\x1c\x12\xd5\x26\xb3\x2c\x87\xad\x0e\x4f\x70\x9c\x0b\xba\xe9\x5f\xe7\x38\x6b\x36\x96\x98\x30\xfe\xc3\x92\x36\x91\xac\xdc\xbd\x4f\x1f\x99\x04\x6d\x02\x79\x7d\x4a\xaa\x68\x16\x47\xff\x3d\xc3\x28\x0a\x71\x9c\x47\xc3\x48\x65\xc5\x95\x26\x9b\x0f\x4f\x85\x29\x86\x26\xed\x6c\x33\x80\x6d\x47\xda\x84\x5e\xeb\x54\xc0\x07\xb9\x16\xf4\xa3\xa5\x95\x20\x27\x9c\x75\x85\x0f\x30\x07\xfd\xe7\x5d\xa9\xc0\x10\x56\xf9\x30\x5a\xa3\x20\x98\x1b\xe2\xb3\x6e\x93\xc8\xae\x3c\x73\xff\xcd\x99\xd7\xae\x94\x2b\x99\xa9\x77\xdb\x95\x12\xb6\xbd\x96\xb5\xf0\x09\x11\x30\x86\xc1\x20\x4f\xd2\x6b\x8f\x6a\x94\xc9\xc0\x3e\x21\x7c\x9a\xc8\xfa\xc9\x10\x89\xde\x6c\x6c\xa0\x67\x34\x22\xd3\x33\x28\xf3\x64\x75\x15\xf5\x92\xc9\x24\x89\xff\xeb\xf8\xe9\x93\x27\x46\xe7\x8b\x5f\xac\x01\x8e\x53\xed\x19\x19\x86\x14\x3f\x5b\xf2\x90\xf4\x0a\xc7\x83\x97\xfd\x20\xc3\x9d\x96\xf6\x61\x12\xb6\xf5\xa2\x17\xd3\x2f\xe1\x50\x7b\x39\x88\xa6\x23\x9c\xbe\xa4\x90\x97\x5e\x3f\x7d\x72\xf3\xf4\x09\x1e\x67\x18\x49\x9d\xa1\x1a\x73\xda\x17\x3e\x0c\xcf\xd0\x8f\x3f\xb2\x0f\x2b\xc1\x24\x14\x7d\xdb\xdc\xdf\x7a\xfa\xe4\x09\xfd\x50\x3b\xe5\x38\x7b\x48\x45\x15\x9e\x09\x86\xf4\x03\x45\x0c\x7e\xcb\xf8\x9c\x89\x51\x96\x11\x63\x0d\xd1\x70\x18\xa8\xd6\x4f\x93\xcb\x0c\xa7\x4b\x4f\x9f\x3c\x11\x23\x96\x24\xf9\x4a\x2f\xbd\x9e\xe6\xc9\x7f\x1d\xd3\xaa\x37\x70\x7c\x92\xf7\x1f\xf1\x1d\xfd\xf9\xf4\xe9\x93\x9a\x7a\x1e\x7b\x82\xa8\x4a\xe4\x78\x94\xa4\xf9\x60\x96\x67\xf4\x0d\x59\x36\x3d\xb4\x81\x78\xdd\xd7\xd2\xeb\xcf\xe3\xa8\x4f\x3e\xad\x8c\xa3\xbe\xf4\x1e\xb4\x61\x3d\xe8\x14\xf9\x4a\x4a\xad\x48\xef\x14\x08\xc1\xf8\x3c\x01\x10\xe4\xc7\xeb\xa7\x02\x8b\x0f\x49\xf2\x65\x36\x45\x79\xd0\x1f\x63\x09\x93\xe3\xb7\x07\xbf\xb2\x43\x9f\x78\xb7\xf7\xf1\xe7\xcf\xb6\xf7\xc7\x9f\xde\x7e\xde\xdf\xfb\xf5\x73\xdd\xf5\xc1\x77\x7d\x68\xb8\x3e\x34\xad\x6d\xbb\xda\x91\x3f\x1a\x6d\xc9\x1f\x8d\xf6\xe4\x8f\xbc\x4d\x31\x34\xbd\x64\x32\x25\x27\xc5\xb1\x39\x44\xb6\x29\xd5\x6a\x85\xc9\xac\x4f\xc4\x7e\x52\xab\x28\x00\x2c\x56\xc6\x02\xc9\xa6\x0a\x11\x84\x13\x44\x11\x7a\x83\x1a\xed\xce\x6b\x14\x2d\x2f\x2b\xe0\x85\x90\x88\xde\x20\xbf\xb1\x6e\x7c\x23\x7f\xe1\x69\x74\x86\x36\x08\x8c\x37\xc8\x7f\xad\x7e\xa7\x77\xa9\x25\xb5\x6a\xb4\xda\x12\xfa\x0d\xd5\xaf\x7c\xbf\xaf\xd7\x2f\x1e\x6f\x9e\x2a\xbd\xfe\x25\x18\x7f\x41\xef\x76\x6a\x8d\xdf\xd6\x97\xd4\xde\x5e\xd1\x10\x89\xea\xbb\x48\x7b\xb9\xd0\x08\x48\x83\x9c\xf5\x93\x2b\xf5\x23\x58\x1a\x90\x36\xaf\x22\xf4\x1b\xaa\x5d\x15\x1d\x62\xbf\x1b\xd2\xef\xa6\xf4\xbb\xb5\xa4\x75\x16\xa0\xd4\xb2\x2b\xf4\xd3\x4f\x3f\xa1\x75\x28\x99\x5d\xa1\x1f\x51\xfd\x6a\x38\xa4\x03\xd4\x69\x6a\x55\xc8\xea\x38\xbd\x22\x03\x99\x5d\x69\x9f\xf8\xe2\x39\xcd\xe0\xfb\xd5\xeb\xa7\xce\x4e\x4d\x66\xe3\x3c\x9a\x8e\xa3\x01\xa8\x09\xcc\xee\x5d\x11\x32\x0e\x4f\xaf\xce\x5e\x5b\xbe\xb5\xe8\xb7\x86\xf5\xe3\x3a\xfd\xd8\x3a\x2b\x69\x3d\x9b\xf5\x11\x08\x38\x1e\x9a\x44\x57\x68\x90\x8c\x67\x93\x38\x53\xa8\x5f\x86\x49\x24\x85\x5a\x08\xbd
\x7a\x41\x68\xa6\xee\xf3\x91\x62\x8f\x75\xbf\x5e\xd7\x87\x56\xac\x64\x3a\x58\xb5\x1c\x26\xa6\xb5\x84\xbe\x92\xdf\x74\xbc\x1d\x55\x7c\xb9\x8a\xdf\x91\xaa\xf8\x1d\x57\x9d\x86\x5c\x67\x7d\x09\x15\x75\x1a\xc6\xac\x0b\x6e\x40\xeb\xe4\x25\x23\x15\xc5\x17\xf2\x68\x91\xc7\xca\x23\x76\xb5\x2e\x8d\x0f\x23\xcf\x16\x7b\x55\xe7\x2f\x1a\xca\x90\x96\x8e\xa8\xc2\x1f\x19\x8d\x55\x19\x56\x85\x75\x2a\xf5\xe6\x8c\xad\xc2\x56\x95\x8a\x73\x06\x58\x61\xb9\xac\x62\xd9\x28\xc3\x6d\x01\x28\x82\x71\x6a\x72\xc2\x1f\xae\xac\x4c\x90\x31\x80\x8d\x05\x38\x20\x54\x69\xa0\xdf\x50\x78\x4a\xfe\x77\xb5\x8e\x7e\x43\x57\x8d\xb3\x33\x7d\x21\x41\xd9\x08\xfd\xb6\x01\x05\xaf\x22\xa3\x80\xc2\x24\xe1\xe7\x0d\x1c\x6a\xc5\xbe\x72\x98\xe2\x01\xed\x5c\x88\x8e\x06\x49\xcc\x36\x98\x62\x57\x3a\xea\x1d\x7c\x24\x7b\x44\xfd\xaa\x5e\xf7\x50\xfd\xaa\xee\xc3\x7f\x1b\xf0\xdf\x16\xfc\x77\xdd\x03\x5a\x20\xff\x6d\xc0\x7f\x5b\xf0\xdf\x75\xf8\xaf\xdf\x27\xff\x6d\x76\x8a\xcd\xec\xc5\x0b\x86\xd4\x0b\xb4\xb9\x7d\x4c\x03\xb2\x23\x2a\x0e\x21\x22\x10\xa4\x51\x3e\x9a\xac\xf0\x32\xab\x05\x2a\xa4\xf4\x06\x13\x1f\x56\xe8\x83\x24\x61\xac\xe0\xab\x9c\x86\x0f\x10\x5d\xfe\x1c\x26\x47\x38\xc3\x79\x17\x39\xb6\x48\x36\x08\xc7\x5f\xa2\x29\x33\xfd\x4d\x86\x28\x3e\x4a\xe0\x38\x36\x0a\x32\xd4\xc7\x38\x06\xf7\x00\x76\xc1\x15\xc4\x21\xd8\xf0\x85\x51\x88\xe2\x24\x67\x76\x98\x26\x29\xd0\x6c\x2e\x1c\x12\xb7\x17\xfd\xfc\x05\x5f\x1f\xa6\x51\x92\x1e\x51\x13\xe0\x8d\x8d\xe2\xbd\x95\x74\xb8\x5d\x98\x36\xa7\x66\x07\x54\xf1\x8d\xff\x71\x8b\xc3\x0d\x7b\xf3\xc5\x5b\x0b\x7f\xfe\x82\xaf\x7f\x49\x52\xb0\x62\xfc\x82\xaf\x57\x2e\xc9\x6f\x7b\xb1\xe3\xe8\x0f\xcc\x4a\x65\xd1\xf9\x5b\xc2\x80\xd0\x2a\x6a\x95\x2d\x23\xe1\x08\x90\xc2\x00\x99\x60\xf9\xc8\x71\x1c\x8b\x67\xde\xe0\x32\xea\x54\x6a\x81\xf4\x3f\x1b\x8c\x30\x39\x7e\x20\x22\x42\x5b\xfa\x90\x1d\x25\x97\x04\x76\x8d\x37\xb3\x4c\x76\xe9\x17\xa5\x7d\x90\xe1\xda\x87\x85\x37\x2a\x8d\xb3\xf4\xee\x54\x5f\xaa\x85\x8d\x28\x41\x87\x8a\x1e\xf4\xe7\x1b\x86\x21\x7b\xb6\x48\x21\x88\x91\x9d\x28\x4f\x07\xc9\x5a\x8e\xfc\x49\xa8\x9c\x42\x9d\x33\x3a\xb2\x30\xe3\xec\x8d\x85\xd5\xb8\x19\x16\x92\xf6\x13\x03\x38\x84\xd3\xd1\x87\x52\x46\xfb\x07\x86\xf8\xbf\x04\xe2\x4e\xcc\xd9\x2c\x1c\x25\x39\x22\x24\xe9\x2e\x94\xcb\x7b\x80\xba\x05\x94\x42\x3e\x9e\xf5\xab\x40\x06\xf1\x89\xc3\x3c\x93\xf6\x36\xf8\x50\xec\x54\x4c\x46\x3b\x93\x76\x31\xb9\xc4\xba\x52\x00\x30\x65\x90\xd9\xeb\x39\xd8\xee\x47\x57\xc0\xb6\xcb\xb0\xfd\x6d\x03\x98\xf8\x29\x1b\xe4\xd5\x82\x3a\xbe\xa2\x3a\x43\xdd\x32\xd9\xa8\x98\x70\x20\x2d\xb6\xee\x7e\x42\x1d\xc2\xcf\xb4\x09\x43\x1b\x1b\xa8\x35\x6f\xd2\xbe\xbb\xa1\xb5\xf7\xd9\x31\xe2\xae\x35\x63\xd0\x3a\x1b\x92\x33\xf4\x1b\x91\x25\xcc\x45\x34\x97\x9b\xcb\x32\x5d\x39\x9b\x89\xe2\x8b\xf7\x16\x4e\x63\xbc\x76\x33\x1b\x52\xb4\xe0\x37\xe2\xa9\x60\x39\xfc\x95\x83\xeb\xc8\x0c\x8b\xf1\xd1\x97\xa2\x8e\x8d\x78\xe1\xc8\xc8\x9b\xf9\x57\x09\xd1\x38\xd9\xc9\xfd\x72\xa6\x96\x15\xdc\x3c\xc4\xdf\xa0\x16\x78\xb2\xd0\x87\x32\xda\x57\xe7\xe2\x94\x43\x60\x92\xe6\x82\x1d\x29\x01\xa6\x0a\xdd\xea\x1a\x22\xa4\xa8\x0a\xd7\x8e\xa5\x74\x86\x7e\x73\x2f\x4e\xc7\x9f\x2a\x7c\xdb\x57\xa0\x8e\x40\xf3\x54\x5d\x8a\xf6\x39\x70\x4a\xb2\x9e\x34\x3d\x38\x1e\xa4\xd7\x53\x6a\x1a\x2b\xcb\x79\xfb\x1e\x4a\x86\xc3\x0c\xe7\xc6\xcc\xd0\x35\x12\x26\x3d\x51\xaf\x28\xec\x99\x7b\xb5\x57\x9c\x10\x8b\x9f\x7e\xf1\xb3\x51\xfc\x6c\x7a\xc0\x62\xe4\x53\x86\x82\x6b\x88\x17\xc5\x95\x70\xcd\xcb\x60\x8a\x1a\x71\x08\xb2\x67\x2b\x1f\x39\x84\x18\x42\xdf\xfb\xa7\x14\x0c\x91\x5f\xf4\x21\x55\xbe\xa9\x65\x9b\x25\x65\x9b\xd6\x23\x51\x95\x21\x54\x69\xd5\x53\x09\x54\x7d\xf4\xd5\xc7\x86\xfa\xd8\xf4\x84\xc2\xc2\xd8\xbc\x57\x57\xd1\x1e\x3
9\xf9\x7e\x17\x63\x64\x9f\x74\x65\x98\xac\xb3\xee\xa1\xbb\x91\x9b\x8d\x68\xd8\x81\xa0\xb2\x64\x6d\x19\xd8\x77\x98\x05\x0b\x85\x1b\x49\x2a\xaa\x13\x4c\x2d\x3a\xae\xba\x34\x58\x67\xf0\xfa\x37\x85\xd9\xd6\x6d\x1a\xa0\xcc\xd7\xa7\x43\xab\x65\xcc\x0f\xd4\x6a\xa8\xb5\x1a\x7a\x2d\xab\xb6\x29\x6b\xea\xd3\xa9\xd5\x6a\xda\xd4\x50\xef\xb5\xb3\x83\xfd\xe8\x2f\x6f\x81\xb6\x13\xc3\x91\xe5\x8c\x23\xf6\x5f\x3a\xaa\x1b\xc8\x7f\xcd\x7e\xbe\xe1\x33\xc4\x5e\x38\xf6\x5d\x98\xe3\x68\x98\x03\xa5\x7b\x0e\x45\x59\xe9\xc4\x71\xd4\x73\x32\x79\x92\xba\xa6\x2e\x24\xaf\xdf\x24\x45\x57\x2d\xf3\x0d\xb9\xeb\x37\x49\xa9\x55\xcb\x1a\xba\xd4\xf5\x9b\xa4\xbf\xca\x9a\xd2\x6b\x63\x1b\x5e\x5e\xb6\x6d\x00\x80\x9c\xaf\x22\xe7\x3b\x90\x6b\xcc\x41\xae\x59\x8a\x5c\xfd\x96\xc8\x35\x54\xe4\x1a\x0e\xe4\x9a\x73\x90\xab\x97\x22\xe7\xdf\x12\xb9\xa6\x8a\x5c\xd3\x81\x5c\x7d\x0e\x72\x7e\x29\x72\x8d\xb9\xc8\x59\x49\xf7\xd3\x14\x8c\x88\xb2\x3c\xc8\xb1\x59\x00\xd8\x49\x5e\xb7\x74\x0c\x58\x46\xae\xeb\xd1\xe0\x0b\x99\x8b\xbc\x61\xfb\x42\x06\x22\xd7\xb5\xe3\x56\x25\x8a\x75\x3d\xcd\xe1\x7d\xb0\x7c\x6a\xf4\xe4\x21\xad\x1d\xfd\xd4\x62\x59\x3e\xfa\xb1\xc5\x5c\x41\xca\xb9\xa5\x58\x42\x4b\xd5\x28\x41\xac\x1f\x8e\x9d\xef\xc6\xce\x5c\x3f\x06\x76\xc6\x12\x52\xb1\xab\xdf\x06\xbb\x86\x84\x5d\xc3\x8d\x9d\xb9\x80\x0c\xec\x8c\x35\xa4\x62\xe7\xdf\x06\xbb\xa6\x84\x5d\xd3\x8d\x9d\xb9\x82\x0c\xec\x8c\x45\xa4\x62\xd7\x98\x8f\x9d\x49\xad\x98\x47\xb6\xb6\xcb\x25\x74\x1b\xb6\xac\x23\x5d\xc8\x31\x96\x93\xba\xb9\x5a\x56\x95\x21\xfa\x34\x5d\xb2\x0f\x3b\x0a\x77\x51\xa3\xdd\x59\x6d\x36\x98\x06\x7a\xc9\xa6\x0a\xe6\x12\x8b\x10\x90\x32\xe6\x39\xcc\x54\xc3\xcf\x33\x96\xf0\x09\x41\x0e\xef\x61\x30\xc0\x42\x47\x2c\x80\xfc\x27\xbe\x0a\x26\x53\x71\x52\x2e\x3e\xf0\x39\xa5\xb0\x72\x7c\x95\x4b\xb7\xdb\x2b\x9b\xdb\xc7\x2b\xec\x1c\x51\x9b\x70\x93\xf4\x2f\xf8\xda\x43\x83\xe1\xb9\x90\xe6\x0b\x28\xd3\x71\x40\x90\xb8\xca\x91\x0e\x85\x49\xf8\xb5\xa2\x1d\x1b\x20\xa6\xd3\xee\x59\x94\xd8\x9f\x69\xd8\xd4\x5d\x3c\x9e\xe2\xb4\xb6\xb9\x4d\xaf\xf5\xa9\xce\xfe\xe9\x13\x66\xb3\x22\x37\xf9\xfa\xe9\x53\x08\x81\x0b\x06\x24\x8a\x55\x41\xb7\xdd\xf0\xb8\x5d\x42\xb7\x0d\xb6\x23\x92\x65\x42\xb7\xdd\xf2\x0a\x93\x84\x6e\x1b\x7c\x18\x27\x61\xfb\x59\xb7\xe3\xdf\x9c\x79\xed\xc6\x9d\xac\x45\xbe\xa5\x99\xc8\x83\x19\x73\x7c\x43\xb3\x0c\xba\x12\x5e\x20\x66\x40\x41\x9a\x47\x83\x64\x32\x4d\x62\x88\xb9\x4e\xbe\xad\x3e\x7d\x22\xe6\x7d\x1c\xf5\x57\x58\xd1\xaf\x5f\x65\x03\x00\xe1\xf5\x79\xcf\xc6\x1d\x41\x86\x0b\xab\x8e\x20\xc3\xd2\xb7\x5f\x92\x34\x04\xbf\x74\x51\x40\xbc\x91\x21\xcc\x86\x60\xf0\x07\xb4\xbe\xc9\x6f\x79\x0a\x98\xd6\xcf\x0a\x66\x18\x5c\xab\x7a\x64\xa1\x4a\xef\x3f\xe5\xc3\x75\x80\x82\xe3\xc1\x0a\x79\xd0\xb0\xee\xb4\xc4\x57\xfa\x58\x66\x88\x22\xbe\x6c\x5f\x4c\xdf\x6f\xed\x14\x97\x4d\xf4\xd9\x7a\x83\xd5\xcf\xa8\x7d\x1e\x59\x56\xfc\x16\x2b\xc7\x93\xe9\x38\xc8\x6d\x0c\x4a\x44\x99\xfe\x33\x66\x11\x79\xb8\x06\x15\xbc\x0a\x04\xaf\x03\xbd\x5f\xf4\x07\x5e\xe1\x11\x26\xbb\xa8\x85\x6a\x7e\x63\x1d\xf5\xa3\x3c\x5b\x2a\x03\x18\x5d\x58\xe0\xed\xfd\x7c\x5b\x70\x9f\xb7\x3f\xf6\x3e\xff\xba\x73\x70\xb4\xff\x79\xff\x60\x6b\x1b\x6d\x42\x6c\x83\x3c\x88\x73\x94\xe2\x69\x8a\x33\x1c\xe7\x51\x7c\xce\x15\x31\x84\x0c\x27\x49\x58\xf4\xdd\x0a\x73\x6b\xbb\x12\x4c\xc6\x4e\x0d\x98\xd2\xa5\xa0\x66\x72\x24\x1e\xed\x14\x65\xb9\x24\x2c\x66\x93\xa2\xdb\x03\xbf\xef\x59\x0a\x06\x0f\x22\xc9\x87\x5c\x44\x29\x2e\xf5\x4e\xd0\x3d\x99\x03\x74\x32\xc2\x64\xd4\xf3\x04\xcd\x98\x9f\x00\x61\x01\x88\x14\x06\xd0\x0a\xc8\xd5\xe2\x61\x30\x3c\xef\x02\xe9\x72\x5c\x97\xe4\x1d\xd5\xc0\x16\xb6\x8b\x8c\xc2\x66\xe4\x17\xc5\xae\xc9\xb0\xa1\x4f\xed\x31\x25\xdc\x09\xe9\x11\xe4\x
bf\xe0\xeb\x15\x6b\x59\xee\x1a\x3a\x18\x9e\xa3\xda\x01\xb4\x12\x8c\x97\xa0\xce\xc0\x36\x78\x15\xc7\x40\x6d\x8b\x07\x12\xa5\x13\x7a\x43\x48\x84\xf7\x8e\x10\xca\xa0\xac\x4f\xe4\x5c\x11\x0d\xdc\xdf\x55\x29\xc1\x2c\x80\x14\x69\x41\xde\xe3\xf9\xd5\xf3\x0a\xdd\xa6\xb7\xe9\x30\x27\x69\x8d\x5d\x9e\xc1\x10\x7a\xe8\x4f\x14\x5d\x74\x51\x74\x51\xf0\xc6\x1b\xc5\xf4\x40\x99\x6f\x15\x52\x57\x89\x0b\xc5\x24\x07\x5d\x03\x20\x67\x0e\xa1\xf5\xd9\x8d\xb3\xba\x56\x2d\xb2\x87\x2e\xa1\x55\xa4\x27\xc7\x42\x7c\xa4\xa7\xfb\xa5\xa7\x2d\x7c\x5f\xf4\x24\x20\xdd\x8d\x9e\x54\x3e\x7d\x0b\x7a\xda\x8b\xa3\x3c\x0a\xc6\xd1\x1f\x38\x43\x01\x8a\xf1\xe5\xf8\x9a\x61\x18\xb2\xe1\x98\x4f\x4b\x7c\xd7\xb8\x1a\x26\xe9\x64\x3f\x09\x31\xda\xa6\xce\x6a\x10\xa7\xb9\xe0\x74\x49\x2a\xd3\x29\x58\x57\x83\x9f\x1f\xa7\x5a\xb1\xc9\xd8\xc9\xf0\xbb\x23\xd9\x7b\x23\xab\x9a\xf9\xc1\xc6\x29\x6e\x49\x70\x51\x1c\x29\x16\x36\x62\x9a\x24\x72\xb1\xa8\xa8\x37\xa7\x53\x42\x0b\x30\x5a\x3c\xdd\x74\x66\xb9\x66\x20\x43\xbc\x21\x7e\xf2\x4d\x91\xd2\xa0\x79\x2a\xce\x89\xe4\x4c\x0d\xeb\x93\x74\x42\xa7\x3d\xb0\xe9\x6e\x28\x7d\x17\x24\xb5\x51\x90\xd7\x6b\x5b\x49\x6a\x47\x03\xb6\x32\xd6\xb3\x78\x44\x09\x9d\x7a\x00\xd8\xfa\x01\xf6\x45\xb5\xca\x0b\x07\x6c\x74\x54\x3e\x0c\xc1\x1c\x32\xd1\x12\x68\xcf\xee\x48\x3e\x6c\x09\x9a\xb8\x29\x33\x9c\x56\x31\xa2\xa2\x46\x45\x61\x90\x07\xa8\x0f\xb2\x97\x5a\xc2\x21\x8f\x01\x68\x9a\xea\x82\xbb\x3b\xeb\x80\x0f\x71\x0a\x73\x39\x48\xe2\x41\x8a\x73\xfc\x92\x0d\xc7\x38\x39\x57\x98\xb2\x74\x2f\x75\xb4\xd8\x58\x43\x40\x0d\xc0\x9c\xfa\xb7\x30\x9e\x82\x43\x89\xa5\xe0\x70\x81\x4d\xef\x6b\xc6\x5c\x61\x08\x50\xa6\xec\x24\xbc\x81\xb7\xc1\x1a\x90\xc0\x57\xd8\xb9\x24\xfe\x24\x60\xd1\xa8\x59\x2c\x1a\x41\x14\x9f\xdf\x03\x37\x29\x3a\xbf\xc1\xc9\x83\xc1\xaf\x3d\x27\x6d\x3e\x57\xc9\xa4\x4a\xbd\x4b\x8e\xb9\x93\xc2\x58\xc9\xae\x16\xe6\x95\x0e\x9d\x83\x7b\xe0\x38\xb4\xcd\x7e\x00\x5f\xe4\xea\x36\x9a\xa2\xed\xa1\xe0\x22\x88\xc6\x41\x7f\x8c\xa9\x19\x62\xe6\xde\x16\x3f\xf3\xce\x54\xa6\xaa\x9d\x28\x66\x1b\x5f\xe9\x3e\xc5\xe0\xaa\xfb\xcc\xc7\x24\x67\xee\xd1\x34\x6a\x1a\x85\x54\xec\x1a\x28\xca\x10\x1e\x0e\xf1\x20\x8f\x2e\xf0\xf8\x1a\x05\x28\xc4\x59\x9e\xce\xe0\xd9\x43\x29\x0e\xc2\x97\x49\x3c\xc0\x95\xf6\x99\xaa\xd4\x0b\x68\x3c\x14\x0d\x53\xe0\x0f\x4d\xc9\x7c\x24\x6b\xd5\x89\x58\x54\x59\x94\xfa\x45\xc5\xf9\xe4\xcf\x8b\x56\xa7\xff\x9d\x62\x2e\x66\x50\x48\x2d\x11\x0d\x4b\x01\xa0\xd2\xd5\xa2\x14\xb5\x5c\x94\x2c\xc0\x90\x21\x20\x12\x41\x95\x2d\x38\x1c\xb2\x80\x99\x9c\x53\xef\x48\x13\x62\x5d\x7c\x66\xed\xb9\xca\x66\xbf\xb1\xbe\xda\x6c\xc8\x9f\xa8\x4a\xc4\xf6\x45\x93\x83\xba\xc8\x57\xbe\xaa\xf2\x6f\x17\x35\xaa\x9c\x9d\x32\xab\x2a\x3b\x98\xaf\xc8\x46\xce\xb5\xc9\x4f\x2d\x6c\xa4\x4f\x46\x58\x12\x0a\x58\xa6\xad\x00\x8d\x40\x6b\x4c\x84\xcc\x0a\x4b\x91\x8b\xb0\x9b\x31\xc7\x07\x22\x0c\xf0\x65\x4d\x84\x26\xb6\xae\x2d\x1d\xfa\x06\x87\x25\x66\xed\x6d\xaa\x3c\x35\x1d\xb9\x21\xdb\x3a\x57\x99\x52\xaf\xeb\xf4\x9b\x22\x7f\xe2\x53\x86\xc7\x78\x90\xd3\x86\x8f\xf3\x34\xc8\xf1\xf9\x75\xcd\x65\xae\x2d\x69\x9f\x41\x5c\xdc\x40\xcf\x29\x2b\x7d\xee\x34\x0f\x63\xb3\x71\x18\x64\x19\x61\x13\x6f\x83\x0c\x87\x8a\xc7\x9c\xfc\x57\x6e\x1c\xc6\x40\x1d\xe3\x14\x0e\x5c\x64\x57\x73\x43\x2a\x5f\xe4\x7a\x72\x3f\x76\x9f\x51\x62\xa3\xee\x42\x8a\x91\x93\xcc\xd8\xcc\x1b\x96\x32\xbb\xd1\x22\x0a\x98\x7d\x1e\xc4\xc5\x0d\x45\xd1\x43\xee\x0b\x1c\x7d\x0c\x3c\x87\xa5\x27\x23\xfb\xae\xd1\x7f\xed\x3e\xe7\x4e\x68\xab\x37\x45\x1e\x2a\xbd\x31\xd2\x31\xb7\x4c\xa8\xce\xb6\x65\x2e\x59\x5b\x62\x1a\x5e\xfb\xd5\x9b\xaa\xc3\xce\xf2\x14\x07\x93\x5b\xa9\xb2\x41\x86\x62\xca\x67\xd9\x06\xbf\xd9\x78\xd9\x8f\xa8\xc1\xb6\x7a\xa2\xa1\xd2\
x09\xc4\xb1\x96\x34\xd3\x3e\xaa\x35\x1b\xaa\x62\x5a\x52\xf8\x1e\x03\x7e\x9a\xda\x57\x7f\x59\xe2\x11\xb2\x63\xd9\x6b\x6d\x3b\x2c\x17\x11\xa7\x41\x0a\xc7\x2d\x9b\x80\x68\x6e\x6f\x70\xbc\x29\xac\xab\xb8\xd0\xf8\xc3\x0f\xcf\x87\xe3\x59\x36\x7a\x5e\x6d\x9b\xa3\x50\x5c\x1b\x9d\x18\xe6\x2e\xf2\xcb\xe6\x15\xce\xb5\x90\xd6\x74\x2a\xdf\x96\xca\xca\xf3\xcf\x13\x7a\xf6\xed\xad\xb0\x1f\x7f\xde\xcc\xa7\x10\xc5\x63\x07\xea\x19\x54\x22\xb5\x21\xdd\x6e\xb2\x83\xb6\xe1\x1c\xcc\xde\xcb\x4a\xef\x32\x05\xbd\xac\xa2\x9c\xf0\xec\x5c\x85\x7c\xbd\xf0\x6e\xba\xa9\xf6\xc8\xaa\x10\xd4\x53\xcb\x14\x0a\x7e\xa0\xea\x6f\xb0\x1f\xf2\x99\xe2\xdb\x1d\xe8\x61\x7b\x6f\x7b\x86\x2a\x9a\x73\x94\xe8\x82\x7a\xed\xdc\x46\xf3\x5c\xc0\x28\xd5\x15\x8a\xba\x5c\xd1\x24\xd5\xbb\x95\xc6\x59\x4c\x67\x71\x40\xfa\x9f\x39\x9d\x85\x26\x78\xc1\xe9\xb4\x2a\x7e\x2b\x4e\xa7\xa8\x7b\x87\xe9\x2c\x53\xf8\x56\xbb\x3a\xf8\xa6\xd3\x79\xe7\xe9\x2a\x59\x02\x73\xe6\x4b\xd7\x9b\x96\x4c\x12\xdd\x4c\x84\x9e\x77\x60\x13\xeb\x98\xd5\xf5\x05\xda\x40\xd1\x85\x3c\x5b\x65\x5b\x04\xdb\x31\x69\x60\xe9\xde\x28\x88\x62\xc8\x79\xe2\xba\x6b\x7d\x0b\x76\x03\x9f\x79\xe7\xd1\x86\x3b\xf8\x80\xae\x62\x53\x76\x10\x52\xd7\x20\x06\x69\x68\x8a\xc6\xb4\x5d\x42\xdc\x89\xbe\x2e\xe3\x28\x6f\x7b\x7c\x3b\xd0\x4e\x42\x52\x13\xca\xdc\x91\x5e\xbd\xed\x59\xf6\x1e\x13\x3c\x6d\xe2\x50\xc4\xff\xcc\xb9\x1a\x83\x52\x69\x90\x33\xa3\xee\x15\xbd\x8e\x01\x43\xa3\x59\x2a\x1d\x09\xad\x08\x13\x96\x12\x2e\x23\x21\x95\x13\x22\xeb\x0d\x09\xb3\xcb\x22\x40\xd8\xcf\xcb\x11\x66\xa1\xf7\x29\x7e\x10\xc9\x33\xab\x80\x9c\xb9\x30\xec\x05\xc9\x1f\x4c\x25\x13\x75\xa8\x37\x00\xe4\xc7\x83\x2e\x08\xd7\x06\x5d\x96\x95\x27\x03\x15\x2a\x40\xc3\x4c\x5e\x85\xe2\xb4\x85\xb6\x3a\xc0\x22\xfd\x86\x44\x5e\x48\x0e\xc3\xd9\x42\x88\x15\x9a\x1c\xf1\xca\x61\xce\xfa\xeb\xc1\x11\x9c\x97\x19\xd1\x99\x65\xae\x92\x14\xfa\x55\x28\xba\x3d\xa4\xf4\xcb\x2b\x9a\xb5\x09\xfd\x0c\x0f\xd9\xd7\xa5\xa6\x8f\xae\x15\xb3\x23\x3c\xc1\x20\x85\xc3\xee\x4a\x49\x80\x5d\x45\xc1\x69\x1f\x1c\xda\xe1\xb5\x59\x9d\x4b\xb0\xf8\x82\xc7\x9d\xa7\xcc\x94\x26\x94\xe7\x78\x0b\x53\x40\x67\x07\x64\xcf\x9d\xb9\xeb\x36\xc4\x15\xd6\xad\xd8\xa7\x1e\xd7\xed\xe3\xba\x45\xb7\x5f\xb7\x77\x59\x1d\x60\x21\x3c\x8a\xb2\x85\xd7\x86\x15\x13\x46\xd1\xc0\x45\x7e\x3d\x38\x72\x72\x00\xd9\x83\xcc\xe0\x00\x77\x65\x3b\x56\xcc\x4e\x8a\xa1\xe9\xe3\x41\x32\x61\x4b\x87\xb0\x85\x28\x99\x65\xd5\x99\x87\x18\xac\xaa\xec\x41\x90\x12\xef\x46\xcd\x89\xfb\x42\x1e\x50\x20\x22\x71\x69\xc9\xe6\xe1\x3f\x4a\x92\x0c\xa3\x49\x74\x45\x64\x21\x4b\xff\xc0\x13\xd4\x14\xd2\x90\x4c\x88\x4c\x0a\x73\x91\x5d\x72\x01\xd2\x29\x39\xe9\x64\xb3\x7e\x86\xff\x7b\x86\xe3\xdc\xaa\x62\x40\xaa\x68\x27\xa5\xf5\x50\x47\xd1\xa9\x1a\x94\x51\xd2\x66\x65\xbe\xaa\x9f\xec\x6c\x36\xac\x6c\x31\x92\x8a\xd5\x66\x8d\x94\x44\xfe\x60\x02\x0b\xeb\xf1\xe8\x0c\xfd\xb6\x41\xeb\x9d\x46\xa5\xa1\x4b\x8a\xdf\xdc\x04\xfa\x6d\x8f\x95\x57\x02\x9a\x48\xa2\xed\x61\x10\x86\x64\x02\xe7\x28\x40\xa6\x90\xe6\xaa\xb7\x42\xff\xb5\xab\x3f\x0e\xdf\xf7\x8e\xd1\xff\x6a\xaf\xae\xa1\x29\x03\x9a\x31\x5d\x9e\x0d\xe6\xe1\x97\x41\xb6\x06\x72\xf2\x34\x08\x57\xf8\x53\x89\x6c\x7c\x18\xf0\xeb\xe7\x59\xc6\x63\xe7\x8b\x40\x28\xcc\x5c\x19\x02\x27\x0b\x3c\x16\xb2\xbf\x02\xc8\xf2\xed\x33\x41\xcb\x5a\xc9\xae\xc7\x63\x21\xa0\xa4\xfb\x48\x00\x94\x89\x68\x96\x64\x50\x20\x9e\xe5\x03\x1f\x9b\xc5\xe1\x4b\x8c\x2b\xf9\x55\x5c\xaf\x79\x5a\xdc\x2c\xe5\x82\x39\x08\xf5\xcb\xb5\x5b\x33\x10\x51\x8d\xc6\x3a\xd9\x90\xc6\xcb\x15\x33\x64\x16\xe7\x82\x76\xc0\xaf\xc8\x84\x1a\x33\x82\x35\x80\xd2\x17\x2f\x69\xce\x69\x11\x61\xe5\x5f\x5a\x01\x5b\xb3\xf4\x5e\x88\xb7\x6b\x86\x5e\xa0\xa9\xde\xe0\x2b\xa1\x17\x88
\x80\xa2\x60\x51\xf8\xba\x18\xef\x99\x83\x8b\xf1\x1e\xdc\x5a\x94\xb7\x73\x31\x2b\x45\x2a\x2b\x0f\x5f\x50\xb0\x1f\xb5\x4d\x14\xa1\x65\x97\x5b\xbe\x0c\x9d\xc6\xb9\x97\xde\x94\x48\xaf\x1a\x76\x68\xa3\xb0\x7d\xe7\x87\x7f\x19\xb4\xa7\xa2\x64\x33\x43\xd8\x0c\x43\xfb\x20\xc0\x5c\x0f\x92\x78\x10\xe4\x1c\x66\x65\x0d\xcc\xa7\x78\x2a\x18\x0a\x2c\xd9\x51\x10\xd2\x40\x46\x6c\xa1\x7e\x1b\x2e\x33\x8b\x75\x3e\xf3\x4d\x38\x02\x34\x5b\xe1\xca\x1d\xca\xe9\x2c\xc1\xc6\x07\xde\xe1\x5c\xc9\x5c\x2c\x2d\x62\x88\x01\x8b\xc6\x41\x96\xc3\xf3\xfc\x35\x5d\x88\xd7\xa7\x35\x75\x39\xbf\x44\xfe\x12\x75\x31\x3b\x63\xce\x60\x36\x4f\x62\x2a\x38\xb8\x29\xa6\x00\xb7\xa1\xaf\x41\x69\x33\xa5\xdb\xe6\x82\x7a\xfe\xbf\xe2\x22\xc8\xe6\xa2\x60\xbf\x59\xb0\xdd\x2a\x94\xdd\x03\xdd\x9f\xd1\xff\x7e\x12\xe2\x1b\xaa\x1e\x3c\x11\xa7\x35\x7a\x29\x02\x27\x09\xa9\x3b\xbd\xb7\x3d\x17\x14\x36\x57\x37\x82\xbe\x08\x2c\x5d\xd8\x30\x21\x02\xc9\x3b\x08\x1c\xfc\x08\xd8\x00\x48\x86\x93\x1a\x81\x13\x4c\x01\x33\x4f\x3b\xd5\xd1\xb6\x8d\x26\x6e\x14\x6f\x84\x05\x0c\x03\xe9\x44\xab\x1f\x7b\x92\xf5\x61\xb9\x0d\x60\x49\x80\x33\xd5\x3e\xd4\xe2\xc7\x09\x72\x33\x19\x01\x45\x2d\x8a\x54\xc5\x2e\xf9\x3e\x01\xdb\x4f\x07\xfe\xc5\xc4\x9a\x87\x01\xc3\x96\x94\x4b\xda\xaa\x71\x89\xf3\xc4\x40\xa0\xc2\x96\x08\x1a\x0d\x38\x95\x6b\x77\x33\x76\x69\x7f\xf5\x45\x79\xf3\xaa\xf5\xca\x12\x7a\xb1\xba\x30\x06\x42\xd5\xe2\x38\xcb\xbc\xc7\x78\x8a\x82\x1c\x8d\x31\xe1\x82\x49\xcc\x57\x00\x4b\xf3\x41\x2d\x41\x61\xbf\x06\x86\x6b\xf2\x2d\x24\xce\x37\x93\x28\xa6\x46\xa2\xec\x10\x6f\x84\x4b\x54\x1f\x59\x25\x3a\x7d\x12\xfe\x94\x90\x26\x60\x7f\x4c\x8f\xbc\xd1\x05\xfa\xf1\x47\xab\x3e\x5e\x0f\xd4\x71\x78\x2b\x5d\x46\x81\x89\xaa\x4c\x71\x9e\xcf\xf5\x66\xab\x5e\x49\xbb\x45\xd2\x42\x24\x11\x86\xd2\xec\x95\x85\xa0\x79\x73\xf7\x4b\xc8\xab\xab\xe4\x20\x43\xf3\x7d\xb9\x44\x2e\x90\xd7\x99\xe9\x17\x48\xe0\xf0\x7b\xae\x0e\x82\x5f\xc5\x53\x1b\x41\xd7\x29\xf9\x56\x97\xf1\x0f\xb7\xac\x1e\x16\x6f\x6b\x7b\x20\xf9\xcd\x99\x01\x2a\x1f\xd9\xda\x9b\x67\xf9\x77\x47\x4b\x05\x30\xbd\x63\xb2\x87\xdd\x0c\x05\x0d\x92\xf1\x18\x53\xfa\x4f\x86\x5c\x34\x00\x51\x13\x43\x32\xbd\x32\xd1\x43\x12\x45\x25\x27\x6f\xb2\x8d\xa6\xc1\xa5\xf4\xca\xea\x97\x68\x77\xfd\xa0\x0e\xe8\x42\x48\xa9\x52\xbb\xb8\x78\x84\x14\x0f\x8c\x0b\xd2\xfa\x64\x7d\x1a\xe6\xb8\x2e\x40\x59\x30\xa6\xd8\xc3\x0f\x00\x06\x2a\xc9\x80\x86\x1f\xc5\x69\x74\x41\x65\x15\xce\x31\xac\x00\xf9\x55\x6a\x21\xe7\x4b\x96\x83\x66\xac\xd5\x6a\x72\xcd\x6d\x7a\x56\x2e\xdf\x0c\x46\x78\x72\x3b\xb8\x76\x81\x93\xa9\xcc\xc1\x62\x7a\x28\xc1\xb3\x82\xa0\x49\x19\x6f\x8a\xa4\x8d\xf4\x14\x43\x45\x2c\xfe\x56\x17\xc3\x06\x49\x7c\x81\xd3\x5c\x91\x61\x69\xba\x3b\x6e\x4c\x09\x16\x9f\xd4\xfa\xcf\xed\xb6\x7a\x48\xab\xa8\xce\xab\xe2\x65\x45\x7b\x98\xf9\x2e\x56\x2a\x6a\xf3\x8f\x75\xc2\xbb\x49\xc6\x47\xb3\x13\x0d\x62\x91\xc5\x6a\x9a\x64\x59\xd4\x1f\x63\xf7\x8a\xb5\x34\xb5\x98\x73\x53\x31\x50\xa6\x3d\x28\xfd\xc6\x4f\xe0\x7f\x1a\x50\x90\x50\x9f\x93\x15\xdc\x95\x7e\x17\x0e\x4f\xd6\x4a\x5f\xf0\x75\x57\xf5\x8b\xb2\x16\xd3\x3c\xa5\xec\x85\xc8\x32\xee\xc2\x7f\xe7\x14\x14\xab\xb2\x6b\xba\x73\xd9\x6b\x30\x11\x5e\xb7\x4c\xb0\x17\x16\x72\xbd\x7a\x74\x7e\xdf\x3b\x5e\xb3\x57\x90\x58\x78\xcb\x5e\x42\x2c\x1c\x09\x28\x7d\xb7\x72\x30\xc5\xf1\xf1\xf1\x07\xa3\x5a\x75\x67\x32\x79\xfa\xed\x82\xd7\x24\xba\xda\x8b\xd5\x72\x95\x4d\x8f\xe8\x2a\xce\x16\x5b\xc6\xc8\xb9\x6e\x4c\x56\xa2\xf9\x06\x3a\xb8\x09\x39\xd4\xb9\x81\x73\x03\x5b\xee\x95\x01\xbb\x02\xfc\x8e\x86\x91\xbe\xc6\x4b\xe0\x40\x16\xb0\x8c\xa6\x00\x83\xf4\x71\xb8\xf0\xa2\x2c\x30\x8e\x13\xfa\x46\x63\x80\x2c\x69\x3f\x2e\xe3\x1e\x55\x97\x34\x45\x5e\x5c\xd3\xb1\xb5\xbd\x8c\x9
e\x3f\xb7\xfb\x56\x58\xcb\xaf\xe4\x09\xcd\x37\xe4\x72\xe5\x98\x53\xcb\x41\xaa\x4e\xc2\xe4\x15\x65\xe2\x14\x63\xe3\xb2\xaa\x2a\x4a\xa0\xaf\x5f\x29\xb9\x16\x75\x56\xf8\x24\x5e\xf3\x63\xaf\xa1\xa3\xb1\xca\x49\x94\xca\xe6\xdd\x6b\xd0\x76\xe0\x6a\x43\xfc\xb4\xdf\x6e\xb0\x9e\xdb\x88\xd3\x06\x9a\x15\x17\xb9\x8c\x61\xf7\x52\x07\xb1\xfc\xba\x43\xac\xba\xc0\xbd\xe4\x62\xde\xcc\xcb\x41\x32\x99\x06\x39\x6c\x2f\x55\x97\xa1\xbc\x2d\x68\x9b\x98\x24\xfe\x54\xdd\x13\x6d\xcb\xef\x36\xc8\xdd\x97\xe1\x60\x42\xdb\x3e\xe6\xe4\xed\x20\x64\x89\xba\x5c\xbc\x51\xa1\x6f\x51\xbc\x32\xf7\x9d\xa3\x96\x91\x23\x2d\x29\x4b\xb0\xf8\x62\x0b\xd4\x48\xc4\x5d\xad\x02\x79\x67\x3b\xc6\x42\x7f\xcd\x43\x2c\x29\xee\x54\xb5\x5c\x4a\xd1\x6a\x0c\xed\xfd\x69\xfd\xaa\xdd\xec\xf8\x9d\xc1\x1a\x24\x36\xe8\xb4\x3b\xad\xf6\xb0\x3d\x3c\x5b\xe2\xaa\x78\x00\xcd\x1f\x8a\x7e\x38\xce\x91\x15\x50\x70\x8e\x85\xe3\xf0\x25\xea\x16\x8c\x8c\x86\xb5\x59\x7c\xcf\x2b\x5b\x63\xb2\xbf\xd2\xa2\xc2\x23\x5f\x27\x05\x9d\xde\x7a\xc9\xa8\x31\x1b\xf8\x82\xbe\xc5\x1a\xbe\xdf\x00\x0e\xa6\x30\xaa\x2d\xbd\x69\x90\x66\xb8\xa6\x2c\xd4\x92\x8b\xc9\x34\x53\x14\x3f\x45\x35\xab\x57\x02\x29\x8e\x68\x0c\xaf\x39\x8b\x8e\x12\x86\x81\x4c\x99\x7a\xb5\x0c\x22\xbf\x8c\x93\x0e\xc3\x2c\x29\x84\x01\xee\x04\x67\x39\xb5\x6d\x08\xc6\x96\x05\xaa\xc1\x3c\xad\x9f\xa1\x8d\x0d\x54\xac\x3d\xf4\xe3\x8f\x7a\xbb\xa7\x3e\x2b\xc3\xd7\xa4\x4b\x05\xb5\x7d\x45\x2f\x30\xcc\x96\x91\xca\x61\x8c\xc5\xaf\xb5\xc8\x4c\x79\x1a\x1e\x6a\x2d\x95\x58\xd7\x25\x17\xec\x88\x0e\x57\x41\x05\x0c\xb3\xbc\x01\x7f\x0a\x0d\xd4\xf5\x5b\x6b\xa3\xb8\x72\xab\xe3\x77\xaa\x31\x0a\xeb\xd1\xc8\x71\x0c\xf2\xa4\xd3\x89\x2a\x9a\x97\xde\x15\xf1\x45\x78\x99\x06\xd3\x29\xc8\x91\x41\xce\x9a\x97\x55\x26\x28\x20\x3b\x7d\x26\x79\xa5\x95\xae\x5e\xc5\xd5\xc7\x70\x65\x2b\x1c\x7e\x6c\x9f\xaa\x3a\x90\xdc\xfa\xb2\x47\x08\x3d\x5c\xc6\x2f\x93\xea\xb9\x8e\x40\xee\x2d\xeb\x2c\x75\x08\x8d\x43\x4a\x35\xe2\x80\x51\x5c\xec\x58\x0e\x4e\x65\x21\xa2\x74\xef\x45\x40\xa8\x6b\x88\x6a\xd2\xc4\x96\x06\x95\x62\xd7\x0e\x64\xde\x98\x37\xdd\x5d\x3c\x54\x0b\xe5\x93\xe5\xa8\x53\xe2\x7d\xce\x9a\xa6\x36\x28\xec\x77\xe1\x77\xfe\x37\x89\xe1\x62\xdf\xc2\x36\xff\xda\x0d\x8c\x2c\x4b\xbb\x46\xc5\x5c\x56\xc2\xbf\xd2\xd4\x46\x28\xae\x96\x8e\x53\xd8\xc3\x35\x58\x04\xa9\xd1\xd5\x09\xdf\xb4\x71\x4f\xac\x36\x87\x34\x50\xa2\xec\xb0\x38\xc7\xba\xbd\x58\x6f\x17\x42\x67\xa1\xe8\x39\xdb\x36\xfb\x75\x29\xba\x41\x52\x38\x9f\xd8\x02\xa0\x59\x7d\x56\x0d\xb1\xa4\xf0\xcc\x10\x01\x12\x58\x67\x6f\x23\x99\xf4\xa0\x7f\x05\x4c\xb8\x02\x36\xa0\x30\x7b\x23\xc2\x71\x85\x63\xae\x6b\x3f\xaa\xbe\x9d\x96\x6d\xda\xca\xfe\x6a\x16\xe4\xaa\x45\xcb\x27\x42\x56\xa2\x6f\x2b\xd1\x85\xa5\x88\xa4\x23\x64\xf4\x62\x96\xa1\x5a\xc1\x02\x10\x5c\x88\x9a\xc5\x84\x3e\xb0\x28\xc9\x5e\x59\x0a\x4b\xba\x40\xdd\xc2\xda\x52\x5a\xd2\x0b\x12\xd2\x1b\x5a\x8e\x6b\x37\x95\x8f\x2d\xec\x1e\x3a\x13\x13\x27\x14\x5f\xf2\xb5\x0c\x7a\xb0\xed\x49\x26\x00\xb1\x43\x69\x17\x4d\xd2\x23\xe4\xf6\xfe\x3b\xee\x53\x5a\x80\x16\x11\xe9\xf8\x1b\xec\x4d\x45\x54\xe5\xf9\x6c\x9a\x7b\xcf\x5b\xd8\x34\x27\x3b\x16\x46\x41\xf2\xa8\xbf\x35\xcb\xbe\x6f\x14\xf5\x7d\xe9\x1e\xb7\x14\x67\xec\x02\x47\x84\x81\x6f\xb0\xab\x30\x8d\x83\xa4\x5a\x90\x17\x93\x06\x58\xde\x29\xd8\xed\x37\x9c\x5f\x65\xe4\x0b\x6e\x62\x6b\x8e\x71\x0a\x73\xc3\x90\x27\x4f\xd9\xc4\x94\xa8\x8b\x74\x58\x8a\xbd\x49\x62\x32\x8a\xc2\xc7\xba\x4d\x88\x26\x16\xd6\xc6\x58\xd9\x9a\x3e\x56\xea\xfd\x0b\xe8\x98\x82\x2c\x9b\x4d\x70\xa8\xde\x27\x06\xe3\x14\x07\xe1\xb5\xb4\xdf\x29\x07\xb2\x59\x4c\xd3\x56\x56\x88\x68\xb6\x18\xdb\xb3\xf3\xaf\x85\x0e\x4d\x84\x71\x81\x89\x7a\x9a\xe1\x85\x79\xbd\x5b\x5f\x34\x8b\x17\x85\x
f5\x17\x4a\xdc\x06\xc9\x53\x15\xd2\x01\xa7\x02\x24\x88\xdf\xce\x03\x3e\x1b\x3a\x25\x79\xf5\xb0\xca\xb6\x54\xde\x2c\x76\x8d\xbc\x08\xe7\x84\xb0\xe1\x36\x21\x94\x3d\x99\x4b\x55\xbf\xd8\x40\xa5\xda\x51\x06\xad\x44\x29\x6a\x68\x26\xac\x37\x24\xef\xed\x26\x12\xf3\xae\x4c\xbe\x84\x43\xb8\x2f\xa1\xff\x96\x5f\x96\xcc\xb3\xc2\x30\x2f\x4c\xde\x53\xe8\xa4\x95\x6a\xf7\x24\x5b\x04\x3c\xdc\xe9\x93\xc6\xc8\x5a\xde\xfb\x99\x2b\x0c\xa6\x2c\x5e\x50\x75\x75\x2c\xaf\xc1\x2c\x2f\xd8\x03\xc8\x29\xa4\x19\x00\x5c\xee\x15\x52\x04\x2a\xc7\xd4\xb6\x22\x8a\x99\x25\x2f\xb3\x03\x60\x26\x33\xe7\x38\x06\x63\xde\x72\x68\x22\x4a\xb9\x03\x18\x0d\x9d\x5d\x0e\xcb\xd4\x19\x80\x0a\x4b\x12\x92\x36\x51\xa7\x05\x26\xc7\xf0\x81\xdb\xcf\xee\x0d\x51\x32\x89\x88\x8c\xe0\xa1\x80\x7e\xba\x8c\xc6\x63\xd4\xc7\xa2\xc1\x10\xa5\x41\x1c\x26\x93\xf1\xf5\x3d\x1d\xee\xa9\xd5\x04\x1b\x26\x0f\xed\xfd\xec\xc1\x94\x92\xc6\xbf\x01\x17\xa2\x93\x1c\x9a\x2c\x48\xa2\xc6\x15\x7c\x85\x07\xb3\x1c\xd7\x9e\xf3\x68\x54\xcf\x3d\x96\xb8\xc3\x63\xe6\x5b\x0e\xb1\xe8\x9e\xa0\x7b\xe8\x39\x19\x0e\xf2\xff\xcf\xdd\x67\x66\x0a\x46\xe6\x6e\x9c\x9a\x3d\x4e\xa2\x1e\xa3\x2e\xaa\xd8\xb4\x1b\xf5\xd3\x69\x66\xb3\xec\x50\x54\xff\xe0\xbc\x4a\x32\x94\xc8\x14\x4e\xad\xd3\x5a\x35\xd2\x9a\x5b\xdc\xea\xe8\xd2\x96\xd6\xb5\x29\xad\xd0\x78\xb3\x34\xf1\x40\xa1\xc0\x15\x31\xee\x8a\x34\xc8\x6c\x21\xdd\x2c\xad\xb0\x44\xde\xd2\x78\x00\xfe\xd6\x80\xb5\x84\x36\xf3\x72\x0c\xc0\x6e\xda\x50\x93\x8b\x64\xd0\x4c\x41\xce\x93\xc9\xf2\x31\x47\x2f\x4c\x7d\xb6\x92\x1a\xba\x48\xe1\x6c\x77\x96\x3a\x62\xa2\xd4\x82\x87\xf1\xe2\x48\x2d\xa4\xe8\xdb\x69\xb5\x6d\x9a\x01\x45\xc5\x1d\x32\xbe\xcc\x59\x9e\xc6\x92\x3d\x01\xcb\x21\x7e\xdd\x5e\x1f\x6e\x89\x12\x27\x14\xe2\xf6\x6f\x36\x0d\xd7\x03\xea\xc7\xdf\x6f\xed\xdc\x20\xb2\x7d\x72\x0b\x4a\xdb\x2e\x5c\x48\x79\x9c\xd9\x96\x6f\x71\x0b\x69\xc5\x2d\x1d\x76\x3b\x3f\x7c\x09\x87\x5d\x69\x7b\x96\x28\x64\x41\xf5\x38\x73\xa9\x5a\x64\x5f\xfe\x3e\xf4\xe5\xa5\xd2\xc1\x77\xa0\x8e\xf8\x9b\xa8\xcd\x2d\x8b\xaf\x92\x26\xf9\x39\x1f\x6a\x57\x58\xd9\x87\x6f\xd8\x43\x7f\x3e\xb0\x06\xbb\xd8\x8e\xbe\x91\xc2\x41\xdb\x5d\x93\xdc\xa5\xdc\xb5\xc9\x2e\x04\x3c\x11\x5b\xb8\xb8\x22\x61\x4f\x87\x57\xc8\x18\xec\x99\x6e\x7b\x2e\xef\x4e\x2a\xc6\xd2\xbe\x19\x5d\x5a\x81\x2d\x56\xc1\x60\xc5\x1a\x92\xc0\xa9\x98\x57\xf4\x25\xee\xeb\x0c\x39\x00\x84\x31\x3f\x6a\xfb\x92\x1e\xdf\x40\x63\x3f\xba\xa2\xc9\x40\xa0\x82\x75\x48\xa5\xb3\x35\x35\xcc\x54\xa0\xbb\xf4\x26\xd6\x13\xdf\x1d\xf4\xc1\x7f\x01\x3f\xbe\x67\x05\xf1\xf7\xce\x98\xbf\x47\x3d\xb1\x8d\x19\x2e\xaa\x28\xbe\x13\x63\xbc\x77\x14\x4d\x45\xf1\x7d\x31\xee\x8a\x7a\xe2\x6f\xce\xbb\xbf\xb9\xb2\xf8\xdb\x6f\x15\x9e\x62\xdb\xe3\x38\xa1\xdd\xdf\xde\x51\x49\x1f\xee\xbe\xbf\xb0\x6d\x1d\xf2\xf8\x56\xdc\x3d\xca\x14\xe4\x85\x2a\x4f\x64\xba\x94\x53\x5a\xb2\xfc\x95\x37\x67\x5e\xbb\xf9\xbd\x26\xa5\xbc\xf7\x1c\x94\x8b\xe6\x9e\x54\x72\x4e\x1a\x88\x99\xe9\x27\xb5\xb4\x93\xbc\xa2\x23\xf1\x24\xe8\x47\x0b\xe0\xe2\xa7\x9a\x7c\x72\x3f\xc8\x47\x1e\xb2\xa4\xa0\x2c\x8e\xd7\x1f\x92\x41\x30\x46\xd3\x64\x7c\x3d\x8c\xc6\x28\x19\x22\xba\x69\xb1\x53\xbc\xe5\xc8\xcb\x62\xdb\x6f\xa8\x05\xb5\x86\x15\xc6\x24\x5e\xef\x90\xf7\x37\xaf\xcd\xd8\x41\x92\xad\x65\xff\x77\x83\xa9\x81\x8d\xe0\xac\x4f\x66\x50\x27\xe2\x9d\x95\x69\x9a\xe4\x09\xf9\x84\x36\xc8\xe9\x43\x2f\xc0\xea\xa1\x0d\x14\xe3\x4b\x82\x40\x39\x84\x78\x36\x1e\x3b\x16\x8a\xc0\xa0\x58\x26\x52\xbc\x23\x5b\x24\x4f\x3e\x27\xe5\x4a\x6e\xa7\x62\xfb\x43\xd4\x4f\x83\xf4\x7a\x9e\x8e\x5c\xca\x0f\xea\x04\x05\xd9\x42\x99\xd6\x93\x08\x17\xbc\xcb\xc1\x18\x45\xf1\x08\xa7\x91\x12\xc0\x55\x89\xe8\xa0\xe7\x19\x35\x23\x8c\x9a\xd3\x59\x21\xec\x1f\x8f\x31\x0c\xee\
x71\xc2\xcf\x60\x14\xe4\x1c\x21\x16\xca\x83\x8a\x41\xc6\xa9\x12\xa1\xb2\x38\x80\x5c\xee\x4a\x2e\x70\x9a\x46\x21\xce\xd0\x21\x55\x88\x44\x38\xa3\x0c\x7c\x7a\x8d\xa2\x98\x65\x33\x2e\x10\xa8\xd0\x82\x9e\xab\xe1\x64\x51\x00\x86\xcc\xe5\x28\xb7\x48\xd4\x40\x32\x51\xfb\xd7\x27\x94\x84\x15\xe9\xa6\xc4\x24\x51\xf6\x17\x8b\xf0\x38\xec\xa2\xe7\x90\x29\xeb\xb9\x6e\x38\x62\x6f\x93\xfc\x4d\x70\x3e\x4a\xc2\x52\x1f\x79\xa9\xb4\x1e\x23\xdf\xe6\x78\x86\x90\x19\xce\x90\xa2\xaf\x18\x64\xf3\x79\x75\x06\x31\x9c\x06\x97\xb1\xf9\x45\x62\x24\x44\x58\x28\xd2\xea\xb9\xcc\x89\x37\x67\xe7\x13\x1c\x5b\x4c\x87\xc9\x8e\x52\x8e\x05\x2a\x98\x0f\x3b\x77\x15\xe5\xad\xe9\x1f\xac\x08\x30\x33\x29\xee\xfa\x15\x09\xc7\xd2\xd4\x8e\xd3\x0f\xbc\xc9\x51\x90\x1d\x5c\xc6\x8c\xec\xaf\x6b\xcf\x49\xcd\xe7\x4b\xc2\xe7\x89\x3c\xc2\x26\xc8\xcb\x93\x17\x73\xfb\x41\x6b\x95\x4e\xb7\xa5\xd6\xff\x93\xcd\xa6\x44\xd4\x8a\xa3\x7c\x25\x20\xc2\x29\xdb\xfa\x82\xf4\x7c\x46\x46\xd7\x3a\x1e\xc8\x92\x41\xa1\x64\x9c\x0a\x8f\xdb\xf4\x79\x86\x0a\x8e\x1e\x51\xa5\x30\x9f\x74\xba\x4a\x4d\x08\x72\x07\x95\xfd\xc0\xb1\xed\x20\xae\x18\x1f\xe2\x14\xc7\x03\xd2\x00\x8c\xf3\x54\x5f\xaf\xc6\x30\x30\xb9\xd8\x06\xd0\xb9\xcf\x20\x5b\x6a\x0c\x1b\x53\xdd\x86\x95\x92\xc9\x4c\x93\xaa\xbc\x67\x31\x1d\x07\x98\x40\xba\x6a\xcd\x10\xa8\x9b\x7c\x3e\x8a\x0c\x36\xb5\x25\x71\x0d\x47\x44\x69\x08\x29\x07\x40\x6a\x4b\x7f\x67\x5e\xc9\x23\x96\xa3\x0d\xc6\x36\xf9\x9d\xc5\x5c\x5e\x44\xcb\x95\x73\x3c\xb3\x11\x58\x72\x45\x9c\x6c\x73\xe5\xf2\x08\xea\xd2\x1a\xe1\xef\xd4\x75\xe2\xa4\x1a\x5e\xfc\x36\x64\x53\xe6\xae\xee\x98\x2b\x74\xc0\x98\x19\x4b\x12\x00\x24\x05\x26\xf4\x61\x88\xb2\x64\x82\x69\xea\x29\x74\x39\xc2\x31\xba\x4e\x66\xa9\x30\xb3\x0f\x88\x38\x4b\x81\xdf\x73\xec\xdc\xbb\xee\x82\xba\xa3\x73\xd9\x5e\x86\x28\x03\x58\x59\x31\x47\x46\x0c\xfd\x2d\xb7\xbb\xb9\x68\x54\x9a\xd3\x5e\x32\x25\xc2\xce\xb4\x90\x7b\x98\xbc\x73\x07\x71\x4a\x02\x06\x1a\x26\x45\xa6\x9a\x80\x26\xf2\x9e\xa7\x94\xad\x4e\xba\x7f\x56\x95\x5f\x6e\x39\xee\xd0\x88\x72\x89\x2d\xfa\x67\x5d\xe3\x22\xe2\x21\xbf\x6c\xfb\x18\x4c\xc0\x68\x62\x4e\x3d\xc4\xb6\xea\xa2\x98\xbe\x59\xcb\x00\x97\x4a\xb7\x58\x32\x9d\xa7\x72\xf1\x33\xb4\x21\xb5\xaf\x7e\x5a\x20\x75\x91\x63\x93\xdd\x46\x97\x49\xfc\x3c\xa7\xf2\x33\x77\x77\x94\x82\x17\x8e\x93\x64\x8a\x82\x7e\x72\x61\xd9\x06\xcb\xbb\xfc\x9c\x43\x7b\xee\xee\x30\x70\x51\xd1\xaa\xdc\x4f\xf1\xb6\x42\x5e\xad\x4a\x8b\x47\x1c\x4e\xa0\xa7\x60\xff\xb2\xc8\xba\xb1\x6d\x7c\x83\x71\x12\xe3\x07\xe0\x78\x00\x17\x6d\x14\x7b\x08\xbc\xa8\xb0\x93\x91\x62\x73\x37\x32\x39\x17\x89\x2a\x1c\x71\x7e\x6a\xb5\x27\xb3\x9f\x91\xad\xb7\xfb\x31\x0a\xc0\xf3\x56\x8b\x45\x58\x1a\x59\xc8\x88\xf3\x5e\x0e\xc2\x16\x9e\x46\x18\x3f\xa8\xe1\x10\xb3\xe8\x3c\x8e\x86\xd1\x20\x88\x73\x16\x50\x32\xa2\xbd\x07\x90\xb4\x1d\xdb\x31\xf9\x17\xc9\x83\x98\x9e\x95\xe5\x37\xf7\x10\x36\xc6\x6c\x5e\x27\x0b\x47\x18\x7c\xd9\xf4\x6a\xce\x58\x23\xab\x59\x98\x18\x29\xed\x06\x63\xee\xa0\xe1\x7b\x4b\xf5\x22\xfb\x67\x2b\x1b\xbb\x61\x0b\xe3\xd0\xfe\x57\x07\x70\x5a\xbf\xaa\xd7\xeb\x7e\xbd\x51\x6f\x7a\xa8\x7e\x55\x6f\xd5\xdb\xf5\x4e\x7d\xed\xec\xc1\x00\x7b\xa8\x53\x39\xf4\x0a\x0b\x5f\xc7\x67\xc4\x58\xb1\x97\xcc\x21\x18\x96\x2b\x7f\xa0\xff\x7e\xfd\x0a\x31\x7b\x35\x51\x63\x88\x6a\x62\x7a\x7f\xd8\xb0\x28\x0a\xe5\x3f\x80\x2a\x19\x0d\xf1\x9f\x95\x8d\x49\x75\x00\x94\x3c\xc6\x38\x3e\xcf\x47\xd4\xf4\xc8\xc9\x45\xaa\xc7\x8c\x29\x16\xca\x62\x91\x62\xb6\xe3\x41\x12\x12\x7a\xc7\xf4\x87\x4e\xee\xf0\xba\x3c\xf6\xa7\x20\x00\x1c\x0f\x56\x76\xf1\x95\xbb\xcd\x79\x01\x64\x2a\xad\xf6\x85\x83\xbb\x14\xc4\x5a\x21\xb2\x8b\x25\xae\xc1\xbc\xb0\x2e\x96\x2a\xca\x90\x7c\xca\x87\xeb\x0b\x45\x73\x61
\x53\xe1\x8c\xe5\xc2\xa7\xea\xeb\x57\xb4\x8b\xaf\x4a\xc3\xb7\xcc\x21\xa0\x41\x90\xe3\x98\xed\xf9\x2a\x05\x39\x98\xbf\x9b\x90\xa4\x7b\xd8\x62\xc0\x4f\x18\x37\x94\x28\x13\xd2\xfc\x2e\x7a\xaf\x5b\x15\x97\x2a\xb4\x21\xb0\xf3\x79\xfc\x0c\xf1\xa6\xe1\x4e\x69\x06\x25\x75\xa6\x44\x03\x3b\x2f\x16\x8e\x84\x0c\xec\x2f\x06\xc3\xb2\xf8\x2a\xe6\xa3\x40\x84\x3a\x28\x48\xcc\x5d\x3a\xca\x8e\x0b\x1e\xa3\xf0\x1c\x07\xf0\x63\x95\x25\x51\xf8\x45\x1d\xa3\x53\xbd\x71\x30\x99\x22\x7c\x05\x91\x24\xfb\x91\xde\x39\x7a\xaf\x4a\xca\x98\xb7\x0d\xf4\x3e\x75\x60\x0b\x92\xa2\x20\xfe\x2f\x47\xa0\x74\xa8\x4f\x44\xd2\x18\xc3\x56\x8b\x82\x1c\x05\x28\x8f\x26\x16\x89\xdb\x16\x92\x5d\xee\xae\x3b\x29\x84\x3c\x38\xa4\x28\xda\x20\xe8\xb1\x59\x38\x8d\x78\x54\x6c\xf2\x4f\xad\xd1\x42\x2f\x51\x2d\xa2\x18\xbf\x40\xeb\x4b\x4b\x22\x5a\xb6\x53\x8a\xa7\x70\xd4\x1e\x2f\xa3\x48\x84\xdb\xfe\xba\x51\x34\xfd\xe6\x0d\x6f\xc3\x52\x5e\x34\x5a\x41\xf0\x77\x6e\x4b\xf2\x98\xd2\xc5\x75\xa7\x31\x75\x47\xb9\xaf\xda\xfd\x0d\x64\x0e\x76\x95\x8c\xc1\x26\x15\x8a\xcd\x76\x79\x43\x45\xd3\x96\x63\x25\x88\xe2\xa0\xaf\x9f\x3c\xa4\x03\x40\x55\x76\x4a\x63\x70\x10\x21\x50\x11\x0c\xa3\xfc\xae\xa2\x60\xb1\x38\xc5\xea\x72\x30\x29\xf2\xb9\x6a\xe8\x5e\x0b\x6b\x32\xe5\x28\x5b\x5c\x24\x27\x93\xb1\x33\x0c\x8b\xa8\x76\x2a\x60\xf0\x38\xf3\x1b\xb0\x74\xe8\x1f\x90\x7e\xb3\x41\x48\x3f\x53\xf8\x82\x85\xe0\x15\x51\x6a\x03\xed\x07\xf9\x68\x65\x80\xa3\x71\x51\x73\x15\x2d\x10\x91\xc8\x7e\xfe\xad\xb4\xf3\x38\xcc\x91\x8c\xe3\xef\x6d\xed\x3e\xd9\x71\x57\xa6\x05\xe3\xbc\xab\xd2\xc2\xbc\x73\xae\x0c\x16\x4e\x6a\x14\x57\x39\xfa\xb9\x79\x72\x5e\x31\x69\x84\x99\xdf\xd7\x9d\x26\x75\xa4\xde\xe2\x53\x20\x89\x0d\xc3\x68\x3c\xe6\x61\x67\x99\x9b\x04\x9c\xb7\xe6\x0b\x25\xfc\x30\x17\xdb\x0e\xbd\x32\x28\xa7\x8b\x4f\xa5\x59\x66\x90\x2a\x11\xca\x7d\x19\x9f\x55\x38\x82\x31\x57\x10\xdf\x7d\xd2\xa2\x25\x64\x32\x89\xed\x47\x2c\x99\x3d\x98\x07\x2a\xf2\x35\x55\x6f\xc8\x27\x9f\x2f\xdd\x51\xe6\x3f\x5f\xa2\x0d\xf2\x5f\x47\x02\xb5\xc9\xe7\x3f\xc8\x36\x73\xd5\x0c\x42\xdc\x59\xef\xeb\xe1\xd7\x45\xb1\x20\xfb\x82\x64\xce\x51\x72\x4f\x50\xe1\xee\x8e\xb6\x5a\xab\x5f\xbd\xaa\x77\x5e\xa1\x17\xa4\x0b\x7f\xc0\x9e\xbe\xb3\xb3\xb3\xb3\x84\x96\xe9\x8b\x9f\x7e\x42\xf5\x2b\xbf\x0e\xdb\x3d\x41\xc0\xb1\xdd\xd3\x2e\xd6\xea\x57\xad\x4e\xbb\x4e\x81\x5d\xea\xc0\x2e\xab\x02\x83\xe1\xc5\xd9\x0c\x3c\x7d\x6a\x80\xc6\x9b\x37\xb4\x26\x5a\x46\x30\xd2\xa5\xf5\x59\xdd\xd5\x0d\xa8\xc3\xfe\xca\xcb\x2e\x6f\xa0\xfa\x4a\xdb\x59\x06\xc6\x94\x15\x7d\x41\xed\x6d\x38\xb5\x2d\xa1\x9f\xd0\x4a\x1b\xfd\x07\xf2\x51\x17\xbd\xf4\xab\x88\x28\x06\xe7\x50\xc5\x0d\x0f\xa5\x83\x60\x30\xc2\x2c\xbb\xce\x7c\x81\x83\xd4\xfc\x4c\xe8\x31\xad\xd5\x68\x55\x72\x54\x52\x90\x24\xbb\x89\x34\x18\xf6\x2b\x26\x5a\x75\x03\x7d\x4e\x6b\xb4\x3c\x10\xe4\x5a\x7f\xcd\xd2\xa7\xcb\x22\x87\x4f\x4d\x94\x2f\xe0\xa3\xaf\xa8\x5e\x31\xac\x79\x8c\x2f\x25\x67\x27\xb8\x75\x64\x0a\x90\x98\xa7\xef\x79\xa2\x8d\xa4\xdd\xf9\x94\x1d\xed\xe7\x19\xd2\xe0\x78\x00\x86\x34\xf4\x5f\xbb\x21\xcd\x2e\xbe\x32\x35\x01\x36\x70\xa4\xe0\x06\x05\xba\x42\x7f\x57\x8b\xbf\xa9\xab\x2f\x46\xf8\xaa\xb2\x0a\xa3\xc2\xc9\x73\xc1\xa8\x9a\x95\x5a\xbf\x2f\x46\x3e\xc2\x57\x66\x08\x4d\x36\x7e\xd2\xd1\x7e\x7e\x22\x21\x6b\xe0\xcc\xdb\x1e\x53\x2f\x2b\x9f\x3c\xb3\x45\x8f\x91\x74\xd6\x4d\x40\x23\x7c\xd5\x1b\x05\x69\xe5\x3c\x5b\xd9\xdc\x03\x1d\xe4\x48\x8b\xe8\x41\xee\xf2\x8e\x87\x38\x8e\x1d\x5b\xe3\x00\x96\x00\x69\x2d\x15\x6a\x1f\xbf\xb3\x64\xe3\x77\xb6\xaa\xa4\x9d\xfa\xb0\xbc\xae\x83\x41\x08\x70\xbf\x27\x51\x5c\x7b\xfe\xfc\x16\x11\x37\x25\x0a\xa7\xeb\x6d\x11\x4d\x0f\x5f\x29\x94\x70\xab\x2f\x18\x87\xf0\xf4\xd7\x4b\x4d\x7c\xb1\x51\x9b\x6
xac\xb6\x3d\x0a\xa0\x4b\x13\xa1\x97\x00\x5e\x0f\x5d\x8b\xe5\x27\x1b\xdc\x82\xe3\x6d\xc0\xa5\x7d\x9d\x6c\xb0\x4b\x8f\x0a\xf6\x49\x03\x0b\x7a\x74\x9a\xf7\xfa\x62\x05\x1e\x25\xaf\x13\x11\xa6\x3e\x6e\xe5\x27\x9b\x40\xc5\x02\x34\x99\x28\xde\x9a\xa3\xc1\x8a\xbe\x3a\x1f\x6c\x7b\xbd\x01\x6c\x4f\x1a\x6c\x32\x6a\x48\x6c\x4f\x7a\xd8\x9e\x8e\x63\xfb\x5d\x9d\xaa\x13\x0a\x1d\xf6\x89\xfa\x21\xd1\x62\xa6\x68\xa7\xb7\xbd\x17\xb3\x25\x7a\x5e\x3a\x2c\x5b\x90\xac\xef\x7c\xc4\xf7\xb5\xaf\x32\x95\x6b\xbe\x7f\xb2\xc9\x77\x24\xd7\xa0\x75\x99\xb1\x00\x92\x16\x34\x16\x90\x6a\xe8\x27\x2d\xb4\x3d\x24\xc1\x60\x31\x5b\x3e\x93\x59\xca\x51\x67\x3e\x4c\xe7\xcb\xda\xd9\x17\x4b\x48\xf4\x1c\x21\x5e\xbc\x40\xb7\x24\x46\x27\x1e\x34\x5f\x99\xd4\x9d\x7e\xf0\xa0\x65\x12\x4c\xbb\xee\x1f\x5c\xa5\xe9\x13\x74\x57\x7b\x6e\x33\x74\xd4\x75\x9d\x06\x87\x11\xf9\xd3\x1d\x91\x77\xe7\x3c\xda\xee\x6e\x35\xe3\xd1\xef\xb2\xe2\x4a\x43\x03\xb3\x1d\x43\xe6\xa2\xe0\xda\x3d\x7f\x3a\x42\xe3\xc9\x8e\x34\x5c\x63\xdb\x8a\x2d\xd6\xe7\xcb\xb5\xd3\x4a\x20\xfc\x3e\x2f\x9f\x48\xc7\x38\x79\xa3\x4d\x28\xb6\x76\x68\x1d\xf3\x64\xc3\x6d\x06\x3e\x05\x39\x36\xfa\x59\xe3\xc7\x79\x89\x58\x05\x43\x20\xc4\x4b\x73\x4e\x78\xe2\x41\x1f\x8c\x45\x5b\x9b\x97\x23\xaf\x09\x00\x46\xb8\x13\xaf\xee\x8e\x84\xb6\xb9\xfc\x89\x57\x77\x46\xc1\x59\xc6\xad\xc3\x43\x74\x3c\x73\x05\xbf\xed\x87\xf5\x6b\x0e\x19\xe3\xa1\x11\x69\xe1\xab\x8e\xc3\xcd\xb8\x32\x62\xdc\xbb\x85\xd4\xba\xd5\x49\x63\x70\xdb\x37\xd9\xe0\xa6\xd1\x44\x4b\x42\xf6\xb6\x19\x00\x25\x02\xd2\x43\x40\x06\x08\x9c\x52\x14\xb9\xc7\x6a\x79\xe9\x10\xe2\x5c\xf3\x86\x93\xd6\x35\xde\xa1\xc9\x6f\x8a\x7d\xf9\xc3\xed\x9a\x19\xf8\xea\x8a\x1f\x73\xcd\x6b\x4e\x5a\x17\xd2\x31\xc2\x0f\x2d\xc6\xf9\xf2\xf2\xd3\x27\x68\x7f\x58\x9a\xde\x48\x06\xfa\xb6\x7a\x5a\x67\x1a\x52\x8c\x6f\xbd\xc9\x4c\x78\x3e\xfa\xd2\xd6\xc1\x62\x73\xc4\x4e\xbe\xd2\x6d\x21\x5c\xd2\xb1\xd8\xf1\xcf\xb5\x2d\xca\x30\x49\x73\xe3\xbb\xa2\x06\xf0\xcd\x8c\x8f\x68\x37\x9c\x06\xfa\x1a\x26\xaf\x86\xf3\x40\xd7\xdd\x4b\x85\xaf\xb3\x95\x0a\x36\x49\x65\xbc\x9c\x77\xf7\x3b\xe1\x3d\x74\xd8\xe5\x7f\x0f\x7d\xdd\xff\x01\x88\xc3\x02\x4d\xb3\x9b\xeb\x9f\x64\x13\xd4\x27\xcf\xe1\xe9\xd3\x8c\x35\xf3\xc6\x39\x48\x74\x68\x54\xbd\x0e\x52\xcf\x02\x0e\x71\x1e\x1a\x37\xd3\xbd\xfc\xaf\x0b\xce\xff\xce\x87\x48\x67\x6c\x3d\xab\x8d\x7b\xab\xbb\xe8\x07\x5c\x7c\xca\x64\xe1\xf8\x9c\xd0\xf6\x29\xbd\x2d\x9d\xdf\x7d\x0e\xb1\xa5\x67\x9f\x95\xd3\x52\x43\x35\x31\xa7\x27\x9c\x3b\xcd\xcd\x69\xa8\xd4\xf4\x9c\x8e\xea\xba\xf3\x8a\xad\x28\xdc\x9d\x78\x32\xe8\xc4\x93\xeb\x76\xe2\xc9\xa0\x13\x4f\x76\xeb\x84\x59\x55\xd2\x74\x95\x93\x55\x4b\xb4\xe2\xd5\xaa\xe4\x1f\xb8\x61\x03\x22\x52\x87\xbb\x65\x3c\x38\xbf\x58\xcf\x6a\x36\x4c\x22\x32\x40\x3e\x1d\x42\x7e\x7a\x79\x62\xc3\xe9\xa1\x86\xf4\x74\xe8\xc2\xd6\xf3\x44\x37\xb4\x6b\xd2\x1e\xbf\xd4\x16\x4a\x43\x38\x6b\x0e\x3b\x6d\x11\x21\xb6\x5c\xcc\xa9\x3f\xb6\xfb\x33\x9d\x62\xff\xb2\x5d\xf3\x9a\xdb\x35\xfd\x5d\x37\x6b\xfa\x63\x5b\x35\x7d\xc7\x46\x4d\xff\xcb\x36\xcd\x9b\xde\xa6\xe9\x6f\xb9\x49\xd3\xa0\x96\xce\x16\x4d\x7f\x9b\x0d\x9a\xbe\xfd\x18\x7e\xb3\xf1\xf0\x1e\x0d\x3e\xbe\x9d\x52\xfc\x2f\xb2\x5d\xb3\x5f\x60\x27\xc4\xe4\x77\xdb\xc3\x59\x97\xdb\x11\x34\xff\x58\xe5\x76\xae\xb5\xdb\x52\x3d\x6e\x77\x7b\xd6\x30\x3b\x15\xe4\x09\x31\xe9\x6c\x0b\x09\x31\xb1\x6e\x33\xa1\x5b\x16\xe4\x11\x80\x9d\xad\x26\x54\x55\xb5\x08\x31\xb9\xb1\x23\xc4\x7a\xf7\xad\x35\x79\x06\x9b\x1c\xbc\x4d\x96\xa6\x69\x92\x87\xf9\x54\x2b\xd8\xb3\x37\x35\x41\x46\x24\x61\x24\x21\x4c\x2f\xe7\xb3\x67\xa8\xdb\x63\x68\x9a\xe0\x30\xf1\x70\xc8\xf4\xea\x3f\x66\x22\x38\x24\x05\xcf\x64\xcd\xa0\xba\x36\xd0\x96\x44\xa2\xd8\xf7\x49\x14\xc9
\xb2\x42\xaa\x72\x90\x99\x08\xe5\x69\x10\x30\x1a\xeb\x75\x85\xb6\x24\x92\xa7\x5e\x46\xb8\x97\xeb\x65\x88\xcc\x44\x82\x38\x0d\x03\x8a\x73\xbd\x48\x51\x2f\x35\xbd\xe9\x2a\x45\xc2\x9e\xae\x59\xa5\x08\x47\x5f\xca\x14\xdd\x50\x4e\x44\x77\x2e\x53\x24\x9a\x8c\xe5\x45\x7a\xcc\x18\x66\x46\xf4\x4b\x99\xa2\x9b\xcf\x8d\xe8\xb6\x65\x8a\x8c\xca\xe9\xe6\x47\x74\xb4\x4c\x91\x4f\xdd\x65\x8a\xc4\x30\x7e\x8f\x12\x53\xb6\x44\xfe\x45\xb2\xa5\x7f\xe9\xc3\x2d\x37\x7b\xb0\xe5\x33\x1d\x59\xb9\x7e\x12\x25\x1f\x35\xdd\x55\x88\x7e\xae\x77\xf0\x1a\xee\xba\xe9\x6e\xf2\x3d\x60\xe7\xe7\xf3\xab\x89\xfa\x71\x8a\xd8\xea\xf4\xe2\x8c\x2f\xaa\x75\xff\x4e\x1e\xfd\xf8\x4c\xcb\x0f\x94\x52\x6a\x49\xf4\xc8\x7b\x9b\x80\x50\x46\x8a\x04\xf2\x8a\x3c\x26\x94\x71\x42\xf6\xa6\x43\xb8\x18\xfb\x71\x10\x24\x50\x66\x90\xf8\xbc\x88\xc2\x2c\xd7\x53\x83\x41\x83\x34\xcc\xbc\x22\xcd\x0a\xb8\x00\x21\x0b\x72\x3f\x25\x85\x09\x31\x4f\xd2\x30\x4f\x59\x08\xb7\x67\x63\x9a\xe4\x69\x9a\x39\x11\xfb\x49\x18\x65\x24\x4c\x21\x9d\xf1\x03\x9a\x86\x3e\x35\x21\x0e\x93\x02\x63\x5c\x00\xc7\x69\xe4\x85\xb9\x87\x13\x27\xe2\x84\xf8\x05\x25\x0c\xae\xdc\x66\x05\x4e\x82\x22\x49\x4d\x88\x59\x8a\xb3\x90\xe7\xc0\x71\xce\xa2\x9c\x62\x4c\x9d\x88\x73\xea\xc5\x8c\x49\x19\x33\xdf\xf3\x3d\x12\x18\x65\x8c\x09\xf5\xc3\x54\xde\x19\x11\x84\xb1\x17\x15\x29\x77\x22\x26\x81\x8f\x69\x98\xc2\xdd\x11\x01\xe7\x41\x4a\x68\x66\x14\x45\xe8\x65\x71\x9e\xc1\x05\xe2\x79\x58\x14\x69\xc0\x89\x13\x71\x4c\x52\x1e\xe6\x31\x88\xa2\x20\x71\x4a\x93\xc8\xa8\x3c\xea\xe5\x3c\xc5\xf2\xf2\x0a\x3f\xc5\x51\x12\xa5\xd8\x2d\xe3\x34\xcf\xbc\x48\x56\xa8\x24\x61\x16\x63\xe2\x87\x26\xc4\x19\x4e\xd2\x02\x4b\x06\xb2\x22\x4a\x48\x94\x04\x4e\xc4\x3c\x48\xd2\x28\xc9\x40\x76\x09\x2f\x70\xc0\x72\xa3\x8c\x79\x91\xf2\x20\xa6\x70\x8d\xb8\x4f\x83\x82\x84\xdc\x77\x22\xf6\x8a\x0c\x27\x79\x06\x0d\x68\x4a\xb3\x3c\x4c\x8d\x1c\x93\xc0\xcb\x18\xce\x32\xb8\xa4\x3d\x66\x59\x92\x45\xa1\x5b\x79\x39\x4f\x48\x16\x81\x83\x84\x09\x49\x3d\x12\x1b\x11\x07\x2c\x0e\x68\xc0\xe0\x1d\x21\xe2\x2c\xe2\x01\x75\x73\x1c\x66\xa9\xc7\x92\x1c\x38\x49\xf3\x00\x17\x69\x1e\x18\x5d\x3a\x2a\x12\x4a\x73\x40\x4c\x7d\x8c\x43\x3f\x75\x73\x9c\x50\x9f\x87\x38\x24\xe0\xd2\x3c\x8a\xf2\x82\x99\x1d\x84\xfa\x38\x8b\x22\xc8\xf0\x49\x9e\x06\x3e\xc1\x9e\x3b\x56\x78\x9e\x4f\xe2\x8c\xca\x3b\xdf\x8b\x94\x60\xdf\x68\x6e\x69\x11\x26\x71\x91\xa9\xfa\xa6\xbc\xf0\x38\x77\x5b\x45\x16\x71\xcf\x4b\x0b\x30\x7c\x3f\x67\x94\x16\x99\xd1\x2a\xf2\x90\xc5\x09\x0e\x00\x71\xe2\x7b\x8c\xc5\xc4\x2d\x0a\x2f\xca\x58\xe4\x87\xf2\x7a\x17\xcf\xf3\x29\x31\x3b\x08\x0e\x48\x42\x12\xf9\xee\xe5\x31\x8f\x47\x3c\x76\x8b\x82\xc4\x69\xec\x31\x0a\xc1\x25\x88\x72\x42\x8a\xc2\xe8\xd2\x84\x63\x21\x26\x10\x59\x98\x91\x28\x4b\x48\xe4\x44\x1c\xe4\x24\x8b\xf2\x02\xac\x22\x64\x59\x40\x18\xcf\x8d\xb1\xc2\xf7\xa9\x97\x63\x10\x59\x92\x27\x61\xea\xe7\x85\x13\x71\x14\x7a\x2c\xf6\xc3\x40\x3a\x08\x2b\x22\x3f\xe7\x66\x73\x8b\x98\xc7\x52\x88\xdb\x7e\x16\xc7\x29\x61\xee\xb0\x49\x71\x46\xb2\x84\xc8\xe8\x16\xf3\x9c\x71\x1e\x99\x10\x27\x24\x26\x24\x93\x22\xc3\x01\x25\x7e\xe8\xa7\x4e\xc4\x8c\xa4\x05\xa7\x4c\xc6\xd9\xac\xc0\x9e\x1f\x19\x1d\x84\x51\xcc\xa2\x28\x00\x8e\xd3\x2c\x20\xbe\xe7\xb9\xa3\x5b\x46\x82\x94\xa6\xb1\x07\x71\xd6\x2b\x68\x12\x27\xd8\x18\xdd\xe2\x28\x0b\x31\x03\x19\x7b\x51\x18\xa4\xdc\x77\x5b\x45\x8e\x13\xc2\x29\x4e\x00\x71\xc4\x8b\x90\x60\xe3\x98\x97\x47\x49\xe2\x45\x04\x74\x11\x86\x51\xc8\x92\x11\xcf\x2b\x02\x8f\xfb\xa1\x94\x5d\x18\xc7\x98\x78\x84\x19\xed\xd8\x8b\x18\xf3\x64\xcf\x7c\x92\xa6\x39\x4e\xdd\xca\xc3\x09\x0b\x32\x8c\x21\x6c\xa6\x34\x27\xb9\x97\x19\x39\xc6\xdc\x8f\xa3\xcc\x93\x76\x8c\x03\xcc\xd2\xd0\x1d\xdd\x4
8\x1c\xd0\x38\x0e\xc0\x8e\xf3\x82\x72\x9e\x26\x89\x09\xb1\x1f\xa4\x5e\x9a\xa5\xd0\x33\x8e\x93\x34\xa0\x23\xe6\xe6\x27\x38\xf3\xb2\x14\x94\x92\x85\x59\x12\xb2\xc8\x37\xc6\x63\x9e\x53\xc6\x02\x08\x9b\xdc\x0f\x30\x65\x99\xdb\xdc\xc2\x34\xc9\x32\x16\x14\x72\x64\x88\x7c\xee\xc7\x46\xc4\x11\x25\x3c\x2a\x64\xb0\xca\xa3\x94\xa4\x94\xb9\x45\x11\x07\xb4\xa0\x84\x83\x83\x84\x39\x2f\x52\x62\x8e\x15\x31\x65\x61\xe4\xcb\x91\x26\xf0\x71\x4c\x8a\xc8\x6d\x15\x34\xc8\x68\x4c\xb1\xcc\x84\x70\xe1\xb1\x34\x36\x86\x4d\x9a\x65\xb1\x47\xa4\xf2\x30\x8b\x02\x3f\xe1\xee\xdc\x2d\xf1\x52\x5e\x14\x05\x93\x59\x64\xe4\x63\x4e\x8c\x56\xc1\x82\xd0\x8b\x32\x0e\x9e\x97\x73\x4a\xd2\x9c\xbb\x73\xb7\x94\x17\x09\xf3\x0b\x39\x32\x90\x2c\x8a\x13\x6c\xce\x2b\xa2\x18\xc7\xb4\x90\x43\x98\x1f\x93\xd0\x27\x6e\xe5\x65\x8c\xc4\x3e\xcf\x40\xc6\x9c\x91\x28\xc2\x89\x51\xc6\x39\xa6\x51\x4a\xe5\xd0\x44\x84\x21\x91\xee\x24\xe0\x30\x11\x61\x39\x8b\xf3\x1c\x1c\x24\xcb\xb9\xc7\x53\x6c\x0c\x9b\x45\x18\xe7\x41\x11\x17\x6a\xd0\xe5\x39\x8e\xdd\x76\xec\x45\x85\x17\xc5\x32\x5f\x88\x09\x8e\xa3\x22\x35\xba\xb4\xc7\x22\x3f\xce\x33\x70\x10\x46\x32\x9a\x50\xe6\x1e\x41\x30\xf6\x8b\x84\x7a\x81\x9a\xb8\x4b\xbc\x9c\x19\x39\xc6\x69\x8c\xbd\xd4\x97\xf1\xd8\xc7\x59\x10\x63\xb7\x8c\x09\xcd\xd3\x38\x2e\x42\x69\x15\x5e\x10\xe7\xd4\x18\x8f\x7d\x92\x31\x96\xc6\x60\x15\x81\x97\xc5\x24\x48\xdc\x0e\xe2\x67\x09\x4f\xb9\x07\xa2\xc0\x61\x96\xa4\x3c\x35\x2a\x2f\xf0\x71\x1e\xc5\x19\xf4\x2c\xc9\xb0\xe7\xe5\x81\xdb\x8e\x83\x2c\x0b\xf3\x40\x26\xde\x59\xea\xf3\x80\xa4\xc6\xa1\x49\xa4\x2b\x24\x49\x20\x58\x15\x59\x14\xc6\x5c\x84\x57\x57\xac\x28\xb2\x34\x2a\x98\x1c\x24\x59\x1e\x15\x8c\x1b\x39\x8e\xb2\x20\xc0\x09\x05\xc4\x01\x0b\xe2\x90\xe2\x58\x4d\xa2\xbe\x75\x1c\x5b\x6d\xdf\x0b\x5f\x5d\xf7\x84\xaa\xed\x1a\xb4\x57\x9d\x13\xaa\x3f\x5f\xef\x84\x6a\x88\xc9\x76\x4b\x07\x86\xe5\x88\x9b\xaf\x3e\x7a\xdd\xa5\x83\x88\x79\x09\xaf\x27\xdc\xfd\x34\xcb\x12\xcf\xb2\x74\x90\xa6\x51\xcc\xb8\x1c\x7e\x69\x90\x31\x16\x77\x53\x17\x07\x11\x3f\x8b\x78\xe1\xc7\x10\xc9\x0a\x9e\x04\x05\x15\x91\xcc\x04\xc9\xc2\xa0\x28\x42\x1f\xbc\x20\x2c\x70\xee\x47\xc5\xb6\xb3\xfa\x21\xf6\x78\x48\x64\xf0\x61\x39\x8f\x28\xc9\x2d\x4b\x07\x49\xea\x85\x11\x95\x06\x49\x52\x9f\x47\x19\x2e\xb6\x24\x82\x0b\xea\xe7\x89\xb4\xf9\x22\x0d\x70\x9a\x47\x96\x9e\x84\x29\xf7\xb2\x5c\xa6\x41\xd8\x8f\x39\xc1\x71\xb2\xcb\xd2\xc1\x4d\x9f\x23\xdd\xa6\x34\x2c\xc0\x79\xf6\xca\xaf\x8f\xb1\xbd\xf4\xeb\x63\x62\xaf\xfd\xfa\xd8\xb7\x17\x7f\x7d\x1c\xd8\xab\xbf\x3e\x0e\xed\xe5\x5f\x1f\x47\xf6\xfa\xaf\x8f\x63\x4b\x01\x58\xd9\x41\x28\x0f\x6b\xdc\x07\x2e\x9f\xcf\xe5\xf3\xe1\x61\x0f\x29\x03\x68\x6e\x3c\x02\x25\x9f\xcf\xe5\x73\x4b\x73\x02\xcd\x89\xb5\x39\x99\xcb\xe7\x96\xe6\x3e\x34\xf7\xad\xcd\xfd\xb9\x7c\x6e\x69\x1e\x40\xf3\xc0\xda\x3c\x98\xcb\xe7\x96\xe6\x21\x34\x0f\xad\xcd\xc3\xb9\x7c\x6e\x69\x1e\x41\xf3\xc8\xda\x3c\x9a\xcb\xe7\x96\xe6\x31\x34\x8f\xad\xcd\xe3\xb9\x7c\x6e\xd8\xd6\xb7\x65\xd1\x63\x69\x19\x26\xe4\x4c\x1a\x45\xbf\xe2\x1e\x6c\xb9\x95\x06\x61\x6a\x95\x4a\x5b\x30\xb5\xca\xa4\x1d\x98\x5a\x65\xd2\x04\x4c\xad\x72\xa9\x7e\x53\xab\x5c\x6a\xde\xd4\x8a\x4b\xad\x9b\x5a\x71\xa9\x70\x53\xab\x42\x2a\xdb\xd4\xaa\x90\x7a\x36\xb5\x3a\x95\x3a\x36\xb5\x3a\x95\xea\x35\xb5\x9a\x49\xd5\x9a\x5a\xcd\xa4\x56\xe7\xa6\xba\x83\xae\xa3\xbb\x5b\x5e\x87\x6a\xad\xa7\x5d\xd3\x7f\x55\xca\xda\xc3\xb6\xe3\xe6\x0f\x61\x04\xaf\x97\xcf\x86\x20\x5b\x14\x8a\x96\x64\x84\x08\x5e\x95\xf5\x69\x03\xbd\x6a\x34\xfa\x1a\x91\xb7\x00\x69\xae\xe5\xda\xe2\x98\x4b\x1c\xea\x7c\x41\x1f\x07\x9c\x9a\xbf\x56\x05\xea\xc3\x43\xf4\x1f\x50\x8d\xd8\x4e\xbc\x2e\xe9\xbc\x53\x85\xea\xcd\xac\xa9\x73\xbc\x19\x3b\x
8b\xa7\xc0\xe6\x5a\x0b\xf7\x79\x3c\x09\x35\xeb\x54\xc1\x9e\xc9\xe2\xbf\x7a\xf1\xea\x39\x94\x28\xae\xcb\x01\x77\xe0\xe8\x00\x0e\x36\xbd\xbe\x43\x5d\xb0\xd8\x75\xc2\x54\x42\xce\x3b\x5c\xcc\x87\x5c\xcc\x4c\x5c\xcc\x87\x5c\xcc\x74\x2e\xba\x70\xf1\x10\xce\x52\xc9\x58\x57\xa9\xa5\x66\xce\x07\xad\xf6\xf6\x2e\xc5\xb7\x5b\x8d\xe2\xed\x34\x8a\x5b\x8d\xe2\xad\x34\x8a\x67\x9d\x02\xdf\xb3\xba\x0a\xb7\x56\x98\x7b\xae\x6a\x75\x6b\x42\xc2\x4a\xc2\x5d\x30\xd8\xc7\x9c\x68\x2a\xad\xf1\x45\xa3\x2a\xc5\xf3\x0e\x1b\x73\x03\x1b\x33\x13\x1b\xf3\x01\x1b\xb3\x0e\x1b\x5d\x84\xd1\x00\x1f\x89\x9c\x3a\xdd\xa9\x76\xb8\x2b\x94\xc4\xad\xda\x63\x97\xda\x5f\x95\xb1\x8c\x5c\xc6\x81\xb9\x07\x39\x57\x90\x8e\x33\xe1\x12\x12\x47\x5a\x20\xb1\xde\x0a\x5d\xc3\x4a\x06\xb0\x31\xb3\xe8\xc3\xce\x6b\xd8\x51\x1e\xda\x48\x33\x17\x42\x2b\xe3\xfe\xc8\xd5\x05\x6f\x43\xd9\x4c\x82\xcf\xa0\x66\x9b\xc0\x23\x34\xe9\xed\xa1\x07\xb5\x77\x36\xbf\xfc\x0f\x84\xd1\x3d\x34\xd8\x36\x3d\xe4\x43\xfc\x5b\x6b\x70\x9c\x0d\xf1\xef\x7e\xe3\x2d\x16\x2e\xf0\x75\xb9\x00\x29\x6e\xc9\x83\xd4\xce\x90\x03\xa9\x89\x01\x7d\x33\xd2\x76\x54\x7c\x55\xda\xd4\xdb\x8e\x7a\xaf\x4a\x13\x73\xf6\x9a\xf8\xaa\x28\xfe\x0c\xdd\x46\xc5\x4c\x95\xc5\x17\x5f\xcc\xe7\xf8\x64\x1b\xe9\xfb\x7c\x2e\xda\xcc\x55\x1b\xf1\xe5\x74\xee\x28\xa6\x3f\x83\x6a\xfa\x02\x75\x2a\xe9\xc0\xe7\x4c\x7e\x4e\xd5\x67\x7b\xf3\x39\x34\x17\x54\x52\x49\x12\x3e\x67\xf2\x73\xaa\x3e\xbb\x4b\xf2\xcf\x64\x4d\x7e\x15\x70\xe4\xb8\xc2\xe6\xb2\xbc\xf4\x9e\x2c\x7e\xc0\x66\x75\xc5\x7e\xf5\xb0\x53\xb3\x7f\xa6\xdd\x22\xc1\xea\x51\xc7\x59\x99\x1f\xde\xa6\x26\x0d\x22\x45\x73\xd6\xa5\x39\xef\xd0\x9c\x75\x69\xce\x75\x9a\xb3\x6d\x68\x62\xd9\x4f\xae\x86\x06\x79\xde\x84\xcb\x41\x81\xd6\x65\xff\x67\xf5\xa5\x15\xda\xc3\xa0\x7d\x28\x68\xfa\xf5\x33\x59\x86\xdb\x4d\x53\xf6\x53\x01\xd7\x34\x67\x5d\x9a\xf3\x0e\xcd\x59\x97\xe6\x5c\xa7\x39\x6b\x69\x1a\xb3\xce\xf1\x7b\x08\xcc\xbc\xfe\x08\xd5\x97\x7e\xb4\x1f\xa6\xfa\x11\x9c\xf7\xc7\xd2\x75\x8c\xea\x47\x08\x06\x3f\x96\xb6\x10\xfa\x01\x2e\x4a\x10\x30\xb3\x79\xc3\xa2\xc9\x29\x25\xa0\x20\x38\x6b\xfb\x22\xc3\x45\x85\xf5\x70\x31\xdb\x26\x56\xb5\x64\xc5\xbf\x42\x22\x6e\x9a\x15\x90\xca\x66\x26\x82\xd9\xb5\x28\xfe\x68\x0c\x3d\x7d\x8a\x3f\x96\x26\x8a\x3f\x96\xd7\xa1\x68\x0e\x76\x7d\x8a\xaf\x8c\x14\x5f\x99\x28\x9a\xad\xad\x7f\x79\x85\x85\x24\x4c\x5e\xd4\x6e\x0f\x80\x56\xee\x60\x1e\xa4\x8e\x4a\xfb\x32\x3c\x02\x8b\x44\x67\xb1\xc6\xb5\x1d\x9b\x7f\x3d\xcf\x59\xc5\xd1\xa5\xfb\x4d\x5f\xfc\xc1\xfb\xa6\xd1\xbe\xe1\x75\xf3\xd4\xc4\x36\x0c\x40\x85\xa9\x0d\xbc\xd8\x16\xa6\x36\xf0\x0e\xcd\x4d\x6d\xe0\x15\x9a\x9b\xda\xc0\x2b\xf9\x24\x9f\xc3\xf5\x1d\x73\xdb\xfd\x1d\xf0\x4e\x3f\xc9\x67\x00\x25\x45\xc7\x75\xc9\xe5\x03\xa1\x59\x6f\x02\x11\x98\x32\x13\x8f\x30\xa5\x90\x99\x78\x84\xd9\x8b\xd4\xd4\x06\x26\x2f\x52\x53\x1b\x98\x27\x61\xa6\x36\x30\x4d\x32\xb8\xcd\x40\xfc\xc1\xb4\xcb\x44\x9a\x7a\x45\xac\xc2\x80\x89\x9b\x89\x94\x83\xb0\xac\xfd\x76\xc4\x91\xd2\xa8\x86\xc9\xce\x8d\x5e\x56\xa2\xcd\x19\x42\x66\xf0\x18\xec\x9f\x0d\xb2\x81\xc7\x4d\x31\x8a\xc9\x63\xb0\x7b\x26\x99\x7d\xec\xe9\xdc\xb2\x21\xb3\x7d\x3c\xda\x2c\xa3\x24\x08\x22\x4a\x87\x04\x71\x4b\x10\xc4\x93\x2a\x82\x9d\x48\x90\x8e\x13\xd4\xe6\x25\x25\x41\x02\x21\x76\x48\x90\xb4\x04\xc9\xac\x1e\x97\x26\x00\xaf\x85\xd7\x71\x82\xda\x4c\xa6\x24\xe8\x0b\x82\xf9\x90\xa0\xdf\x12\xf4\x05\xad\x5c\x11\xf4\x47\xdc\xa1\x8f\x47\x9b\xfb\x94\x04\x03\x41\x90\x0f\x09\x06\x2d\xc1\x40\xd0\xe2\x8a\x60\xa0\x13\xe4\xe3\x04\xb5\xd9\x52\x49\x30\x14\x04\x8b\x21\xc1\xb0\x25\x18\x0a\x5a\x85\x22\x18\xea\x04\x8b\x71\x82\xda\xfc\xaa\x24\x18\xc1\x4b\xc5\x90\x60\xd4\x12\x84\xec\xfd\x54\x11\x8c\x3a\x2f\x11\xe3\
x04\xb5\x19\x59\x49\x30\x16\x04\x67\x43\x82\x71\x4b\x10\x5e\x9b\xd4\x98\x2c\xe0\x5d\x49\xc0\x27\x9f\xbd\xf8\x72\x29\xce\xcd\x5d\x8a\x83\x45\x72\xaf\x6e\x36\x13\xc8\xa0\x0e\x8b\xef\xdd\xf4\xb5\x38\x66\x32\xf8\x9f\xf2\x62\x9c\xe3\xe5\xe2\x03\x5f\xc9\x2a\xbf\xa8\x5a\x22\x9f\xdc\x4d\xcb\x4a\x24\x28\x39\x62\xb0\x3f\x3b\xe5\xc5\x72\xc5\xd5\x76\xea\x81\xd6\xb4\xb3\x26\xda\xda\x5d\xb5\x7c\xed\x93\x9b\xb8\x88\xe7\x8f\x7a\x05\x8f\xce\x67\x53\x1f\xe4\x1e\xc2\x1e\x09\x0e\x7d\x55\xa7\xf8\xcb\xe9\x26\xeb\x51\xa5\x10\x93\x5d\x4f\x37\x89\x26\x23\xa7\x9b\x3a\xdb\x1a\x06\xa7\x9b\x42\x4c\xbe\x9c\x6e\xba\xe9\xd3\x4d\x42\x2b\xdb\x9d\x6e\x32\x2a\xa7\x73\xba\x49\x2a\xc8\x79\xba\x49\x9e\xa3\xdd\xf2\xf4\xb7\xff\x87\x3e\xcf\xc4\x17\xd9\xdd\x94\xad\x79\x14\xf4\x1e\x9c\xe5\x61\x1f\xf4\xc3\xf9\xfb\xbc\xe8\xfd\x98\x95\xe7\x33\xbe\xfa\x5d\x8e\x44\x69\xac\xc2\x77\xc1\xa1\x7c\x20\x19\x83\xcf\x3a\x3f\xff\x0a\x47\xa7\x5e\x6d\x75\x27\x10\x6c\x9e\x39\x86\xae\x37\x70\xda\x6f\xe3\x47\xa1\x0e\x0f\xd1\x73\xbe\x3a\x83\x51\xf4\x78\xb6\x2c\x33\x8e\x70\xff\xda\x14\xd1\xfc\xf9\x31\xee\x9e\x5d\x0a\xe3\x29\x0a\x92\x29\x0a\xf0\x14\xf9\xfe\x14\x91\x70\x8a\x70\x3c\x45\xc9\x14\x21\xac\x6d\x35\x0a\xe9\x14\x85\xde\x14\x05\x64\x8a\xfc\x60\x8a\x48\x34\x45\x98\x4e\x11\xf6\xa6\x88\xe8\x70\xc9\x14\x85\x78\x8a\x02\x7f\x8a\xfc\x70\x8a\x48\x3c\x45\x38\x99\x22\x2c\xf0\x6b\x70\x91\x37\x45\x21\x99\xa2\x20\x98\x22\x3f\x9a\xa2\xc8\x9f\xa2\x30\x9c\xa2\x20\x9e\x22\x3f\xd1\x00\x7d\x3c\x45\xc4\x9f\x22\x1c\x4e\x51\x3c\x45\x28\x22\x53\x14\x06\x53\x14\xc0\xd5\x02\x3a\xa0\xe0\x84\x4c\x11\x0e\xa6\x28\x12\x80\x78\x8a\x42\x7f\x8a\x82\x70\x8a\xfc\x58\x03\x24\xc9\x14\x11\x3c\x45\x58\x90\x9c\x22\x44\xe8\x14\x11\x6f\x8a\xb0\x60\x47\x82\xbd\x75\xc8\x95\x98\xe5\x4a\xba\x72\x15\x5c\x08\x39\x8a\x7e\x13\xf1\x79\x8a\x50\xa8\x73\xab\x08\x8b\x6e\x09\x6e\x81\x21\x4f\xe7\xd2\x57\x82\x13\x5c\x09\x80\x68\x8a\xf4\xee\xe2\x48\xca\x43\x08\x18\xb8\xf7\xbb\x8a\x10\x0a\x15\x02\x16\xf2\xf3\x63\x29\xd8\x30\xec\xc9\x2b\xf0\x94\xb6\x42\xa9\xfd\x40\xa7\x20\x54\x23\x4c\xc3\x17\x2a\x8d\xa4\xda\x43\x5d\x87\x42\x05\xc2\x1e\x84\x5d\x08\x1d\x0a\xc1\xd6\x59\x4d\xe7\x46\xa8\x8b\xb3\x8b\x39\x83\x6b\x52\x44\x52\xb9\x9e\x95\xc5\xe0\x86\x27\xf0\x82\x1f\x4e\x7e\x7e\xf9\xf8\x87\x47\xf2\x4e\x29\x21\x31\x32\x45\xd0\x79\x21\x21\x2a\x2c\x52\xa9\x09\xa4\xab\x2c\x15\x2b\x75\x12\x65\xbd\x20\x10\xaa\xd3\x7f\xf9\xdd\xb3\xd7\x7c\x8d\xd8\x22\x57\xb5\xd1\xcf\x41\xa5\xf2\x3e\x0d\x03\x1f\x02\xfe\xe7\xe7\x5d\x7d\xf6\x52\x4a\x6f\xe3\xdd\x83\x97\x11\x4a\x3c\x6f\xda\x7f\x56\xbf\x2b\x48\x10\x03\x00\xe9\x00\x50\xcf\x23\x03\x10\x5f\x03\x19\x3e\x0d\xf4\xa7\x06\x02\x61\x97\x00\x31\x10\x88\xba\x4c\x9a\x40\xe2\x5e\x3f\x0c\x84\x68\x87\x91\x21\x8a\xa4\x4f\x65\x88\x82\xe9\x20\x26\x80\xb4\x2f\xad\x21\x48\xd6\x23\x33\x00\xc8\xfb\x5d\x19\x82\x70\x0d\x64\x48\xa1\xe8\x72\x39\x6c\x4e\x5d\xad\x31\x1d\xd5\x07\xa1\x23\x04\x7c\x3a\x62\x55\x41\x9f\x88\xc1\x2e\xa8\xdb\x6e\x22\x3a\x6a\x98\x31\x75\x19\x26\xa5\xa3\xfa\x4e\xe8\x88\xbe\x59\x9f\x09\x83\x49\xf4\xc9\x0c\x39\xc9\xe8\xa8\xc6\x73\x3a\x62\x35\x9c\xba\xad\xbb\xe8\xd3\x30\x68\xde\xaa\x2e\x15\x25\xb0\x59\x90\x44\x7b\x6a\x51\xa6\xdf\x01\x31\x52\x0f\xba\x58\x4c\x7d\x0c\x75\x10\xa3\x4d\xe8\x7c\x1a\x9e\xc7\x5d\x36\x1c\xbe\x81\x1d\xe6\x9f\xf4\x39\xb5\x06\x0a\xec\xd0\x68\xda\xed\x8c\xc1\x2a\x3a\x9d\xb1\xc6\x09\xec\xb0\x5f\xde\x03\xb1\x85\x0a\x6c\x0e\x05\x74\x54\x14\x98\x8e\x8a\x82\xd0\x51\xd5\xfb\xd4\xad\xb6\xa0\x87\xc2\x16\x2b\x5c\xe2\x8e\xa8\xcb\x84\x63\x3a\xa2\x0c\x4a\x47\x24\x99\xd0\x51\xd3\x62\xd4\xad\xd0\xb4\x2f\x6f\xc3\xe0\xd1\xa7\x32\x04\xc9\xa9\x4b\xa5\x9c\x8e\xb8\x50\xd1\xd7\xa8\x7e\x47\xd5
\x74\x2c\xcb\x08\x3c\x8f\x06\x1e\xb6\x46\x10\x05\x63\x4d\x33\x1a\x05\xda\x22\x48\x4d\xc4\x33\x11\x09\xba\x44\x8c\x30\x61\x17\x8f\x91\x99\xa8\x8b\xc7\x08\x13\xb7\x30\x06\x2a\x7a\xb0\x35\x36\x4f\xfa\x24\x0c\x48\x58\xbf\x3b\xf6\x84\x43\x11\x32\x20\xc9\x3a\x82\x35\x00\xe4\x2d\x80\x35\x80\x48\x16\x0c\x8d\x8b\xbe\x56\xac\x79\x97\x53\x98\x98\x8e\xf4\x82\x50\x97\xb4\xfd\x3e\x09\x93\x6d\xd0\x9e\xde\x4d\xb6\x41\xc7\x05\x1e\xd1\x11\x43\x8d\xe9\xb8\xa1\x52\x3a\xa2\x94\x84\x3a\x94\xc2\xa8\xdb\x97\xd2\x3e\x07\xf6\x40\xe2\x74\x95\x9c\x8e\x18\x31\xef\xcb\xd4\x1e\x4f\xac\x16\xa4\xbf\x80\x18\x9e\xe2\x2d\xdc\x1e\x93\x2d\x9c\x09\xfb\x5b\x38\x3e\x0e\xb6\xb0\x67\x1c\x3a\x5d\x1f\x47\x63\x2e\x89\xe3\x91\x60\xa8\xa7\xe0\x66\x0c\xc9\x58\xb8\xc4\x6c\xcc\xef\x71\xba\x45\xb4\xc4\xd9\x58\x20\xc3\xf9\x16\xc1\x12\xf3\x2d\x42\x19\x2e\xfa\x1a\x32\x9a\xcb\x58\xa8\xc0\x78\xcc\x43\x31\xd9\xc2\x41\xb0\x3f\xe2\x65\x38\xd8\x26\xb0\x85\x5b\x84\x1d\x1c\x39\xa3\x1b\x8e\xb7\x08\x4b\x98\x6e\xe1\x8b\x38\xd9\xc2\xeb\x31\xdb\x22\x9a\xe2\x74\x2c\x82\xe1\xcc\x15\xc2\x70\x3e\x16\x16\xf8\x16\x61\x14\x17\xbd\x08\xb5\x4b\xaa\x82\xbd\xc0\x12\x8c\xcc\x2c\x93\x8e\x54\xb0\x35\x45\x91\xb8\x4d\xd8\x03\xed\xb9\x67\x78\x1e\xf6\x94\x33\x84\x88\x3a\x42\x33\xd1\x88\x3b\x10\xe3\xc3\xb1\x3d\x37\x69\xa9\xd8\x32\x93\xba\xa7\xb6\xac\xa4\xe5\x62\xc8\x67\xd6\x93\xe6\x10\x22\xef\x48\xcb\x96\x9a\x00\x06\x4b\x5a\xa2\xda\x9a\x25\xe0\xea\x1e\xa6\x63\xec\x13\x6a\x37\x14\x9f\x8e\x19\x4a\x40\xc7\x14\x1d\x52\x77\xe7\x23\xea\x36\xa5\x58\x7b\x3e\x7c\x4a\xa9\x5d\x74\x09\x75\x89\x8e\xd1\x31\xf3\x4a\xa9\xdb\x09\x32\xea\x36\x9d\x9c\x8e\x19\x06\xa7\x63\x4e\x50\xd0\x31\x13\xef\xa4\x15\x16\x23\xc0\x23\xee\x8a\xc9\x88\x85\x62\x7f\x34\x64\xe0\xc0\x69\xa9\x38\x1c\x75\x78\x1c\x8d\x46\x0d\x1c\xbb\x22\x31\x1d\xf5\x44\x9c\x8c\x86\x0c\xcc\x1c\xde\x88\xd3\x91\x70\x81\xb3\xd1\xa8\x85\xf5\x70\x60\x20\xc1\x47\x62\x2f\x2e\x46\x43\x92\x4a\x2d\x9c\xdd\xc4\x4e\xbf\xc2\x64\x3c\xb4\xf8\x8e\xc8\x81\x83\x11\xb7\xc6\xe1\x68\x6c\xc1\x91\xd3\x81\x71\x3c\x1a\xdb\x30\x1d\x09\x3e\x38\x19\xf5\x40\xcc\x46\xc2\x00\x4e\x47\x63\x20\xce\x46\x43\x01\xce\x47\xe3\x11\xe6\x8e\x60\x87\x8b\x6e\x34\xda\x25\x7f\xa0\x9e\x24\x69\x8e\x2d\x75\xf6\x89\xbd\xc0\x92\x4a\xd4\x4c\x1b\x9e\xfb\x2d\x86\xc0\x6c\x88\x81\xdd\x88\xc2\xae\x44\xcc\x39\x44\x93\x1c\x9b\xc8\xc7\x5e\x27\xfd\xb3\x8f\x9f\xf5\x8a\x8a\x39\x83\x68\x75\x6b\xce\x1f\xe4\x73\x73\xee\xd0\x8a\xcf\xb6\x82\xd2\x8a\xc7\x80\x23\xd7\xbc\xd4\x92\x39\xd4\xe6\x6d\xce\x1d\x5a\x05\x5b\xfa\xef\xd4\x2f\xa6\xf6\xee\x11\x3a\xc6\xbc\x4f\xc7\x04\x10\x50\xb7\x8a\x43\x3a\xd6\x85\x88\x5a\xed\x27\xa6\x63\xc6\x47\xa9\x4b\x7e\x49\x97\xb8\x2d\x89\x70\x58\x47\x4a\x5d\xda\xcb\xe8\x98\xf5\xe5\xd4\x6d\xbf\x9c\xba\xdd\xaf\xa0\x63\x1e\x82\xbd\x11\x17\xc1\x78\xc4\x0b\x31\x19\x75\x43\xec\xbb\x46\x0a\xa7\x85\xe3\x70\xd4\x45\x70\xe4\x8d\xe9\x09\xc7\xa3\x91\x0c\xd3\x51\x6f\xc1\xc9\x68\xb8\xc0\x6c\x34\xe0\xe1\x74\x24\x66\xe2\x6c\x34\x6e\xe0\x7c\x24\x2c\x61\xee\x88\x4b\xb8\x70\x86\x0d\x99\x3d\xb8\xfb\x80\x47\xfd\x12\x13\xbb\x63\x62\x7f\xc4\xed\x71\x30\x62\xf8\x38\x1c\xf5\x1d\x1c\x8d\x47\xb7\xd8\x11\xde\x30\x1d\x77\x9e\xc4\x19\x3f\x30\x1b\x8d\x7f\x38\x1d\x0d\xa2\x38\x73\x06\x11\x9c\x8f\x46\x29\xcc\x47\xc2\x14\x2e\xba\x71\x64\xb7\xe4\xc1\x18\x53\x6a\x7e\x6d\x2b\x24\x0d\x37\xc6\x94\xe1\x9e\xb6\x5d\xc3\x98\x31\x28\x00\x98\x4f\x31\xe6\x0d\x4d\xce\x67\x78\x1e\xd5\x08\x6c\x00\x71\xcb\xa0\xe1\xa9\xae\x73\x5b\xca\xd0\xf2\x67\xc9\x19\xda\x1e\x1a\x28\xa4\x2d\x83\x66\x16\xb2\x0e\x80\x69\xe0\xb0\xfa\x1e\xd7\x95\x63\x40\x5d\x74\x84\x63\x9e\x73\x70\xb5\xc7\x74\x44\xb8\x84\x7a\x36\xc3\xf1\xa9\xdb\x70\x02\xea\x32\x9c\x9
0\x8e\xd8\x45\x44\x47\xa4\x16\xd3\x11\xd3\xa3\x74\x44\xb5\x09\xb5\xc9\x9d\xd1\x11\x9d\xa6\xd4\x6d\xb5\x19\x1d\xb1\x9a\x9c\x8e\x68\x8e\x53\xb7\xe1\x16\xd4\x65\xf6\xd8\x73\xba\x2d\xc6\x9e\x55\xaf\x98\x8c\xf9\x34\xf6\xc7\x7c\x12\x07\x23\x5e\x8d\xc3\x31\xa7\xc0\xd1\x58\xe4\xc0\xf1\x88\x6f\x37\xe3\x9e\x55\x8d\x38\x19\x73\x20\xcc\x46\xe2\x23\x4e\xc7\x22\x08\xce\x9c\x11\x0a\xe7\x63\x11\x06\x73\xfb\xe0\x5c\x8c\x44\x08\xc8\x0f\xdc\xba\xc2\x23\x96\x86\xc9\x88\xa7\x63\x7f\xcc\x99\x71\x30\xe6\xac\x38\x1c\x0b\x55\x91\x3d\x14\xe1\x78\x2c\x58\x60\xea\x76\x97\x64\xcc\xe1\x31\xb3\x06\x0b\x9c\x8e\xf9\x32\xce\x46\xc2\x05\xce\x9d\xc1\x12\xf3\xb1\x50\x86\x8b\x5e\xc0\xd9\x25\x2b\x50\x6c\x53\x53\x14\xa9\x71\x9a\xf2\x02\xd9\x96\x98\xfb\xec\xb7\xcf\x89\x09\x77\xd0\x4a\xc4\x88\x3f\xd4\xfb\x63\xca\x0a\x9a\xa7\x43\xdc\x71\xc7\xa0\xad\xa3\xa2\x31\x1b\xd0\x98\x1a\x22\x66\x35\x59\x23\xcb\xa9\x32\x50\x53\x06\xa0\xc9\x6a\xf8\x3c\xd7\xd0\x0e\x9f\xf2\xa6\xaf\xc3\x67\x45\x47\xca\xa6\x9e\x3a\x95\x84\xa9\x5b\x49\x84\x5a\x7a\xe4\x53\x97\x76\x02\xea\xea\x4f\x48\xdd\x56\x17\x51\xb7\x65\xc4\xd4\x2e\x0f\x4a\x5d\x76\x91\x50\xbb\x3d\x33\xea\x56\x7d\x4a\xdd\x3a\xcc\xa8\xc5\xa6\x72\xea\x56\x11\xa7\x2e\x9b\x2a\xa8\xdb\x94\xb1\x37\xe2\x47\x18\x8f\x18\x1f\x26\x23\x9e\x8a\x7d\x87\x01\xe2\xc0\xe9\xa7\x38\x1c\x71\x45\x1c\x79\x23\x31\x28\x76\xfa\x5c\x93\xc1\x5a\x78\x4f\xac\x51\x9b\xd9\xbc\x15\xa7\x23\xa1\x0d\x67\x8e\xb8\x88\xf3\x91\x18\x82\xf9\x88\xcf\xe2\xc2\x19\xdc\xc4\x88\x6e\x61\x1c\x3b\x4d\x09\x13\xa7\xd3\x62\x7f\xc4\x2f\x71\x30\xe2\x98\x38\x74\x78\x26\x8e\x46\x62\x0d\x8e\x47\x83\xd5\x88\x27\xe1\x64\xc4\x47\x31\x73\x04\x00\x9c\x3a\xa3\x16\xce\x9c\xa1\x05\xe7\x36\xff\xc7\x7c\xcc\x85\x8b\x6e\xe8\xd9\x7d\xe8\x36\xd8\x48\xcd\x6a\xe0\x61\xc3\xd0\xad\x52\x0d\xc3\xa0\xad\x90\x9a\x9a\x05\x4d\x92\x63\x7a\x1a\x5a\xba\x1f\x49\x94\x86\x31\xba\x4d\x99\x86\x4f\xa9\xd6\x01\xd3\x30\xdd\xf4\x7d\xd8\x94\x69\x46\x3e\x7c\x9a\x6a\x9d\x30\xbd\xaa\x6b\x79\x9c\x61\x98\x96\x72\x1b\x62\xe5\xad\xdc\x4c\x2f\xe9\x5a\xe6\x3b\xec\xa9\x4b\x0c\x98\x9a\x85\x4a\xa8\x4b\xbf\x3e\x75\xf5\x31\xa0\x0e\xc3\x09\xa9\x4b\x78\x11\x75\xf5\x24\xa6\x36\xf1\x50\xea\x30\xab\x84\xba\x54\xcd\xa8\x4b\x23\x29\x75\x18\x42\x46\x6d\x66\x9e\x53\x97\x25\x73\x6a\xb6\xd8\x82\x3a\x94\x8c\x3d\xa7\x96\x31\x76\xba\x2b\x71\xfa\x2b\xf6\x9d\xbe\x82\x03\x97\x3b\xe0\xd0\xe9\x4a\x38\x72\x3a\x04\x8e\x5d\x11\x41\x8d\x37\xc6\x47\x89\x33\x5a\x60\xe6\xf2\x18\x9c\x5a\x82\x06\xce\x6c\x41\x36\x77\x7a\x2e\xe6\xce\xa0\x80\x0b\x6b\x44\xc4\x9e\x53\xeb\xd8\xe9\x88\x98\xb8\xbd\xdb\xb7\x58\x1a\x0e\x9c\x8e\x86\x43\x97\x0b\xe3\xc8\xea\x87\x38\x76\x46\x06\x4c\x9d\xde\x8f\x13\xa7\x2f\x62\x66\x09\x56\x38\x75\xba\x1b\xce\x5c\xd1\x01\xe7\x56\x2f\xc6\xdc\x19\x39\x70\xa1\x05\x87\x5d\xc6\x54\x2a\x06\x78\x62\x40\xd8\x08\x67\x18\x8f\xef\xb5\x8b\x1b\xc3\x70\x2c\xdb\x0d\x03\xb1\xc2\x67\x78\x14\x4a\x7c\xc4\xc8\x47\xd4\x3c\x34\x05\x61\xc5\x89\x79\x9c\xa1\x9e\x99\xff\xa4\xe9\xb7\x29\x04\x4b\x3e\x4d\x8f\xd2\x06\xa9\x81\xcf\xec\x9e\x3c\xec\x31\x0c\xbf\x66\x3b\xe1\x8d\x10\x0d\x6d\x0a\xc5\x84\xe1\x51\xbd\xa8\x64\xed\xb9\x7c\x8c\x5d\x32\x55\x30\xc4\xa5\x7f\x05\xe3\xbb\x74\xad\x7e\x0f\x5c\xc2\x56\x30\xa1\x5d\xac\x0a\x22\x1a\xed\x73\x6c\x31\x2d\xf5\x98\xba\x24\xaa\x60\x12\x9b\x96\xd4\x73\x66\xb7\x52\x05\x91\xba\xec\x51\xc1\x64\x66\x95\xab\xa7\xb9\xcb\x8c\x14\x0c\x77\x99\xa8\x82\x29\xec\x1e\x5a\x67\xc4\x46\xc7\xc6\xae\x1e\x60\x62\x11\x32\xf6\x6d\x16\x87\x03\x17\xb3\x38\x74\xa9\x05\x47\x2e\x61\xe0\xd8\xd1\x45\x5b\xfc\x4d\xec\x2a\xc4\xcc\x65\xa9\x38\x75\xc6\xc3\xcc\xe5\x51\x38\xb7\xdb\x37\xe6\x36\xa3\xc3\xc5\xb8\x77\xb5\x2f\x37\x56\x08\xec\x8e\x05\x
98\x8c\x1b\x1c\xf6\xc7\xbc\x0f\x07\x4e\xef\xc3\xe1\x78\x10\xa8\x95\xed\xec\x6e\x3c\x1e\x94\x30\x1d\x0f\x6e\x38\x19\x8f\x06\xb5\x39\xb8\xbc\x4c\x1a\x85\xf5\x69\x36\x16\xd6\xa4\x61\x38\xf8\xe4\x63\x11\xa7\x36\x12\xa0\xa2\x8d\xec\xf2\xa3\x5e\xd7\xe0\x29\x5b\xbf\x5f\xa3\x6a\xc6\x2a\xb4\xe6\x73\x9e\x55\x50\x8f\xe8\xe5\x77\xcf\x5e\xa3\x72\x71\x5e\x5f\x13\xd1\x54\x34\x78\xfa\xed\xcb\xde\xc5\xc5\xed\xc1\xc4\x29\x6a\x37\xfe\xc3\x05\x8a\xea\x0b\x7c\x56\x5f\xa6\x7a\x43\x4f\xfd\x2a\x01\xe4\x97\xfa\xb3\xf8\x32\xd5\xfa\xd3\xe7\x5c\xab\xaa\xf4\xfd\xc3\x97\xb2\x30\x16\x92\x85\x5f\xdc\x77\x54\x09\xe8\xe6\x82\x2a\xf9\x45\xab\x92\x72\xdd\x2b\xaa\xdc\xa5\xf5\xde\xf3\xab\xa6\x04\xd8\x7b\x7e\x65\x28\x7d\xf7\x9e\x5f\xd5\x75\xf5\xde\xf3\x2b\x73\x59\x3d\x41\x43\xaa\x28\x8c\x50\x5a\x56\x6b\xc4\xb2\x6c\xb9\xca\xcb\xc5\x29\xaa\x96\xe8\xf9\x31\x36\xe2\xfd\xae\x84\x52\x40\x6f\xfa\x35\x90\x4d\x77\x87\x84\x91\xfd\xee\x90\x16\xdd\xf3\xa5\x40\xf8\xfc\x18\xbf\x29\xdf\xa2\xbb\x08\x1b\x6a\x94\x2a\xba\xb2\x3c\xff\xa4\xee\xdd\x9b\xb6\xbd\x2a\xc7\x27\xfe\x33\xf1\x31\xba\xab\xa1\x86\x3a\x7c\x7b\xe8\xf6\x00\xb1\xa1\x60\xe9\xb7\xeb\x35\x3f\x4b\xe7\x1c\xe1\x08\xad\x2f\xd2\xf7\xfc\xca\x20\xfe\xf5\x45\xfa\x23\xbf\x5a\x37\x2a\x68\xbf\xdb\x85\xb2\x78\x09\x40\x52\x34\xf5\x97\x07\x08\x47\xcd\x37\xfb\x15\x2b\xc7\x50\x71\x4a\xf1\x63\x16\xe4\xba\xc6\xae\x78\x79\xa3\x90\xbe\x55\x4c\x19\xf1\xba\xaf\x6e\x49\xcb\xea\x25\x54\x45\x39\xd2\x8a\xa0\x34\x78\x6d\x28\xa5\x41\x05\xd4\x68\x50\x64\xd8\xc6\x64\x35\x24\xb0\x5b\x4d\x97\x4e\xb1\x5a\x9e\x41\x80\x99\xf3\xa2\x42\x84\x82\x67\x08\xca\xe6\x86\x52\x38\x6f\x26\x25\x3a\x94\x77\x43\x78\x50\xc0\xb1\x36\xae\xc9\xe4\xf9\x31\x51\x36\xb8\x87\xf6\x1b\x09\xec\xa1\xbf\x20\x42\xdf\x42\x8d\x47\xb0\xad\x12\xfd\x05\xee\xb8\xd8\x9a\xbd\x55\x79\x3a\xdb\x9e\xbf\x00\xca\x77\xb6\x4c\xee\x75\xb8\x24\x14\x1e\x4b\x5e\xd1\x3e\x22\x81\x85\xe1\x3d\x03\xc7\x03\xb2\xa6\xca\xfe\xa2\x03\xe5\x22\xe3\x88\xb3\x6c\xa6\xcc\x0e\x95\x6b\xc4\xce\xcf\xe7\x25\xcf\x85\x2e\xd9\x02\xf1\xcd\x39\x5b\xe4\x3c\xaf\xeb\x32\x42\x78\x9f\x1a\xb1\x09\x11\x28\x34\x19\x5b\xa0\x94\xa3\x74\xb5\x7c\xcf\x17\xa8\x5c\x54\x4b\x44\x65\x51\xe0\x35\x5a\x67\x6c\x2e\xd1\x4b\x94\x6b\x33\xb6\xcb\x59\x99\xcd\x10\x9b\xcf\x97\x97\x6b\x40\x2d\xf0\x56\x4b\x81\xf6\x62\xcd\x73\x74\x59\x56\xb3\xe5\x45\x25\x19\x5c\x97\xcb\xc5\x10\x8b\x12\x34\x94\xd7\x9c\xb4\x5f\x1e\x3c\x50\xd7\xca\xb4\x3f\x89\x80\xe2\x63\x93\xe4\x3a\x96\x8b\xa5\xe5\xc6\x6e\xc3\x55\x68\x21\x88\xb5\x9f\x21\x66\x4d\x4a\xa9\xc4\xaf\x91\xd0\xbe\x6f\x56\x95\xad\x1f\xb1\xde\x8f\xf8\xad\x2a\xec\xf9\xab\xfe\x13\x5c\x0a\x30\xb8\x6a\xc7\x10\x01\x8f\x65\xe1\x4b\x54\x2e\x3e\xf0\xd5\x9a\xdb\xa3\x60\xb9\xf8\xf0\xb2\x17\x08\x3b\x3f\x6d\x35\x40\x60\xc7\x00\xd1\x62\xd3\x25\xb6\x7e\x83\x43\x61\xd0\x7d\xec\x1f\x3b\x13\x0e\xed\x17\xbe\xc8\x56\x57\xe7\xd5\x0e\x57\x01\xaa\x8a\xb5\xcb\xe3\xa6\x5d\x0b\x3c\xed\x86\x7c\x6b\x09\xdd\x9c\x7f\x0e\xaa\xad\x44\x5c\xb5\x7b\x8f\xdd\x94\xa7\xb5\x20\x4d\x49\xc7\x7f\xf0\x4a\xcf\xd3\xba\xcc\xcd\x01\xa9\x76\x35\x56\x5f\x07\x12\x6c\xd5\x07\x83\x9b\xb3\x0c\xd9\xc7\x0f\x8b\xb2\x2a\xd9\x5c\x2f\x7d\xd5\x85\xe1\x9b\x6c\xc6\x16\xa7\xfc\xc9\x8b\xb6\x2c\xaa\xac\x3c\xe6\x6d\xbc\x42\xfe\xaf\x6f\xd2\xe6\x36\xf2\x7e\x6a\x78\x63\x2d\x0a\x6b\x9b\x17\x4f\xf4\x36\x04\xe8\xf8\xea\x6f\xbb\x36\x54\xf2\xe6\x15\x85\xf8\xff\x96\xbc\x41\x9b\x50\xfd\x19\x2b\xd3\xba\xae\x6a\x93\xe5\xc3\xc0\xa3\xe4\x47\xe9\x55\xf0\x79\xfc\xda\x36\xc3\x48\x64\xcc\x27\x00\x9d\xed\xda\x8b\xc6\x30\x74\x3b\xb1\xc0\xae\xba\xb0\x2b\x05\x6b\x64\xf2\x11\x2f\xd7\x15\x9f\x37\x56\x6c\xc6\x58\x40\xe7\xb7\x4b\x2d\xa8\x3b\x40\x17\x62\xa0\x95\xa5\xd6\xde\x94\
x6f\xdf\x4c\x26\x8a\xdb\x77\x6d\xb8\x16\x89\x64\xf3\xea\x02\xdf\xa1\xac\xb6\x49\x34\x86\x80\xdd\x73\xa4\x95\x4d\x52\x3d\x4f\x9a\xd7\x6c\x14\xe3\x01\xfc\xaf\x8b\x7c\x89\xd6\x97\xec\x5c\xa6\x1f\x73\xb6\xae\xa4\x31\x0c\x43\x78\xe5\x56\x59\x8f\xd9\xae\xc2\x5c\x8e\x5f\x19\x6c\x18\x2a\x8a\xef\xea\xea\x03\xd7\xb8\x31\x17\xbc\x8e\xab\x5f\x27\xa4\x8c\x84\x2e\xc3\x1b\x59\x85\x96\x17\xd5\x20\x02\x37\x21\xd7\xad\xb2\x4e\xc8\xb5\xeb\xac\x33\x64\xbc\xe7\x57\xb2\x04\x74\x14\x1c\xfa\x44\x7f\x52\x7e\xb0\x3c\xd0\xea\x46\x47\xc6\xaa\xd1\x87\xe8\xa5\xb0\x40\xf5\x12\xb0\x5a\xae\xd7\x6d\x9a\x0e\x35\x0f\x21\x21\x86\xd7\x52\xd9\xa2\x19\xa8\x5a\xc1\x4d\xea\xf1\xea\x8c\xad\xdf\x77\x5c\xb6\xb6\xdd\xc9\xa4\x63\xa2\xc2\x11\xeb\xd1\xf5\x5d\xa7\xeb\xc2\x69\x05\x16\x4d\x04\x1d\x93\x7d\x07\x36\xfb\x95\xd1\xf0\xc5\x33\x91\x51\x49\xcc\x0a\xaa\xf6\xbb\x01\xdb\x2f\x9e\x6c\xcf\xf6\xca\xce\xf6\xdc\xcd\xf6\xdc\xc1\xf6\x6a\x0b\xb6\x9d\x45\xa4\xd7\x75\x15\x69\x39\xfd\xb1\x5d\x1d\xe9\xb1\x22\xcc\x12\x57\xc5\x37\x95\x5e\x8a\xf9\xfb\x87\x2f\x0f\x54\x82\xd6\xa9\xc5\x3c\x45\x59\x71\x6a\x28\xae\x7d\x3e\x67\x82\x89\x4d\x85\xfa\x58\x54\xc2\x35\x69\xe9\x98\x10\x35\x95\x9d\x87\x13\x35\xdd\xa2\xdb\xdf\x3f\x7c\x69\xac\xb8\x7d\xb2\x2a\xcf\xe7\xfc\xee\x6e\x53\x44\xb2\x51\x67\xa2\x48\xff\xe9\x8f\x33\x5d\xa4\x26\x22\x04\xdb\x25\x54\x28\xcd\xfa\xd7\x03\xa9\x2c\x96\xaf\x31\x3a\x12\x70\x07\x52\xaa\x0f\xa5\x8e\x97\xab\x49\x7b\xcf\xba\xba\x38\xbe\x26\x7d\xb0\x9e\x97\x19\x9f\x78\x53\x44\xf6\x06\x77\x61\x34\x68\xc9\x35\xd1\x92\x29\x0a\x1c\x68\xfd\x6b\xa2\x0d\xa6\x28\xda\xb3\x5f\xa4\x71\xed\x77\x0f\xbe\xc6\x07\x7a\x63\xad\x85\x55\x32\x07\xfa\x3b\xc7\x16\x0d\xfc\x2d\x28\xdc\xcc\x3b\x8d\xa0\xb5\x23\x73\x64\xd7\xee\xe3\x2d\x28\x98\x47\x3d\x9c\x90\x1b\x1b\xf6\xfe\x49\xc2\x6a\x13\x5d\x6e\x20\xb8\xb6\xb8\x76\x0c\xb1\xb6\x10\xd7\x0d\xb4\x0d\x94\xb3\x7e\x7e\x03\xd5\x2b\xa1\xaf\x15\x66\xbf\x17\x92\x69\xaf\xaa\xbe\x56\xdc\xfd\x5e\x18\x4c\xdb\xaa\xee\xf7\xc2\x68\xaa\x8a\xbd\xdf\x8b\xf0\xc7\xb7\x53\x1a\x7c\x52\xc1\xfd\xdf\xb3\xd2\xfe\x67\xab\x87\xff\xdf\x53\xd9\x1e\x6e\x2a\x28\x17\x3c\xbf\xd9\x12\xf7\xdf\xb1\x35\x6f\xab\xd6\xb3\x35\xd7\x9e\xbd\xf6\x89\xb3\x02\xfe\xd0\x97\x37\x51\x80\x16\xec\x8c\xaf\xcf\x75\x2f\x3d\xd4\xd9\x10\x20\x82\x0d\xf9\xdf\x7f\x7c\x34\xa1\xf9\x16\x45\x41\x73\x85\x8d\x09\xcd\xeb\x28\x10\x7c\x00\x53\x9b\x28\x38\x50\x5f\x04\xff\x86\xcc\xa0\x45\x2d\xd1\xab\xe9\x94\xf2\xef\x7c\x8d\x18\x5a\xf0\xcb\xf9\x15\x92\xbe\x96\x9b\x08\xeb\x01\x05\x75\x6e\xf3\x58\x5c\x9c\xa5\x7c\xf5\x11\xc1\xad\x52\x70\xab\x8a\xf8\xe0\x13\x48\xe7\x0f\x9c\x4d\xe6\xcb\x4b\x68\x21\xfe\x6b\x6a\xd0\x6d\xdc\x8d\x6e\x43\x80\x5a\x2e\x9b\x56\x2e\x75\x44\xa8\xc5\x53\x0f\xcc\x72\xf5\xcf\x23\x9e\x0f\x6f\x65\x81\x17\x7a\x91\xd7\x9d\xef\xac\x25\x0d\x21\x7e\x51\x76\x32\x2a\xd1\xc3\xa9\xe0\xda\x3c\x86\xa9\xfb\xb5\x0c\xb7\x7a\xc2\x63\xd1\xdb\x23\xd4\xbd\x7d\x5b\x7f\x33\xef\x6b\xea\xbb\xb2\xba\x2c\xd7\x1c\xfd\xf4\xec\x64\x0d\x18\xc6\x14\x53\x5f\x94\xa2\x0c\xe4\x23\xfa\x56\xe8\x57\xc8\xe5\x2e\x08\x46\x8d\x24\xac\xa8\xf8\x0a\x2d\xf8\x29\xab\xca\xc5\xe9\x0d\x08\x1e\x50\x71\x21\x78\xa5\x82\x83\xc5\xb2\x9a\x58\xa5\x7a\x78\x88\x16\xcb\xd1\x4c\x15\xee\x64\x91\x02\xfd\xad\x91\xee\x7d\x23\x98\x14\xec\x6f\xb5\x90\x0d\x29\xa9\x92\x8c\x12\x4c\x6d\x0d\xad\x3a\xef\x77\xb8\xeb\x64\x00\x36\xad\x7c\xfb\xd3\xf7\x9a\x56\x60\x39\x01\xc6\xed\x73\xb6\x86\xe5\x85\xad\x7c\xa8\xd1\x14\xe0\x10\x2e\xd1\x28\xab\x5a\x0a\x12\x35\xde\x1b\x56\xfe\xb7\x3f\x7d\x7f\x33\xaa\x97\x6b\x3b\xad\xe2\xd9\x22\x9f\xb0\xc5\xb2\x9a\xf1\x95\x62\xc4\x65\x06\x6c\x91\xeb\x66\x20\x7a\x38\x62\x0a\xad\x9f\xdd\x96\x02\x19\xb3\x8a\xc6\xf3\x14\xfc\xef
\x66\x1f\xcf\x5e\x7c\x6e\xf3\x78\xf6\xe2\x33\x59\xc7\xb3\x17\x37\x63\x1c\xcb\x55\xc7\x36\x96\xab\x1d\x4c\x63\xb9\xba\xb6\x65\xfc\xba\xa3\x65\xfc\xfa\x3b\x5b\xc6\xeb\xcf\x6f\x1a\xaf\x3f\x9b\x6d\xbc\xbe\x29\xe3\xd8\xf4\xac\x63\xb3\x93\x79\x6c\x3e\xc1\x3e\xde\xed\x68\x1f\xef\x7e\x27\xfb\x80\x45\x79\xdd\x32\x16\x72\x66\x54\xbd\x10\xce\x79\x51\x6d\x9f\x95\x2d\xc0\x26\xe4\x37\xb4\x2c\x1a\x4c\x70\x85\xcd\x4d\x19\x03\x20\xbb\x19\x73\x00\x54\x1d\x83\x80\x5f\x9e\x4c\x48\xe8\xb2\x03\x09\xa4\x9b\xc2\xc2\x64\x07\xe2\x15\x68\x81\x1e\x20\x9f\xd8\x56\xba\x34\x4b\x99\xb4\xa6\xf2\xe0\x01\x5a\xc0\x12\x79\x63\x0c\x72\xeb\x10\x41\x77\xd1\xc2\x78\x59\xbd\xd9\x84\x04\x9e\xa1\xad\x7d\x44\xf5\xcb\x93\x9b\x21\x1d\xcd\x64\x81\xee\x1a\x6e\x0c\x1d\x90\xee\x2f\x75\x09\x72\xff\x9d\xd6\x0b\x53\xf9\xff\x76\xe6\xfb\x62\x62\x7f\xb9\xa8\xad\xf7\xc5\x0d\x59\xaf\xd4\x7b\xd7\x52\x35\xe3\xad\xed\x79\x0b\xe3\x1d\x44\x4c\x40\x75\x0d\xfb\xd5\xbc\xa0\xc1\x33\x6e\xc0\x8a\xfc\xef\x6e\xc1\x2f\x96\x15\xab\xf8\xe7\x0e\xc0\x2b\xa0\x72\x53\x26\x0c\xd8\x6e\xc6\x84\x25\x63\xba\x09\xaf\x96\xa3\xf1\x57\x80\x8c\xda\xaf\xea\x11\xd8\x81\x8a\xea\x8b\x3d\x91\x0e\xb6\xbf\xbc\x98\x44\xc1\xc0\x2c\x3f\x55\x61\x37\x14\x73\xfe\x58\x1a\x1b\x09\x39\x02\x62\x77\x85\xbd\x18\x28\xec\xc9\x75\x14\xf6\x6d\x9e\x7f\xee\xcc\x97\xe5\xf9\x67\xca\x7c\xe5\x95\xdf\x37\xf1\xce\x9c\xf7\xde\x99\xf3\x9d\xde\x99\xf3\xad\xdf\x99\xfb\x23\xc2\x7e\x93\xc8\xc2\x86\x51\x73\xf2\x9b\xb1\xd5\xea\x4a\x34\xab\xc7\x10\x79\x31\x7c\x67\x58\x69\xaf\x87\x37\xe3\x18\x26\x52\xfb\x6d\xce\x8d\xf6\x25\x0d\xc5\xc3\xa7\x46\x74\xf9\xcd\xbc\xba\xf2\xed\x42\x5d\x01\xbe\x2c\xf4\xb9\xcd\xb5\xe9\x86\xe3\xd5\xf2\x9c\xaf\xaa\x2b\xf4\x0f\x75\xc5\x30\x00\x82\x79\x35\x28\x06\xd3\x8a\xca\x40\xd6\x07\x26\x3c\x75\x58\x69\xee\x44\xef\x46\x97\x75\x79\xba\x28\x8b\x32\x63\x8b\x0a\xa5\xf0\xbc\x5c\x68\xbe\x01\x44\x1d\xb3\xbf\xed\xbc\x74\xcd\x4c\xfd\xcb\x0d\xcc\x03\x0f\x39\xb0\xbb\x63\x47\x5c\x93\x67\xe7\xc2\x2c\xd9\x7c\xaf\x23\xfb\x51\xc1\x21\x63\x40\x6e\x24\xa7\xa1\xdd\x4a\x88\xbc\xab\xe6\x4f\xf0\xd5\x4b\x5d\xd4\xfd\x5e\x74\xd6\x7c\xbb\x3e\xfb\x89\xc8\xde\x0c\xda\x8b\xbf\x5d\xa7\xb5\xa7\xbb\x62\xc1\x14\x27\x98\xe1\x14\xce\xd4\x64\x38\xc7\x1c\x17\x7b\x03\x24\x6f\xff\x8d\xba\x3a\x45\xd8\xdb\x7a\x79\x00\x8c\x6e\xda\x98\xed\x20\x2c\x5f\xaa\xcd\x13\x10\x16\xeb\x2f\xf2\xbf\xbf\xfe\x6a\x38\x80\x21\xf2\xfe\xc6\x07\xfe\x74\x84\x86\xab\x60\xfa\x9f\x1c\x9b\x6b\xf0\xa3\x86\x8d\xfe\x5e\x40\x6b\xd2\xde\x47\x20\x7d\x68\xce\x17\xa7\xd5\x0c\x7d\x8d\xe8\x96\x5b\xa9\xfb\x81\xe6\x78\xb9\xf8\xc0\x57\xf5\xab\xa1\x16\x86\x55\x7c\x10\x83\x76\x7d\x3a\x60\xab\xc0\x53\x8f\xda\x8d\x76\x3b\x2b\x73\x1f\xd1\x49\x37\x88\xde\x59\xa3\x9c\x55\x0c\xb1\xf5\x8e\x74\xb6\x9e\xc9\xea\xae\x14\x6e\xb4\x00\x7d\x50\x2d\x5f\xfb\xc4\xbe\x14\x02\x8f\x3f\x61\xcf\x8e\xa2\xd5\x35\x2a\xc3\xce\x9d\x1a\xee\x89\x54\x66\xc3\x64\xad\x5e\xd3\x2e\x1e\xa9\x36\x03\x2e\xd9\xdd\xad\x37\xef\x77\x69\xbb\x4f\x7a\xb5\x4b\x78\x75\xab\x37\x83\x2d\xfc\xe2\xaf\xe6\xe1\xe0\xfc\x62\x3d\x9b\xd4\x89\x94\xc8\x11\x4c\xef\x95\x66\xe8\x5e\x2e\x81\x0c\xfb\x64\xeb\x54\x44\x53\x70\x1d\x41\x6a\x9c\xd3\xae\xdb\x58\x37\x92\x0c\xbc\x02\xd0\x08\x93\xcc\x96\xe7\x30\x48\x5a\xc6\x7e\x34\x9a\xb6\x36\x66\xcf\x51\x36\x5f\x2e\x5c\x6f\x2a\xdb\x9a\x34\xe0\xe9\xdb\x32\xfc\x68\xb7\x65\x78\xec\xb4\x65\x1d\x33\x64\x29\x92\xdd\x66\xe7\xab\x69\xa7\xeb\x31\xc0\xff\x19\x0c\xfb\xcf\x52\x32\x43\xa4\x75\x2c\x95\xf8\x86\x61\xb6\xde\x35\x66\x27\x00\x67\x98\xea\x85\x75\x99\x9c\x58\xc8\x34\x2e\x74\xd9\xf1\x9f\x51\x37\xb8\xdc\xc6\x07\x2e\x95\xc9\xd7\xe8\xdf\x94\x6f\x4d\x62\xb7\x9b\x2a\x00\x77\xd6\x97\x9b\xf4\xd8\xba\x6f\xa6\xb
7\x5b\x46\x6d\x8d\xf9\xf8\x76\x4a\xc3\x6d\xf6\xbb\x1c\x7e\xfd\x27\x34\xab\xaa\xf3\xf5\xbd\xc3\xc3\xb3\x6a\xb6\x3e\x48\xf9\xe1\x45\x55\xd0\x5f\xd6\xe8\x03\x39\xc0\x07\x04\xa5\x57\xe8\x7f\x9e\xb1\x6a\x56\xb2\xb5\xb0\x98\x76\x83\x0c\xec\x0a\x91\x9b\x3d\x0e\x0f\xd1\xf7\xbc\x92\xc7\xe1\x38\x17\xe2\x2e\x59\x3a\xe7\x6b\xf4\x37\x45\xe9\x6f\xb7\xbe\x82\x6d\xfc\x2b\xce\x1f\x36\xfb\x5f\x06\x3b\x69\xd0\x1d\xa9\xbc\x3b\xe8\xf6\xed\xfa\xe7\xfb\x76\xf4\xe8\x6f\xb2\x3b\x1a\xf2\xa7\xf0\x43\x8b\xfb\x4c\x7d\xef\xa2\x56\xbf\xde\xbe\x6d\xd8\x9f\x73\xd4\x61\xb2\x01\x76\xb2\x71\x0a\x3b\x67\xfe\x36\x95\xbb\xf1\x7f\x5a\xe6\xfc\xe0\x97\x35\x5a\xae\xd0\x77\x72\x2b\x4d\x59\x94\x3c\x47\xd9\x32\xe7\x53\xc0\xc2\x16\x39\xba\x58\x73\x54\x56\x62\x5c\xfb\x9b\x90\xa3\xd6\x07\xb5\x0f\xa7\xe9\xc3\xa9\xfa\xde\xed\x83\xfc\xf5\xbe\xdc\x93\xd4\x36\x3b\x68\xa0\x8f\x74\x64\xbf\xfe\xaa\x7d\x3b\xb8\x2c\x17\xb9\x78\xbb\xec\xc0\xc8\xad\x43\x82\x17\xa4\xff\x0c\x9b\x7d\x6e\x7d\x75\xf8\xf5\xdd\x1b\xfb\xfb\xfa\xf0\x96\xec\xed\xba\x5a\x95\x8b\xd3\x47\xab\xe5\xd9\xf1\x8c\xad\x8e\x97\xb9\xd0\xdc\x4b\xf8\xf1\xa0\xd0\x7e\x55\xc2\x3f\x61\xef\xf9\x42\xca\xb8\x6f\xb2\xe7\x17\x8b\x2b\x21\xdf\x5b\x5f\x35\x11\xec\x22\x5b\x93\x9c\x8b\x1f\x27\x92\x8e\xec\x20\x2c\x6d\xc2\xe6\xfb\x7a\x08\x84\x9f\xb2\xe5\xc5\xa2\xe2\x2b\x35\x73\x09\x3f\xcd\xeb\x58\x21\x9b\xb7\xc1\x02\x9e\xc2\x79\xc6\xfa\x0b\xdf\x54\x2b\x26\xbe\x5c\xce\xca\x39\x47\x93\x1a\xdb\x03\x85\x44\x92\xfe\x0a\xda\xb4\x08\x33\xd5\xbd\x6f\xab\xba\xc1\xfe\xbe\x70\xf5\xaf\x40\xa7\x12\xf8\x9b\x23\xe4\x6d\xbe\xa7\x9e\x27\x74\x2e\x7f\x7a\x00\x3f\x7d\xf7\xe8\x91\xf8\xc9\x42\x49\x88\x0b\x5e\xd7\xd7\x17\xab\xd5\xf2\x94\x55\x7c\x0a\x56\x57\xcd\xf8\x8a\xc3\x39\x4f\xb4\xe0\x9b\x0a\x09\x16\x58\x56\xf1\x15\x34\x82\x6e\x6c\xc3\x1f\x30\x38\x91\xe0\xb7\x91\xb7\x79\x74\xec\x79\x7b\xc2\x42\xbd\xcd\xf7\xf0\xf1\x1f\x22\x38\xcf\x97\x97\x2d\x7d\x68\xf6\x95\x94\xbc\x1c\xca\x27\xaa\x8b\x02\x81\xff\xe8\xd1\x1e\x1c\xcd\xf4\xf6\xd0\x3e\xd2\x30\xc3\x83\xfd\xba\xe2\x90\xa2\xde\x66\xc1\xaa\xab\x17\x8b\x33\x56\x65\x33\x9e\xb7\xf4\xee\xa3\xe5\x62\x7e\x85\xd8\xf9\x39\x87\x7e\x97\x6b\x70\x40\x74\xb1\x28\xab\xa9\x78\xd1\xcc\xd8\x9a\xc3\xdb\xa6\x10\x44\x83\xa9\x81\x11\x42\xaa\xea\x7d\x51\x0d\x56\x31\xd4\x33\xed\xeb\x39\x2b\x57\xc3\x9e\x41\xbf\x14\xaf\x5f\x29\xd1\xdd\xbd\xab\x78\xbf\xd5\xef\x80\xa5\xa5\x00\x14\xff\x57\xf1\x5e\x42\xd5\xde\x78\x1d\x67\xe0\x0b\x70\x06\x18\x85\x5b\x5f\x68\xac\x5c\xe6\x2d\x5d\x23\x2f\x17\x39\xdf\xa0\x23\x74\x17\x1b\xcd\xbe\xf1\xa3\x3b\x77\x34\xe3\xdf\xdf\x97\xcd\x2c\xc6\x0f\x74\xde\x00\xc8\xdb\xbe\xb1\x0b\x53\x7a\x24\x34\x2e\x25\x23\x7f\xbd\x7b\x54\xab\xff\xbe\x26\x2f\xb4\x7f\x64\x88\x1f\x35\xa2\x6f\xbe\x41\xd8\xab\x0d\x08\xfd\xaa\x7c\x48\xa9\xa4\xe6\x44\x1a\x2b\xfa\x15\x75\xec\xb0\x11\xfe\x16\x84\x00\xa1\x4d\x49\x8d\xf0\xb3\x19\xcf\xde\xbf\xcc\xd8\x9c\xad\xfe\xb7\x68\x35\x11\x7a\x78\xbe\x2c\x17\x72\x37\x35\x08\xa0\xf9\xa9\xeb\xf1\xed\xcf\xd2\xeb\x5b\xe1\x54\xb3\xd5\xf2\x12\x3d\x5c\xad\x96\xab\x09\xf4\xea\xce\x13\x91\x0a\xb5\xa6\xf9\xd7\xfd\x3b\x68\xbf\x45\x70\x50\x2d\x65\x64\x9d\xe0\x68\xef\xa0\x5a\xfe\xf5\xfc\x9c\xaf\x8e\xd9\x9a\x4f\xf6\xd0\xbe\x44\x20\x4c\x7e\xb1\xac\x84\x81\x03\xb3\x52\x2e\x77\xc4\xc3\xba\xa3\x1f\x3f\xc3\x48\xd0\xca\x09\xb2\x6a\x91\x89\xb7\xe2\x98\xca\x65\x36\x35\x38\x49\x29\x1b\xb4\x31\xd1\x05\xf8\x4d\xdd\x46\x6a\x14\xa6\x2a\x37\xd4\xdb\xeb\xeb\x45\x3a\xc4\x71\xdd\xd0\xa4\x16\x0d\xed\x6d\x65\x9c\x8f\x1e\x51\x15\xeb\x54\x98\xc3\x77\xd3\xab\x8a\xa3\x35\xff\xaf\x0b\xbe\xc8\x20\xd0\xd9\x19\x6d\x69\xd4\xa6\x03\x03\xe1\xd5\x59\xba\x9c\x37\x8e\x64\xa3\x4c\xbd\x2e\x65\x32\xa4\xdc\x60\x1a\x17\x52\x
24\x05\x84\x95\x80\x8e\xbd\x86\xa5\x66\xe3\xb1\x81\x09\x08\xc3\x3a\x13\xfe\x90\x09\x87\xc1\xdf\xdf\x91\x49\x4c\x24\x97\x9e\xe2\xf2\xa1\xd7\x41\xb1\x7f\x64\xb1\x9a\x68\x8b\xce\x3c\xf4\x06\x9d\x09\x3e\x49\xa2\x98\x2a\x66\x63\xc9\xec\xa3\x2d\x99\xc5\x64\xd7\x4e\xb5\x90\x26\xae\xba\x1d\xed\x7a\x40\x63\x9b\x80\xa1\xef\x12\x22\xf5\x57\xe3\x44\x3f\x69\x6a\x90\x8a\xd4\x7d\x98\x5c\x0d\xb2\xa6\x16\x7e\x74\x50\x69\x40\xeb\x1f\x84\x12\x64\xb4\xda\x72\x70\x69\x7b\xac\x13\xd6\x47\x19\x0d\xe5\xfe\x91\xc3\xf5\x7b\x11\xbd\x6d\xf6\xb9\x12\xe1\x46\xf6\x2b\xce\xf2\xe3\xe5\xa2\x2a\x17\x17\x70\x78\x16\xb4\xdf\x86\x22\xc1\xc9\x0f\xd0\xf7\x6f\x8e\x80\xad\x63\x91\x58\x18\x46\x83\x3b\x3f\x2c\x3e\xb0\x79\x99\x03\x90\x94\xf6\x1d\xd5\xad\x46\xde\x5d\x2a\x48\x22\x84\x89\x82\x37\x0d\x9d\xb7\xca\x4d\x44\xd3\xe6\xc7\xfd\x7d\x91\x8c\xd7\x11\xaa\x87\xe6\xb6\x0c\x23\x32\x11\x14\x51\xf2\x1f\x5a\x30\x34\x42\xfb\x8f\x1a\xc6\x0e\x0f\xd1\x0f\x05\xba\xe4\x48\xe4\x6b\x17\xe7\x48\x64\xaa\x53\x54\x56\xff\xef\xff\xfc\xdf\x7a\x58\xd2\x51\x00\xc7\xb7\x2c\x3d\x1f\x00\xde\x19\x04\x7f\x69\xbd\x2f\xc1\x0b\x26\xad\x95\x0b\x60\xac\x9b\x21\xd1\xbf\xf8\xfa\x97\xc0\x60\xbe\x43\x5d\x7d\x82\xaa\xba\x98\x8e\x86\x5a\x57\x92\x2d\xd8\x1c\x0e\x3f\x34\x72\x7c\xc1\x59\x8e\x8a\x72\xb5\xae\x6a\x29\x41\xb7\x76\x57\xf3\x70\x74\x43\x93\xc5\x72\x28\xde\xf5\x5e\x6d\x13\x92\xd0\x6d\xa5\x7f\x15\x59\x35\x5e\x1b\xf9\xd6\xbc\x0e\xc7\xb0\x1e\x9e\x87\xb5\x41\x1d\xd7\xa8\x40\x2d\xe8\xc8\xe2\x30\xf7\xfb\xf1\x40\x47\x86\xe5\x6b\x06\xd4\xdc\x69\xb4\x6b\x4a\xc0\x1a\xeb\x6d\xcd\x57\x8b\x51\xdd\x04\x7e\x07\x13\xac\xd3\x7a\xd9\x77\xbf\x2f\xdb\x33\x76\x85\xca\x45\x36\xbf\x80\x97\x10\xf1\x72\xa1\xbf\xd2\x98\xa4\xfc\xa8\x96\xce\xc3\x1d\xa4\x03\xa6\x7c\x3d\x01\x7a\xea\x3d\x8d\xc0\xde\x24\x49\x4b\x17\xa8\x6f\x13\xa8\x07\xc9\x8b\x14\xd8\x58\x7e\xf0\x39\x65\x3e\x1c\xe1\xfb\x12\xa5\x4a\xa2\x8f\x6e\x56\xa2\x10\x32\xae\x29\xf4\x18\x84\xee\x6d\xfa\x62\xf7\x36\xde\xf1\x1e\xfa\x15\x24\x32\x91\x3c\xc8\x5f\x1b\x7d\x04\x56\x7d\xc0\x1b\x95\xe1\x1d\x03\x7b\xfa\x2b\x98\x59\x13\xb5\x3c\x8d\x5a\xf8\xeb\xc9\xa3\xbb\x14\xe5\x30\x53\xc6\xf3\x26\xf2\xd6\x61\x53\x9d\xc0\x6a\xbe\x43\x40\xd3\xbe\x43\xfc\xb9\xdf\xcb\x49\x54\xae\xd1\x8e\xc6\x92\xbf\x06\x5f\x37\x25\xd1\xc0\xea\xa8\x06\x54\xf4\x00\xa8\x25\x25\x5a\x8c\x6d\x67\x7f\x3a\xe9\x4e\x3b\x4f\x54\x9d\x9d\x6b\xd9\xc8\xa4\x3a\x3b\x47\x47\xbd\xb1\x64\x0f\xfd\xe9\xe8\x48\x06\xe5\x7e\x76\xa2\x16\x31\xaa\xb3\xf3\x7e\x9e\xa1\xbd\xa0\xb7\xd0\x7b\x9f\x73\xf2\x4d\x88\x15\x1d\x01\x83\x77\x3e\xf0\xd5\xba\x5c\x2e\xee\xdc\x43\x77\x60\xd2\xf7\xce\x54\xfc\x2a\xf9\xb9\x73\x4f\xcb\x0a\xe1\x77\xd9\x5d\xf5\xbb\xfc\x72\xeb\xab\x8f\x6a\x92\xee\xe5\xf2\x8c\xa3\x6f\x9f\x7e\x8f\xd2\x8b\x72\x9e\xa3\xe5\x79\x55\x9e\x95\x7f\xe7\xab\xf5\x14\xcd\xcb\xf7\x1c\xad\x0e\x7e\x59\x4f\xe5\x2b\x31\xcc\xb4\xaf\xcf\x79\x56\x16\x65\x26\x9c\x37\x2f\x41\xe1\xe7\xac\xaa\xf8\x6a\xb1\x06\x7c\xd0\xa8\x9a\x71\x54\x2c\xe7\xf3\xe5\x65\xb9\x38\xbd\x27\xe7\x3c\x85\xf9\xf5\xce\x45\xa2\x3b\xb5\xd1\xdc\x91\x93\xbb\x1d\x80\x03\x76\x96\xf7\x66\x51\x9b\x23\x92\xe2\xd9\xad\xaf\xa4\xba\xd4\xa1\xc9\x66\x9a\xbb\x3b\x80\x89\x3e\x83\xee\x40\x39\xed\xdb\x45\x6f\xd6\xf8\x4f\xda\xf7\x83\xc5\x32\xe7\x27\x57\xe7\xbc\x4d\xe6\xda\xb9\x6a\xf5\xe2\x51\x2e\xf4\x79\xe3\x17\xe5\xe2\x74\xf9\xbf\x5e\xa2\x0f\xde\x01\x3d\xf0\xe0\xf5\xbc\x6d\xa1\x9d\x25\x6d\x98\x51\xa1\xb1\xc6\xc4\x56\x97\x33\x36\xef\x61\x8a\x0f\xbc\xbb\x72\x22\x66\x55\xef\x8d\x92\xa7\x18\xd5\x6f\x33\xb6\x7e\x76\xb9\x78\x5e\x6f\x81\x39\x52\x40\x07\xdd\xdf\x01\xbc\x59\x22\x81\xaa\x71\x52\x28\x75\xc4\xe8\x82\xcb\xf5\x21\xf1\x1c\x0e\x12\xef\x09\xd9\xe8\xb2\x7a\xf3\x5e\
x16\x30\x14\x10\xf0\xb9\x33\xf9\xd5\xeb\xd7\x8b\x59\xb9\x58\x8a\x5e\x31\x74\xc9\x53\xa4\x0e\xaa\xaa\x59\xeb\x03\x65\xd0\x4a\x26\x1f\x6f\xa9\x23\xaa\xb0\x6c\xf2\x71\xfa\x8f\x8f\x6f\xa7\x34\xda\x66\x49\x64\x70\x62\xf7\xf5\xd3\x27\x8f\xab\xea\xfc\x85\x18\x32\xd6\x55\x83\xed\xcf\x69\x79\x2a\x37\xb3\x1c\xfc\xb2\xfe\xf3\x36\x98\xef\x5c\xac\x39\xbc\xb0\x65\xd5\x9d\xfb\xb7\x86\x84\xbe\x2b\x4f\x7f\x02\x84\xf7\x45\x87\x7f\x59\xcf\x44\x50\x2e\x4f\x17\xcb\x15\xbf\x37\x2f\x17\xfc\x56\x43\xfa\x92\xa7\xfe\x56\x24\x85\x92\x5e\xf1\x54\x8e\x4d\xf2\x98\xf1\x9d\x83\xc3\x79\x99\x1e\x0a\x14\x22\x38\xdf\x3a\x3c\x44\xf9\x72\x71\xa7\x42\xcb\x0f\x7c\xb5\x2a\x73\x5e\xaf\x38\xd4\x0b\x1c\xb7\xb4\x33\xc8\x6a\xe9\x40\x44\xb8\x3b\xcd\x8e\x06\x58\x90\xe8\x00\x1c\x48\x9a\x5d\x28\x61\x21\xb0\x4e\xa6\x83\x00\x77\xf7\x6f\x7d\x34\x88\x43\x3e\x51\x2b\x5b\x35\xcb\x7f\xbe\x47\xc8\xc7\xb7\x42\x0c\xd3\x37\x52\x0c\x6f\xf7\x6e\xdd\xfa\xff\x01\x00\x00\xff\xff\x02\x09\x77\x52\x22\x24\x06\x00") func web3JsBytes() ([]byte, error) { return bindataRead( @@ -106,7 +106,7 @@ func web3Js() (*asset, error) { } info := bindataFileInfo{name: "web3.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4c, 0xb2, 0xb5, 0x69, 0x9e, 0x5e, 0xe3, 0x56, 0x47, 0x6, 0x96, 0x4a, 0x7a, 0xcd, 0xdf, 0x79, 0xc5, 0xf2, 0x71, 0x64, 0x77, 0x8a, 0x53, 0x58, 0x6, 0xb1, 0xff, 0x4f, 0x0, 0xd6, 0x7f, 0x61}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x12, 0x22, 0x81, 0x1, 0xe2, 0x72, 0xd3, 0xd5, 0x4d, 0x2d, 0x30, 0xa5, 0x3, 0x90, 0x3a, 0xf8, 0x17, 0x2d, 0xe3, 0x5, 0x44, 0x21, 0x63, 0xba, 0x1a, 0x37, 0x3f, 0x3f, 0xa5, 0x30, 0x5e, 0x6f}} return a, nil } diff --git a/internal/jsre/deps/web3.js b/internal/jsre/deps/web3.js index c130f80de..69686ce92 100644 --- a/internal/jsre/deps/web3.js +++ b/internal/jsre/deps/web3.js @@ -3949,10 +3949,18 @@ var outputSyncingFormatter = function(result) { result.startingBlock = utils.toDecimal(result.startingBlock); result.currentBlock = utils.toDecimal(result.currentBlock); result.highestBlock = utils.toDecimal(result.highestBlock); - if (result.knownStates) { - result.knownStates = utils.toDecimal(result.knownStates); - result.pulledStates = utils.toDecimal(result.pulledStates); - } + result.syncedAccounts = utils.toDecimal(result.syncedAccounts); + result.syncedAccountBytes = utils.toDecimal(result.syncedAccountBytes); + result.syncedBytecodes = utils.toDecimal(result.syncedBytecodes); + result.syncedBytecodeBytes = utils.toDecimal(result.syncedBytecodeBytes); + result.syncedStorage = utils.toDecimal(result.syncedStorage); + result.syncedStorageBytes = utils.toDecimal(result.syncedStorageBytes); + result.healedTrienodes = utils.toDecimal(result.healedTrienodes); + result.healedTrienodeBytes = utils.toDecimal(result.healedTrienodeBytes); + result.healedBytecodes = utils.toDecimal(result.healedBytecodes); + result.healedBytecodeBytes = utils.toDecimal(result.healedBytecodeBytes); + result.healingTrienodes = utils.toDecimal(result.healingTrienodes); + result.healingBytecode = utils.toDecimal(result.healingBytecode); return result; }; diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go index bc8869b25..24fedd8d2 100644 --- a/internal/jsre/jsre.go +++ b/internal/jsre/jsre.go @@ -20,6 +20,7 @@ package jsre import ( crand "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "io/ioutil" @@ -220,19 +221,33 @@ loop: } // Do executes the given function on the JS event loop. +// When the runtime is stopped, fn will not execute. 
 func (re *JSRE) Do(fn func(*goja.Runtime)) {
 	done := make(chan bool)
 	req := &evalReq{fn, done}
-	re.evalQueue <- req
-	<-done
+	select {
+	case re.evalQueue <- req:
+		<-done
+	case <-re.closed:
+	}
 }
 
-// stops the event loop before exit, optionally waits for all timers to expire
+// Stop terminates the event loop, optionally waiting for all timers to expire.
 func (re *JSRE) Stop(waitForCallbacks bool) {
-	select {
-	case <-re.closed:
-	case re.stopEventLoop <- waitForCallbacks:
-		<-re.closed
+	timeout := time.NewTimer(10 * time.Millisecond)
+	defer timeout.Stop()
+
+	for {
+		select {
+		case <-re.closed:
+			return
+		case re.stopEventLoop <- waitForCallbacks:
+			<-re.closed
+			return
+		case <-timeout.C:
+			// JS is blocked, interrupt and try again.
+			re.vm.Interrupt(errors.New("JS runtime stopped"))
+		}
 	}
 }
@@ -282,6 +297,19 @@ func (re *JSRE) Evaluate(code string, w io.Writer) {
 	})
 }
 
+// Interrupt stops the current JS evaluation.
+func (re *JSRE) Interrupt(v interface{}) {
+	done := make(chan bool)
+	noop := func(*goja.Runtime) {}
+
+	select {
+	case re.evalQueue <- &evalReq{noop, done}:
+		// event loop is not blocked.
+	default:
+		re.vm.Interrupt(v)
+	}
+}
+
 // Compile compiles and then runs a piece of JS code.
 func (re *JSRE) Compile(filename string, src string) (err error) {
 	re.Do(func(vm *goja.Runtime) { _, err = compileAndRun(vm, filename, src) })
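// Editor's aside, not part of the diff: a minimal sketch of what the jsre.go
// hunks above enable. Previously Stop() could hang forever when a script such
// as `for(;;){}` monopolised the event loop; the 10ms timeout path now calls
// vm.Interrupt and retries until the loop drains. internal/jsre only compiles
// inside the go-ethereum module itself, so treat this as an illustration of
// the intended usage rather than a definitive example.
package main

import (
	"os"
	"time"

	"github.com/ethereum/go-ethereum/internal/jsre"
)

func main() {
	re := jsre.New("", os.Stdout)
	// A script that never yields; with the old Stop this would deadlock.
	go re.Evaluate("for (;;) {}", os.Stdout)
	time.Sleep(50 * time.Millisecond)
	// Stop's timeout fires, the VM is interrupted, shutdown completes.
	re.Stop(false)
}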
diff --git a/internal/shutdowncheck/shutdown_tracker.go b/internal/shutdowncheck/shutdown_tracker.go
new file mode 100644
index 000000000..c95b4f02f
--- /dev/null
+++ b/internal/shutdowncheck/shutdown_tracker.go
@@ -0,0 +1,85 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package shutdowncheck
+
+import (
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
+)
+
+// ShutdownTracker is a service that reports previous unclean shutdowns
+// upon start. It needs to be started after a successful start-up and stopped
+// after a successful shutdown, just before the db is closed.
+type ShutdownTracker struct {
+	db     ethdb.Database
+	stopCh chan struct{}
+}
+
+// NewShutdownTracker creates a new ShutdownTracker instance and has
+// no other side-effect.
+func NewShutdownTracker(db ethdb.Database) *ShutdownTracker {
+	return &ShutdownTracker{
+		db:     db,
+		stopCh: make(chan struct{}),
+	}
+}
+
+// MarkStartup is to be called in the beginning when the node starts. It will:
+// - Push a new startup marker to the db
+// - Report previous unclean shutdowns
+func (t *ShutdownTracker) MarkStartup() {
+	if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(t.db); err != nil {
+		log.Error("Could not update unclean-shutdown-marker list", "error", err)
+	} else {
+		if discards > 0 {
+			log.Warn("Old unclean shutdowns found", "count", discards)
+		}
+		for _, tstamp := range uncleanShutdowns {
+			t := time.Unix(int64(tstamp), 0)
+			log.Warn("Unclean shutdown detected", "booted", t,
+				"age", common.PrettyAge(t))
+		}
+	}
+}
+
+// Start runs an event loop that updates the current marker's timestamp every 5 minutes.
+func (t *ShutdownTracker) Start() {
+	go func() {
+		ticker := time.NewTicker(5 * time.Minute)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				rawdb.UpdateUncleanShutdownMarker(t.db)
+			case <-t.stopCh:
+				return
+			}
+		}
+	}()
+}
+
+// Stop will stop the update loop and clear the current marker.
+func (t *ShutdownTracker) Stop() {
+	// Stop update loop.
+	t.stopCh <- struct{}{}
+	// Clear last marker.
+	rawdb.PopUncleanShutdownMarker(t.db)
+}
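// Editor's aside, not part of the diff: the intended ShutdownTracker lifecycle,
// mirroring how les/client.go wires it further below -- MarkStartup after a
// successful start, Start for the periodic marker refresh, Stop just before the
// database closes. The in-memory database is a stand-in to keep the sketch
// self-contained; shutdowncheck is internal to go-ethereum.
package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/internal/shutdowncheck"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	tracker := shutdowncheck.NewShutdownTracker(db)
	tracker.MarkStartup() // push a marker, report any previous unclean shutdowns
	tracker.Start()       // refresh the marker's timestamp every 5 minutes

	// ... node runs ...

	tracker.Stop() // pop the marker: this run counts as a clean shutdown
	db.Close()
}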
diff --git a/les/client.go b/les/client.go
index 93319cb93..43207f344 100644
--- a/les/client.go
+++ b/les/client.go
@@ -35,6 +35,7 @@ import (
 	"github.com/ethereum/go-ethereum/eth/gasprice"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
+	"github.com/ethereum/go-ethereum/internal/shutdowncheck"
 	"github.com/ethereum/go-ethereum/les/downloader"
 	"github.com/ethereum/go-ethereum/les/vflux"
 	vfc "github.com/ethereum/go-ethereum/les/vflux/client"
@@ -63,6 +64,7 @@ type LightEthereum struct {
 	serverPool         *vfc.ServerPool
 	serverPoolIterator enode.Iterator
 	pruner             *pruner
+	merger             *consensus.Merger
 
 	bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
 	bloomIndexer  *core.ChainIndexer             // Bloom indexer operating during block imports
@@ -76,6 +78,8 @@ type LightEthereum struct {
 	p2pServer  *p2p.Server
 	p2pConfig  *p2p.Config
 	udpEnabled bool
+
+	shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully
 }
 
 // New creates an instance of the light client.
@@ -88,13 +92,14 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
 	if err != nil {
 		return nil, err
 	}
-	chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier)
+	chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier, config.OverrideTerminalTotalDifficulty)
 	if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
 		return nil, genesisErr
 	}
 	log.Info("Initialised chain configuration", "config", chainConfig)
 
 	peers := newServerPeerSet()
+	merger := consensus.NewMerger(chainDb)
 	leth := &LightEthereum{
 		lesCommons: lesCommons{
 			genesis:     genesisHash,
@@ -105,16 +110,18 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
 			lesDb:       lesDb,
 			closeCh:     make(chan struct{}),
 		},
-		peers:          peers,
-		eventMux:       stack.EventMux(),
-		reqDist:        newRequestDistributor(peers, &mclock.System{}),
-		accountManager: stack.AccountManager(),
-		engine:         ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb),
-		bloomRequests:  make(chan chan *bloombits.Retrieval),
-		bloomIndexer:   core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
-		p2pServer:      stack.Server(),
-		p2pConfig:      &stack.Config().P2P,
-		udpEnabled:     stack.Config().P2P.DiscoveryV5,
+		peers:           peers,
+		eventMux:        stack.EventMux(),
+		reqDist:         newRequestDistributor(peers, &mclock.System{}),
+		accountManager:  stack.AccountManager(),
+		merger:          merger,
+		engine:          ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb),
+		bloomRequests:   make(chan chan *bloombits.Retrieval),
+		bloomIndexer:    core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),
+		p2pServer:       stack.Server(),
+		p2pConfig:       &stack.Config().P2P,
+		udpEnabled:      stack.Config().P2P.DiscoveryV5,
+		shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb),
 	}
 
 	var prenegQuery vfc.QueryFunc
@@ -182,19 +189,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
 	stack.RegisterProtocols(leth.Protocols())
 	stack.RegisterLifecycle(leth)
 
-	// Check for unclean shutdown
-	if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil {
-		log.Error("Could not update unclean-shutdown-marker list", "error", err)
-	} else {
-		if discards > 0 {
-			log.Warn("Old unclean shutdowns found", "count", discards)
-		}
-		for _, tstamp := range uncleanShutdowns {
-			t := time.Unix(int64(tstamp), 0)
-			log.Warn("Unclean shutdown detected", "booted", t,
-				"age", common.PrettyAge(t))
-		}
-	}
+	// Successful startup; push a marker and check previous unclean shutdowns.
+	leth.shutdownTracker.MarkStartup()
+
 	return leth, nil
 }
 
@@ -332,6 +329,7 @@ func (s *LightEthereum) Engine() consensus.Engine           { return s.engine }
 func (s *LightEthereum) LesVersion() int                    { return int(ClientProtocolVersions[0]) }
 func (s *LightEthereum) Downloader() *downloader.Downloader { return s.handler.downloader }
 func (s *LightEthereum) EventMux() *event.TypeMux           { return s.eventMux }
+func (s *LightEthereum) Merger() *consensus.Merger          { return s.merger }
 
 // Protocols returns all the currently configured network protocols to start.
 func (s *LightEthereum) Protocols() []p2p.Protocol {
@@ -348,6 +346,9 @@ func (s *LightEthereum) Protocols() []p2p.Protocol {
 func (s *LightEthereum) Start() error {
 	log.Warn("Light client mode is an experimental feature")
 
+	// Regularly update shutdown marker
+	s.shutdownTracker.Start()
+
 	if s.udpEnabled && s.p2pServer.DiscV5 == nil {
 		s.udpEnabled = false
 		log.Error("Discovery v5 is not initialized")
@@ -383,7 +384,9 @@ func (s *LightEthereum) Stop() error {
 	s.engine.Close()
 	s.pruner.close()
 	s.eventMux.Stop()
-	rawdb.PopUncleanShutdownMarker(s.chainDb)
+	// Clean shutdown marker as the last thing before closing db
+	s.shutdownTracker.Stop()
+
 	s.chainDb.Close()
 	s.lesDb.Close()
 	s.wg.Wait()
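// Editor's aside, not part of the diff: the extra argument now threaded into
// SetupGenesisBlockWithOverride comes from the OverrideTerminalTotalDifficulty
// config field, used at the time to rehearse the merge on test networks. A
// hedged sketch of a caller setting it; the surrounding wiring (node stack,
// les.New) is omitted and the exact knobs may differ between releases.
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	cfg := ethconfig.Defaults
	// Force a custom terminal total difficulty for a private-network dry run.
	cfg.OverrideTerminalTotalDifficulty = big.NewInt(50_000_000_000_000)
	_ = cfg // handed to les.New via the node stack in real usage
}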
func (s *LightEthereum) Protocols() []p2p.Protocol { @@ -348,6 +346,9 @@ func (s *LightEthereum) Protocols() []p2p.Protocol { func (s *LightEthereum) Start() error { log.Warn("Light client mode is an experimental feature") + // Regularly update shutdown marker + s.shutdownTracker.Start() + if s.udpEnabled && s.p2pServer.DiscV5 == nil { s.udpEnabled = false log.Error("Discovery v5 is not initialized") @@ -383,7 +384,9 @@ func (s *LightEthereum) Stop() error { s.engine.Close() s.pruner.close() s.eventMux.Stop() - rawdb.PopUncleanShutdownMarker(s.chainDb) + // Clear the shutdown marker as the last thing before closing the db + s.shutdownTracker.Stop() + s.chainDb.Close() s.lesDb.Close() s.wg.Wait() diff --git a/les/client_handler.go b/les/client_handler.go index 9583bd57c..e416f92e2 100644 --- a/les/client_handler.go +++ b/les/client_handler.go @@ -74,7 +74,7 @@ func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.T height = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1 } handler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise) - handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer) + handler.downloader = downloader.New(height, backend.chainDb, backend.eventMux, nil, backend.blockchain, handler.removePeer) handler.backend.peers.subscribe((*downloaderPeerNotify)(handler)) return handler } @@ -143,11 +143,13 @@ func (h *clientHandler) handle(p *serverPeer, noInitAnnounce bool) error { connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) serverConnectionGauge.Update(int64(h.backend.peers.len())) }() - // It's mainly used in testing which requires discarding initial - // signal to prevent syncing. - if !noInitAnnounce { + + // Discard all the announces after the transition. + // Also discard the initial signal to prevent syncing during testing. + if !(noInitAnnounce || h.backend.merger.TDDReached()) { h.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}) } + // Mark that the peer has started to be served.
atomic.StoreUint32(&p.serving, 1) defer atomic.StoreUint32(&p.serving, 0) @@ -212,7 +214,11 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { // Update peer head information first and then notify the announcement p.updateHead(req.Hash, req.Number, req.Td) - h.fetcher.announce(p, &req) + + // Discard all the announces after the transition + if !h.backend.merger.TDDReached() { + h.fetcher.announce(p, &req) + } } case msg.Code == BlockHeadersMsg: p.Log().Trace("Received block header response message") diff --git a/les/downloader/downloader.go b/les/downloader/downloader.go index e7dfc4158..448a94192 100644 --- a/les/downloader/downloader.go +++ b/les/downloader/downloader.go @@ -40,7 +40,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" ) var ( @@ -97,8 +96,7 @@ type Downloader struct { queue *queue // Scheduler for selecting the hashes to download peers *peerSet // Set of active peers from which download can proceed - stateDB ethdb.Database // Database to state sync into (and deduplicate via) - stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks + stateDB ethdb.Database // Database to state sync into (and deduplicate via) // Statistics syncStatsChainOrigin uint64 // Origin block number where syncing started at @@ -207,13 +205,12 @@ type BlockChain interface { } // New creates a new downloader to fetch hashes and blocks from remote peers. -func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { +func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { if lightchain == nil { lightchain = chain } dl := &Downloader{ stateDB: stateDb, - stateBloom: stateBloom, mux: mux, checkpoint: checkpoint, queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), @@ -231,9 +228,9 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, stateCh: make(chan dataPack), SnapSyncer: snap.NewSyncer(stateDb), stateSyncStart: make(chan *stateSync), - syncStatsState: stateSyncStats{ - processed: rawdb.ReadFastTrieProgress(stateDb), - }, + //syncStatsState: stateSyncStats{ + // processed: rawdb.ReadFastTrieProgress(stateDb), + //}, trackStateReq: make(chan *stateReq), } go dl.stateFetcher() @@ -268,8 +265,8 @@ func (d *Downloader) Progress() ethereum.SyncProgress { StartingBlock: d.syncStatsChainOrigin, CurrentBlock: current, HighestBlock: d.syncStatsChainHeight, - PulledStates: d.syncStatsState.processed, - KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, + //PulledStates: d.syncStatsState.processed, + //KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, } } @@ -367,12 +364,6 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { log.Info("Block synchronisation started") } - // If we are already full syncing, but have a fast-sync bloom filter laying - // around, make sure it doesn't use memory any more. This is a special case - // when the user attempts to fast sync a new empty network. - if mode == FullSync && d.stateBloom != nil { - d.stateBloom.Close() - } // If snap sync was requested, create the snap scheduler and switch to fast // sync mode. 
Long term we could drop fast sync or merge the two together, // but until snap becomes prevalent, we should support both. TODO(karalabe). @@ -628,9 +619,6 @@ func (d *Downloader) Terminate() { default: close(d.quitCh) } - if d.stateBloom != nil { - d.stateBloom.Close() - } d.quitLock.Unlock() // Cancel any pending download requests @@ -1930,15 +1918,6 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error { return err } atomic.StoreInt32(&d.committed, 1) - - // If we had a bloom filter for the state sync, deallocate it now. Note, we only - // deallocate internally, but keep the empty wrapper. This ensures that if we do - // a rollback after committing the pivot and restarting fast sync, we don't end - // up using a nil bloom. Empty bloom is fine, it just returns that it does not - // have the info we need, so reach down to the database instead. - if d.stateBloom != nil { - d.stateBloom.Close() - } return nil } diff --git a/les/downloader/downloader_test.go b/les/downloader/downloader_test.go index 17cd3630c..69bdb90ed 100644 --- a/les/downloader/downloader_test.go +++ b/les/downloader/downloader_test.go @@ -89,7 +89,7 @@ func newTester() *downloadTester { tester.stateDb = rawdb.NewMemoryDatabase() tester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00}) - tester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer) + tester.downloader = New(0, tester.stateDb, new(event.TypeMux), tester, nil, tester.dropPeer) return tester } @@ -1207,8 +1207,8 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync t.Helper() p := d.Progress() - p.KnownStates, p.PulledStates = 0, 0 - want.KnownStates, want.PulledStates = 0, 0 + //p.KnownStates, p.PulledStates = 0, 0 + //want.KnownStates, want.PulledStates = 0, 0 if p != want { t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want) } diff --git a/les/downloader/statesync.go b/les/downloader/statesync.go index 6c53e5577..2b3278822 100644 --- a/les/downloader/statesync.go +++ b/les/downloader/statesync.go @@ -22,7 +22,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" @@ -298,7 +297,7 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync { return &stateSync{ d: d, root: root, - sched: state.NewStateSync(root, d.stateDB, d.stateBloom, nil), + sched: state.NewStateSync(root, d.stateDB, nil), keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), trieTasks: make(map[common.Hash]*trieTask), codeTasks: make(map[common.Hash]*codeTask), @@ -610,6 +609,6 @@ func (s *stateSync) updateStats(written, duplicate, unexpected int, duration tim log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected) } if written > 0 { - rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed) + //rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed) } } diff --git a/les/fetcher.go b/les/fetcher.go index d944d3285..bfe3aa16e 100644 --- a/les/fetcher.go +++ b/les/fetcher.go @@ -71,8 +71,8 @@ type fetcherPeer struct { // The following two fields track the latest
announces // from the peer with limited size for caching. We hold the // assumption that all enqueued announces are td-monotonic. - announces map[common.Hash]*announce // Announcement map - announcesList []common.Hash // FIFO announces list + announces map[common.Hash]*announce // Announcement map + fifo []common.Hash // FIFO announces list } // addAnno enqueues a new trusted announcement. If the queued announces overflow, @@ -87,15 +87,15 @@ func (fp *fetcherPeer) addAnno(anno *announce) { return } fp.announces[hash] = anno - fp.announcesList = append(fp.announcesList, hash) + fp.fifo = append(fp.fifo, hash) // Evict oldest if the announces are oversized. - if len(fp.announcesList)-cachedAnnosThreshold > 0 { - for i := 0; i < len(fp.announcesList)-cachedAnnosThreshold; i++ { - delete(fp.announces, fp.announcesList[i]) + if len(fp.fifo)-cachedAnnosThreshold > 0 { + for i := 0; i < len(fp.fifo)-cachedAnnosThreshold; i++ { + delete(fp.announces, fp.fifo[i]) } - copy(fp.announcesList, fp.announcesList[len(fp.announcesList)-cachedAnnosThreshold:]) - fp.announcesList = fp.announcesList[:cachedAnnosThreshold] + copy(fp.fifo, fp.fifo[len(fp.fifo)-cachedAnnosThreshold:]) + fp.fifo = fp.fifo[:cachedAnnosThreshold] } } @@ -106,8 +106,8 @@ func (fp *fetcherPeer) forwardAnno(td *big.Int) []*announce { cutset int evicted []*announce ) - for ; cutset < len(fp.announcesList); cutset++ { - anno := fp.announces[fp.announcesList[cutset]] + for ; cutset < len(fp.fifo); cutset++ { + anno := fp.announces[fp.fifo[cutset]] if anno == nil { continue // In theory it should never ever happen } @@ -118,8 +118,8 @@ func (fp *fetcherPeer) forwardAnno(td *big.Int) []*announce { delete(fp.announces, anno.data.Hash) } if cutset > 0 { - copy(fp.announcesList, fp.announcesList[cutset:]) - fp.announcesList = fp.announcesList[:len(fp.announcesList)-cutset] + copy(fp.fifo, fp.fifo[cutset:]) + fp.fifo = fp.fifo[:len(fp.fifo)-cutset] } return evicted } diff --git a/les/fetcher/block_fetcher_test.go b/les/fetcher/block_fetcher_test.go index b6d1125b5..de066ac26 100644 --- a/les/fetcher/block_fetcher_test.go +++ b/les/fetcher/block_fetcher_test.go @@ -60,8 +60,8 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common block.AddTx(tx) } // If the block number is a multiple of 5, add a bonus uncle to the block - if i%5 == 0 { - block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))}) + if i > 0 && i%5 == 0 { + block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i - 1))}) } }) hashes := make([]common.Hash, n+1) diff --git a/les/test_helper.go b/les/test_helper.go index 21d0f191c..10367ea80 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/contracts/checkpointoracle/contract" "github.com/ethereum/go-ethereum/core" @@ -239,6 +240,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index engine: engine, blockchain: chain, eventMux: evmux, + merger: consensus.NewMerger(rawdb.NewMemoryDatabase()), } client.handler = newClientHandler(ulcServers, ulcFraction, nil, client) diff --git a/les/vflux/client/serverpool.go b/les/vflux/client/serverpool.go index cc0254c12..9703bf4c8 100644
--- a/les/vflux/client/serverpool.go +++ b/les/vflux/client/serverpool.go @@ -44,7 +44,7 @@ const ( queryWaitStep = 1.02 // exponential multiplier of redial wait time when no value was provided by the server waitThreshold = time.Hour * 2000 // drop node if waiting time is over the threshold nodeWeightMul = 1000000 // multiplier constant for node weight calculation - nodeWeightThreshold = 100 // minimum weight for keeping a node in the the known (valuable) set + nodeWeightThreshold = 100 // minimum weight for keeping a node in the known (valuable) set minRedialWait = 10 // minimum redial wait time in seconds preNegLimit = 5 // maximum number of simultaneous pre-negotiation queries warnQueryFails = 20 // number of consecutive UDP query failures before we print a warning diff --git a/les/vflux/client/serverpool_test.go b/les/vflux/client/serverpool_test.go index 763f72f03..c7d0245ef 100644 --- a/les/vflux/client/serverpool_test.go +++ b/les/vflux/client/serverpool_test.go @@ -63,7 +63,11 @@ type ServerPoolTest struct { trusted []string waitCount, waitEnded int32 - lock sync.Mutex + // preNegLock protects the cycle counter, testNodes list and its connected field + // (accessed from both the main thread and the preNeg callback) + preNegLock sync.Mutex + queryWg *sync.WaitGroup // a new wait group is created each time the simulation is started + stopping bool // stopping avoids calling queryWg.Add after queryWg.Wait cycle, conn, servedConn int serviceCycles, dialCount int @@ -111,13 +115,21 @@ func (s *ServerPoolTest) addTrusted(i int) { func (s *ServerPoolTest) start() { var testQuery QueryFunc + s.queryWg = new(sync.WaitGroup) if s.preNeg { testQuery = func(node *enode.Node) int { + s.preNegLock.Lock() + if s.stopping { + s.preNegLock.Unlock() + return 0 + } + s.queryWg.Add(1) idx := testNodeIndex(node.ID()) n := &s.testNodes[idx] - s.lock.Lock() canConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle - s.lock.Unlock() + s.preNegLock.Unlock() + defer s.queryWg.Done() + if s.preNegFail { // simulate a scenario where UDP queries never work s.beginWait() @@ -181,11 +193,20 @@ func (s *ServerPoolTest) start() { } func (s *ServerPoolTest) stop() { + // disable further queries and wait if one is currently running + s.preNegLock.Lock() + s.stopping = true + s.preNegLock.Unlock() + s.queryWg.Wait() + quit := make(chan struct{}) s.quit <- quit <-quit s.sp.Stop() s.spi.Close() + s.preNegLock.Lock() + s.stopping = false + s.preNegLock.Unlock() for i := range s.testNodes { n := &s.testNodes[i] if n.connected { @@ -205,7 +226,9 @@ func (s *ServerPoolTest) run() { n := &s.testNodes[idx] s.sp.UnregisterNode(n.node) n.totalConn += s.cycle + s.preNegLock.Lock() n.connected = false + s.preNegLock.Unlock() n.node = nil s.conn-- if n.service { @@ -230,7 +253,9 @@ func (s *ServerPoolTest) run() { s.servedConn++ } n.totalConn -= s.cycle + s.preNegLock.Lock() n.connected = true + s.preNegLock.Unlock() dc := s.cycle + n.connectCycles s.disconnect[dc] = append(s.disconnect[dc], idx) n.node = dial @@ -242,9 +267,9 @@ func (s *ServerPoolTest) run() { } s.serviceCycles += s.servedConn s.clock.Run(time.Second) - s.lock.Lock() + s.preNegLock.Lock() s.cycle++ - s.lock.Unlock() + s.preNegLock.Unlock() } } @@ -255,11 +280,13 @@ func (s *ServerPoolTest) setNodes(count, conn, wait int, service, trusted bool) idx = rand.Intn(spTestNodes) } res = append(res, idx) + s.preNegLock.Lock() s.testNodes[idx] = spTestNode{ connectCycles: conn, waitCycles: wait, service: service, } + s.preNegLock.Unlock() if
trusted { s.addTrusted(idx) } @@ -273,7 +300,9 @@ func (s *ServerPoolTest) resetNodes() { n.totalConn += s.cycle s.sp.UnregisterNode(n.node) } + s.preNegLock.Lock() s.testNodes[i] = spTestNode{totalConn: n.totalConn} + s.preNegLock.Unlock() } s.conn, s.servedConn = 0, 0 s.disconnect = make(map[int][]int) diff --git a/light/lightchain.go b/light/lightchain.go index c481734ff..61309ce35 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -59,6 +59,7 @@ type LightChain struct { chainHeadFeed event.Feed scope event.SubscriptionScope genesisBlock *types.Block + forker *core.ForkChoice bodyCache *lru.Cache // Cache for the most recent block bodies bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format @@ -92,6 +93,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus. blockCache: blockCache, engine: engine, } + bc.forker = core.NewForkChoice(bc, nil) var err error bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.engine, bc.getProcInterrupt) if err != nil { @@ -369,6 +371,42 @@ func (lc *LightChain) postChainEvents(events []interface{}) { } } +func (lc *LightChain) InsertHeader(header *types.Header) error { + // Verify the header first before obtaining the lock + headers := []*types.Header{header} + if _, err := lc.hc.ValidateHeaderChain(headers, 100); err != nil { + return err + } + // Make sure only one thread manipulates the chain at once + lc.chainmu.Lock() + defer lc.chainmu.Unlock() + + lc.wg.Add(1) + defer lc.wg.Done() + + _, err := lc.hc.WriteHeaders(headers) + log.Info("Inserted header", "number", header.Number, "hash", header.Hash()) + return err +} + +func (lc *LightChain) SetChainHead(header *types.Header) error { + lc.chainmu.Lock() + defer lc.chainmu.Unlock() + + lc.wg.Add(1) + defer lc.wg.Done() + + if err := lc.hc.Reorg([]*types.Header{header}); err != nil { + return err + } + // Emit events + block := types.NewBlockWithHeader(header) + lc.chainFeed.Send(core.ChainEvent{Block: block, Hash: block.Hash()}) + lc.chainHeadFeed.Send(core.ChainHeadEvent{Block: block}) + log.Info("Set the chain head", "number", block.Number(), "hash", block.Hash()) + return nil +} + // InsertHeaderChain attempts to insert the given header chain into the local // chain, possibly creating a reorg. If an error is returned, it will return the // index number of the failing header as well as an error describing what went wrong. @@ -396,25 +434,23 @@ func (lc *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i lc.wg.Add(1) defer lc.wg.Done() - status, err := lc.hc.InsertHeaderChain(chain, start) + status, err := lc.hc.InsertHeaderChain(chain, start, lc.forker) if err != nil || len(chain) == 0 { return 0, err } // Create chain event for the new head block of this insertion.
var ( - events = make([]interface{}, 0, 1) lastHeader = chain[len(chain)-1] block = types.NewBlockWithHeader(lastHeader) ) switch status { case core.CanonStatTy: - events = append(events, core.ChainEvent{Block: block, Hash: block.Hash()}) + lc.chainFeed.Send(core.ChainEvent{Block: block, Hash: block.Hash()}) + lc.chainHeadFeed.Send(core.ChainHeadEvent{Block: block}) case core.SideStatTy: - events = append(events, core.ChainSideEvent{Block: block}) + lc.chainSideFeed.Send(core.ChainSideEvent{Block: block}) } - lc.postChainEvents(events) - return 0, err } diff --git a/miner/miner.go b/miner/miner.go index 1c33b3bd2..c8aaa5b92 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -68,7 +68,7 @@ type Miner struct { wg sync.WaitGroup } -func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool) *Miner { +func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(header *types.Header) bool, merger *consensus.Merger) *Miner { miner := &Miner{ eth: eth, mux: mux, @@ -76,7 +76,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even exitCh: make(chan struct{}), startCh: make(chan common.Address), stopCh: make(chan struct{}), - worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), + worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, merger), } miner.wg.Add(1) go miner.update() diff --git a/miner/miner_test.go b/miner/miner_test.go index 4b5bff1df..de7ca73e2 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" @@ -245,6 +246,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) { // Create consensus engine engine := clique.New(chainConfig.Clique, chainDB) // Create Ethereum backend + merger := consensus.NewMerger(rawdb.NewMemoryDatabase()) bc, err := core.NewBlockChain(chainDB, nil, chainConfig, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("can't create new chain %v", err) @@ -257,5 +259,5 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) { // Create event Mux mux := new(event.TypeMux) // Create Miner - return New(backend, &config, chainConfig, mux, engine, nil), mux + return New(backend, &config, chainConfig, mux, engine, nil, merger), mux } diff --git a/miner/stress/beacon/main.go b/miner/stress/beacon/main.go new file mode 100644 index 000000000..70005e20d --- /dev/null +++ b/miner/stress/beacon/main.go @@ -0,0 +1,521 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +// This file contains a miner stress test for the eth1/2 transition +package main + +import ( + "crypto/ecdsa" + "errors" + "io/ioutil" + "math/big" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/fdlimit" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/catalyst" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/les" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/miner" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/params" +) + +type nodetype int + +const ( + legacyMiningNode nodetype = iota + legacyNormalNode + eth2MiningNode + eth2NormalNode + eth2LightClient +) + +func (typ nodetype) String() string { + switch typ { + case legacyMiningNode: + return "legacyMiningNode" + case legacyNormalNode: + return "legacyNormalNode" + case eth2MiningNode: + return "eth2MiningNode" + case eth2NormalNode: + return "eth2NormalNode" + case eth2LightClient: + return "eth2LightClient" + default: + return "undefined" + } +} + +var ( + // transitionDifficulty is the target total difficulty for transition + transitionDifficulty = new(big.Int).Mul(big.NewInt(20), params.MinimumDifficulty) + + // blockInterval is the time interval for creating a new eth2 block + blockInterval = time.Second * 3 + blockIntervalInt = 3 + + // finalizationDist is the block distance for finalizing blocks + finalizationDist = 10 +) + +type ethNode struct { + typ nodetype + api *catalyst.ConsensusAPI + ethBackend *eth.Ethereum + lesBackend *les.LightEthereum + stack *node.Node + enode *enode.Node +} + +func newNode(typ nodetype, genesis *core.Genesis, enodes []*enode.Node) *ethNode { + var ( + err error + api *catalyst.ConsensusAPI + stack *node.Node + ethBackend *eth.Ethereum + lesBackend *les.LightEthereum + ) + // Start the node and wait until it's up + if typ == eth2LightClient { + stack, lesBackend, api, err = makeLightNode(genesis) + } else { + stack, ethBackend, api, err = makeFullNode(genesis) + } + if err != nil { + panic(err) + } + for stack.Server().NodeInfo().Ports.Listener == 0 { + time.Sleep(250 * time.Millisecond) + } + // Connect the node to all the previous ones + for _, n := range enodes { + stack.Server().AddPeer(n) + } + enode := stack.Server().Self() + + // Inject the signer key and start sealing with it + stack.AccountManager().AddBackend(keystore.NewPlaintextKeyStore("beacon-stress")) + store := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) + if _, err := store.NewAccount(""); err != nil { + panic(err) + } + return &ethNode{ + typ: typ, + api: api, + ethBackend: ethBackend, + lesBackend: lesBackend, + stack: stack, + enode: enode, + } +} + +func (n *ethNode) assembleBlock(parentHash common.Hash, parentTimestamp uint64) (*catalyst.ExecutableDataV1, error) { + if n.typ != eth2MiningNode { + return nil, errors.New("invalid node type") + } + payloadAttribute :=
catalyst.PayloadAttributesV1{ + Timestamp: uint64(time.Now().Unix()), + } + fcState := catalyst.ForkchoiceStateV1{ + HeadBlockHash: parentHash, + SafeBlockHash: common.Hash{}, + FinalizedBlockHash: common.Hash{}, + } + payload, err := n.api.ForkchoiceUpdatedV1(fcState, &payloadAttribute) + if err != nil { + return nil, err + } + return n.api.GetPayloadV1(*payload.PayloadID) +} + +func (n *ethNode) insertBlock(eb catalyst.ExecutableDataV1) error { + if !eth2types(n.typ) { + return errors.New("invalid node type") + } + newResp, err := n.api.ExecutePayloadV1(eb) + if err != nil { + return err + } else if newResp.Status != "VALID" { + return errors.New("failed to insert block") + } + return nil +} + +func (n *ethNode) insertBlockAndSetHead(parent *types.Header, ed catalyst.ExecutableDataV1) error { + if !eth2types(n.typ) { + return errors.New("invalid node type") + } + if err := n.insertBlock(ed); err != nil { + return err + } + block, err := catalyst.ExecutableDataToBlock(ed) + if err != nil { + return err + } + fcState := catalyst.ForkchoiceStateV1{ + HeadBlockHash: block.ParentHash(), + SafeBlockHash: common.Hash{}, + FinalizedBlockHash: common.Hash{}, + } + if _, err := n.api.ForkchoiceUpdatedV1(fcState, nil); err != nil { + return err + } + return nil +} + +type nodeManager struct { + genesis *core.Genesis + genesisBlock *types.Block + nodes []*ethNode + enodes []*enode.Node + close chan struct{} +} + +func newNodeManager(genesis *core.Genesis) *nodeManager { + return &nodeManager{ + close: make(chan struct{}), + genesis: genesis, + genesisBlock: genesis.ToBlock(nil), + } +} + +func (mgr *nodeManager) createNode(typ nodetype) { + node := newNode(typ, mgr.genesis, mgr.enodes) + mgr.nodes = append(mgr.nodes, node) + mgr.enodes = append(mgr.enodes, node.enode) +} + +func (mgr *nodeManager) getNodes(typ nodetype) []*ethNode { + var ret []*ethNode + for _, node := range mgr.nodes { + if node.typ == typ { + ret = append(ret, node) + } + } + return ret +} + +func (mgr *nodeManager) startMining() { + for _, node := range append(mgr.getNodes(eth2MiningNode), mgr.getNodes(legacyMiningNode)...) { + if err := node.ethBackend.StartMining(1); err != nil { + panic(err) + } + } +} + +func (mgr *nodeManager) shutdown() { + close(mgr.close) + for _, node := range mgr.nodes { + node.stack.Close() + } +} + +func (mgr *nodeManager) run() { + if len(mgr.nodes) == 0 { + return + } + chain := mgr.nodes[0].ethBackend.BlockChain() + sink := make(chan core.ChainHeadEvent, 1024) + sub := chain.SubscribeChainHeadEvent(sink) + defer sub.Unsubscribe() + + var ( + transitioned bool + parentBlock *types.Block + waitFinalise []*types.Block + ) + timer := time.NewTimer(0) + defer timer.Stop() + <-timer.C // discard the initial tick + + // Handle the by default transition. + if transitionDifficulty.Sign() == 0 { + transitioned = true + parentBlock = mgr.genesisBlock + timer.Reset(blockInterval) + log.Info("Enable the transition by default") + } + + // Handle the block finalization. + checkFinalise := func() { + if parentBlock == nil { + return + } + if len(waitFinalise) == 0 { + return + } + oldest := waitFinalise[0] + if oldest.NumberU64() > parentBlock.NumberU64() { + return + } + distance := parentBlock.NumberU64() - oldest.NumberU64() + if int(distance) < finalizationDist { + return + } + nodes := mgr.getNodes(eth2MiningNode) + nodes = append(nodes, mgr.getNodes(eth2NormalNode)...) + nodes = append(nodes, mgr.getNodes(eth2LightClient)...) 
+ for _, node := range nodes { + fcState := catalyst.ForkchoiceStateV1{ + HeadBlockHash: oldest.Hash(), + SafeBlockHash: common.Hash{}, + FinalizedBlockHash: common.Hash{}, + } + node.api.ForkchoiceUpdatedV1(fcState, nil) + } + log.Info("Finalised eth2 block", "number", oldest.NumberU64(), "hash", oldest.Hash()) + waitFinalise = waitFinalise[1:] + } + + for { + checkFinalise() + select { + case <-mgr.close: + return + + case ev := <-sink: + if transitioned { + continue + } + td := chain.GetTd(ev.Block.Hash(), ev.Block.NumberU64()) + if td.Cmp(transitionDifficulty) < 0 { + continue + } + transitioned, parentBlock = true, ev.Block + timer.Reset(blockInterval) + log.Info("Transition difficulty reached", "td", td, "target", transitionDifficulty, "number", ev.Block.NumberU64(), "hash", ev.Block.Hash()) + + case <-timer.C: + producers := mgr.getNodes(eth2MiningNode) + if len(producers) == 0 { + continue + } + hash, timestamp := parentBlock.Hash(), parentBlock.Time() + if parentBlock.NumberU64() == 0 { + timestamp = uint64(time.Now().Unix()) - uint64(blockIntervalInt) + } + ed, err := producers[0].assembleBlock(hash, timestamp) + if err != nil { + log.Error("Failed to assemble the block", "err", err) + continue + } + block, _ := catalyst.ExecutableDataToBlock(*ed) + + nodes := mgr.getNodes(eth2MiningNode) + nodes = append(nodes, mgr.getNodes(eth2NormalNode)...) + nodes = append(nodes, mgr.getNodes(eth2LightClient)...) + + for _, node := range nodes { + if err := node.insertBlockAndSetHead(parentBlock.Header(), *ed); err != nil { + log.Error("Failed to insert block", "type", node.typ, "err", err) + } + } + log.Info("Create and insert eth2 block", "number", ed.Number) + parentBlock = block + waitFinalise = append(waitFinalise, block) + timer.Reset(blockInterval) + } + } +} + +func main() { + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + fdlimit.Raise(2048) + + // Generate a batch of accounts to seal and fund with + faucets := make([]*ecdsa.PrivateKey, 16) + for i := 0; i < len(faucets); i++ { + faucets[i], _ = crypto.GenerateKey() + } + // Pre-generate the ethash mining DAG so we don't race + ethash.MakeDataset(1, filepath.Join(os.Getenv("HOME"), ".ethash")) + + // Create an Ethash network based off of the Ropsten config + genesis := makeGenesis(faucets) + manager := newNodeManager(genesis) + defer manager.shutdown() + + manager.createNode(eth2NormalNode) + manager.createNode(eth2MiningNode) + manager.createNode(legacyMiningNode) + manager.createNode(legacyNormalNode) + manager.createNode(eth2LightClient) + + // Iterate over all the nodes and start mining + time.Sleep(3 * time.Second) + if transitionDifficulty.Sign() != 0 { + manager.startMining() + } + go manager.run() + + // Start injecting transactions from the faucets like crazy + time.Sleep(3 * time.Second) + nonces := make([]uint64, len(faucets)) + for { + // Pick a random mining node + nodes := manager.getNodes(eth2MiningNode) + + index := rand.Intn(len(faucets)) + node := nodes[index%len(nodes)] + + // Create a self transaction and inject into the pool + tx, err := types.SignTx(types.NewTransaction(nonces[index], crypto.PubkeyToAddress(faucets[index].PublicKey), new(big.Int), 21000, big.NewInt(100000000000+rand.Int63n(65536)), nil), types.HomesteadSigner{}, faucets[index]) + if err != nil { + panic(err) + } + if err := node.ethBackend.TxPool().AddLocal(tx); err != nil { + panic(err) + } + nonces[index]++ + + // Wait if we're too saturated + if pend, _ :=
node.ethBackend.TxPool().Stats(); pend > 2048 { + time.Sleep(100 * time.Millisecond) + } + } +} + +// makeGenesis creates a custom Ethash genesis block based on some pre-defined +// faucet accounts. +func makeGenesis(faucets []*ecdsa.PrivateKey) *core.Genesis { + genesis := core.DefaultRopstenGenesisBlock() + genesis.Difficulty = params.MinimumDifficulty + genesis.GasLimit = 25000000 + + genesis.Config.ChainID = big.NewInt(18) + genesis.Config.EIP150Hash = common.Hash{} + genesis.BaseFee = big.NewInt(params.InitialBaseFee) + genesis.Config.TerminalTotalDifficulty = transitionDifficulty + + genesis.Alloc = core.GenesisAlloc{} + for _, faucet := range faucets { + genesis.Alloc[crypto.PubkeyToAddress(faucet.PublicKey)] = core.GenesisAccount{ + Balance: new(big.Int).Exp(big.NewInt(2), big.NewInt(128), nil), + } + } + return genesis +} + +func makeFullNode(genesis *core.Genesis) (*node.Node, *eth.Ethereum, *catalyst.ConsensusAPI, error) { + // Define the basic configurations for the Ethereum node + datadir, _ := ioutil.TempDir("", "") + + config := &node.Config{ + Name: "geth", + Version: params.Version, + DataDir: datadir, + P2P: p2p.Config{ + ListenAddr: "0.0.0.0:0", + NoDiscovery: true, + MaxPeers: 25, + }, + UseLightweightKDF: true, + } + // Create the node and configure a full Ethereum node on it + stack, err := node.New(config) + if err != nil { + return nil, nil, nil, err + } + econfig := &ethconfig.Config{ + Genesis: genesis, + NetworkId: genesis.Config.ChainID.Uint64(), + SyncMode: downloader.FullSync, + DatabaseCache: 256, + DatabaseHandles: 256, + TxPool: core.DefaultTxPoolConfig, + GPO: ethconfig.Defaults.GPO, + Ethash: ethconfig.Defaults.Ethash, + Miner: miner.Config{ + GasFloor: genesis.GasLimit * 9 / 10, + GasCeil: genesis.GasLimit * 11 / 10, + GasPrice: big.NewInt(1), + Recommit: 10 * time.Second, // Disable the recommit + }, + LightServ: 100, + LightPeers: 10, + LightNoSyncServe: true, + } + ethBackend, err := eth.New(stack, econfig) + if err != nil { + return nil, nil, nil, err + } + _, err = les.NewLesServer(stack, ethBackend, econfig) + if err != nil { + log.Crit("Failed to create the LES server", "err", err) + } + err = stack.Start() + return stack, ethBackend, catalyst.NewConsensusAPI(ethBackend, nil), err +} + +func makeLightNode(genesis *core.Genesis) (*node.Node, *les.LightEthereum, *catalyst.ConsensusAPI, error) { + // Define the basic configurations for the Ethereum node + datadir, _ := ioutil.TempDir("", "") + + config := &node.Config{ + Name: "geth", + Version: params.Version, + DataDir: datadir, + P2P: p2p.Config{ + ListenAddr: "0.0.0.0:0", + NoDiscovery: true, + MaxPeers: 25, + }, + UseLightweightKDF: true, + } + // Create the node and configure a light Ethereum node on it + stack, err := node.New(config) + if err != nil { + return nil, nil, nil, err + } + lesBackend, err := les.New(stack, &ethconfig.Config{ + Genesis: genesis, + NetworkId: genesis.Config.ChainID.Uint64(), + SyncMode: downloader.LightSync, + DatabaseCache: 256, + DatabaseHandles: 256, + TxPool: core.DefaultTxPoolConfig, + GPO: ethconfig.Defaults.GPO, + Ethash: ethconfig.Defaults.Ethash, + LightPeers: 10, + }) + if err != nil { + return nil, nil, nil, err + } + err = stack.Start() + return stack, lesBackend, catalyst.NewConsensusAPI(nil, lesBackend), err +} + +func eth2types(typ nodetype) bool { + if typ == eth2LightClient || typ == eth2NormalNode || typ == eth2MiningNode { + return true + } + return false +} diff --git a/miner/worker.go b/miner/worker.go index 77e868c2b..2c576ad08 100644 ---
a/miner/worker.go +++ b/miner/worker.go @@ -128,6 +128,7 @@ type worker struct { engine consensus.Engine eth Backend chain *core.BlockChain + merger *consensus.Merger // Feeds pendingLogsFeed event.Feed @@ -181,7 +182,7 @@ type worker struct { noempty uint32 // External functions - isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner. + isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by local miner. // Test hooks newTaskHook func(*task) // Method to call upon receiving a new sealing task. @@ -190,7 +191,7 @@ type worker struct { resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. } -func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker { +func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(header *types.Header) bool, init bool, merger *consensus.Merger) *worker { worker := &worker{ config: config, chainConfig: chainConfig, @@ -198,6 +199,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus eth: eth, mux: mux, chain: eth.BlockChain(), + merger: merger, isLocalBlock: isLocalBlock, localUncles: make(map[common.Hash]*types.Block), remoteUncles: make(map[common.Hash]*types.Block), @@ -472,7 +474,7 @@ func (w *worker) mainLoop() { continue } // Add side block to possible uncle block set depending on the author. - if w.isLocalBlock != nil && w.isLocalBlock(ev.Block) { + if w.isLocalBlock != nil && w.isLocalBlock(ev.Block.Header()) { w.localUncles[ev.Block.Hash()] = ev.Block } else { w.remoteUncles[ev.Block.Hash()] = ev.Block @@ -657,7 +659,7 @@ func (w *worker) resultLoop() { logs = append(logs, receipt.Logs...) } // Commit block and state to database. 
- _, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true) + _, err := w.chain.WriteBlockAndSetHead(block, receipts, logs, task.state, true) if err != nil { log.Error("Failed writing block to chain", "err", err) continue @@ -1041,6 +1043,11 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st if interval != nil { interval() } + // If we're post merge, just ignore + td, ttd := w.chain.GetTd(block.ParentHash(), block.NumberU64()-1), w.chain.Config().TerminalTotalDifficulty + if td != nil && ttd != nil && td.Cmp(ttd) >= 0 { + return nil + } select { case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}: w.unconfirmed.Shift(block.NumberU64() - 1) diff --git a/miner/worker_test.go b/miner/worker_test.go index 5b35c66dc..c8ddd2c32 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -197,7 +197,7 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) backend.txPool.AddLocals(pendingTxs) - w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) + w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, consensus.NewMerger(rawdb.NewMemoryDatabase())) w.setEtherbase(testBankAddress) return w, backend } diff --git a/mobile/ethereum.go b/mobile/ethereum.go index 97c46ddca..d5058e4e2 100644 --- a/mobile/ethereum.go +++ b/mobile/ethereum.go @@ -78,11 +78,21 @@ type SyncProgress struct { progress ethereum.SyncProgress } -func (p *SyncProgress) GetStartingBlock() int64 { return int64(p.progress.StartingBlock) } -func (p *SyncProgress) GetCurrentBlock() int64 { return int64(p.progress.CurrentBlock) } -func (p *SyncProgress) GetHighestBlock() int64 { return int64(p.progress.HighestBlock) } -func (p *SyncProgress) GetPulledStates() int64 { return int64(p.progress.PulledStates) } -func (p *SyncProgress) GetKnownStates() int64 { return int64(p.progress.KnownStates) } +func (p *SyncProgress) GetStartingBlock() int64 { return int64(p.progress.StartingBlock) } +func (p *SyncProgress) GetCurrentBlock() int64 { return int64(p.progress.CurrentBlock) } +func (p *SyncProgress) GetHighestBlock() int64 { return int64(p.progress.HighestBlock) } +func (p *SyncProgress) GetSyncedAccounts() int64 { return int64(p.progress.SyncedAccounts) } +func (p *SyncProgress) GetSyncedAccountBytes() int64 { return int64(p.progress.SyncedAccountBytes) } +func (p *SyncProgress) GetSyncedBytecodes() int64 { return int64(p.progress.SyncedBytecodes) } +func (p *SyncProgress) GetSyncedBytecodeBytes() int64 { return int64(p.progress.SyncedBytecodeBytes) } +func (p *SyncProgress) GetSyncedStorage() int64 { return int64(p.progress.SyncedStorage) } +func (p *SyncProgress) GetSyncedStorageBytes() int64 { return int64(p.progress.SyncedStorageBytes) } +func (p *SyncProgress) GetHealedTrienodes() int64 { return int64(p.progress.HealedTrienodes) } +func (p *SyncProgress) GetHealedTrienodeBytes() int64 { return int64(p.progress.HealedTrienodeBytes) } +func (p *SyncProgress) GetHealedBytecodes() int64 { return int64(p.progress.HealedBytecodes) } +func (p *SyncProgress) GetHealedBytecodeBytes() int64 { return int64(p.progress.HealedBytecodeBytes) } +func (p *SyncProgress) GetHealingTrienodes() int64 { return int64(p.progress.HealingTrienodes) } +func 
(p *SyncProgress) GetHealingBytecode() int64 { return int64(p.progress.HealingBytecode) } // Topics is a set of topic lists to filter events with. type Topics struct{ topics [][]common.Hash } diff --git a/oss-fuzz.sh b/oss-fuzz.sh index 9a24f6b17..745a5ba7c 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -120,5 +120,10 @@ compile_fuzzer tests/fuzzers/bls12381 FuzzCrossG1MultiExp fuzz_cross_g1_multiex compile_fuzzer tests/fuzzers/bls12381 FuzzCrossG2Add fuzz_cross_g2_add compile_fuzzer tests/fuzzers/bls12381 FuzzCrossPairing fuzz_cross_pairing +compile_fuzzer tests/fuzzers/snap FuzzARange fuzz_account_range +compile_fuzzer tests/fuzzers/snap FuzzSRange fuzz_storage_range +compile_fuzzer tests/fuzzers/snap FuzzByteCodes fuzz_byte_codes +compile_fuzzer tests/fuzzers/snap FuzzTrieNodes fuzz_trie_nodes + #TODO: move this to tests/fuzzers, if possible compile_fuzzer crypto/blake2b Fuzz fuzzBlake2b diff --git a/p2p/discover/v5wire/msg.go b/p2p/discover/v5wire/msg.go index 7c3686111..c04966847 100644 --- a/p2p/discover/v5wire/msg.go +++ b/p2p/discover/v5wire/msg.go @@ -84,7 +84,7 @@ type ( ReqID []byte ENRSeq uint64 ToIP net.IP // These fields should mirror the UDP envelope address of the ping - ToPort uint16 // packet, which provides a way to discover the the external address (after NAT). + ToPort uint16 // packet, which provides a way to discover the external address (after NAT). } // FINDNODE is a query for nodes in the given bucket. diff --git a/p2p/enr/enr.go b/p2p/enr/enr.go index 05e43fd80..15891813b 100644 --- a/p2p/enr/enr.go +++ b/p2p/enr/enr.go @@ -304,7 +304,7 @@ func (r *Record) AppendElements(list []interface{}) []interface{} { } func (r *Record) encode(sig []byte) (raw []byte, err error) { - list := make([]interface{}, 1, 2*len(r.pairs)+1) + list := make([]interface{}, 1, 2*len(r.pairs)+2) list[0] = sig list = r.AppendElements(list) if raw, err = rlp.EncodeToBytes(list); err != nil { diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go index 9323d53cb..2af0d0a6b 100644 --- a/p2p/nodestate/nodestate.go +++ b/p2p/nodestate/nodestate.go @@ -808,7 +808,14 @@ func (ns *NodeStateMachine) addTimeout(n *enode.Node, mask bitMask, timeout time ns.removeTimeouts(node, mask) t := &nodeStateTimeout{mask: mask} t.timer = ns.clock.AfterFunc(timeout, func() { - ns.SetState(n, Flags{}, Flags{mask: t.mask, setup: ns.setup}, 0) + ns.lock.Lock() + defer ns.lock.Unlock() + + if !ns.opStart() { + return + } + ns.setState(n, Flags{}, Flags{mask: t.mask, setup: ns.setup}, 0) + ns.opFinish() }) node.timeouts = append(node.timeouts, t) if mask&ns.saveFlags != 0 { diff --git a/p2p/peer.go b/p2p/peer.go index b6d0dbd1a..8f564e776 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -121,10 +121,18 @@ type Peer struct { // NewPeer returns a peer for testing purposes. func NewPeer(id enode.ID, name string, caps []Cap) *Peer { + // Generate a fake set of local protocols to match as running caps. Almost + // no fields need to be meaningful here as we're only using it to cross- + // check with the "remote" caps array.
+ protos := make([]Protocol, len(caps)) + for i, cap := range caps { + protos[i].Name = cap.Name + protos[i].Version = cap.Version + } pipe, _ := net.Pipe() node := enode.SignNull(new(enr.Record), id) conn := &conn{fd: pipe, transport: nil, node: node, caps: caps, name: name} - peer := newPeer(log.Root(), conn, nil) + peer := newPeer(log.Root(), conn, protos) close(peer.closed) // ensures Disconnect doesn't block return peer } diff --git a/p2p/simulations/README.md b/p2p/simulations/README.md index 871d71b2c..023f73a09 100644 --- a/p2p/simulations/README.md +++ b/p2p/simulations/README.md @@ -1,7 +1,7 @@ # devp2p Simulations -The `p2p/simulations` package implements a simulation framework which supports -creating a collection of devp2p nodes, connecting them together to form a +The `p2p/simulations` package implements a simulation framework that supports +creating a collection of devp2p nodes, connecting them to form a simulation network, performing simulation actions in that network and then extracting useful information. @@ -65,10 +65,10 @@ localhost ports. ## Network -A simulation network is created with an ID and default service (which is used -if a node is created without an explicit service), exposes methods for -creating, starting, stopping, connecting and disconnecting nodes, and emits -events when certain actions occur. +A simulation network is created with an ID and default service. The default +service is used if a node is created without an explicit service. The +network exposes methods for creating, starting, stopping, connecting +and disconnecting nodes. It also emits events when certain actions occur. ### Events @@ -80,7 +80,7 @@ A simulation network emits the following events: The events have a "control" flag which when set indicates that the event is the outcome of a controlled simulation action (e.g. creating a node or explicitly -connecting two nodes together). +connecting two nodes). This is in contrast to a non-control event, otherwise called a "live" event, which is the outcome of something happening in the network as a result of a @@ -98,12 +98,12 @@ network and then wait for expectations to be met. With a running simulation network, the `Simulation.Run` method can be called with a `Step` which has the following fields: -* `Action` - a function which performs some action in the network +* `Action` - a function that performs some action in the network * `Expect` - an expectation function which returns whether or not a given node meets the expectation -* `Trigger` - a channel which receives node IDs which then trigger a check +* `Trigger` - a channel that receives node IDs which then trigger a check of the expectation function to be performed against that node As a concrete example, consider a simulated network of Ethereum nodes. An @@ -116,7 +116,7 @@ the expectation and what network events were emitted during the step run. ## HTTP API -The simulation framework includes a HTTP API which can be used to control the +The simulation framework includes an HTTP API that can be used to control the simulation. The API is initialised with a particular node adapter and has the following diff --git a/params/config.go b/params/config.go index f767c1c4b..36482f238 100644 --- a/params/config.go +++ b/params/config.go @@ -257,16 +257,16 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} + AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} + AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} + TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} TestRules = TestChainConfig.Rules(new(big.Int)) ) @@ -346,6 +346,7 @@ type ChainConfig struct { BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin) LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london) ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // Eip-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) + MergeForkBlock *big.Int `json:"mergeForkBlock,omitempty"` // EIP-3675 (TheMerge) switch block (nil = no fork, 0 = already in merge proceedings) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. 
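For illustration only (no such config appears in this diff), a keyed ChainConfig that schedules the new merge fork and sets a terminal total difficulty could look as follows; CheckConfigForkOrder treats MergeForkBlock as optional, so leaving it nil is equally valid:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	zero := big.NewInt(0) // all earlier forks active from genesis
	config := &params.ChainConfig{
		ChainID:             big.NewInt(1337), // hypothetical private network
		HomesteadBlock:      zero,
		EIP150Block:         zero,
		EIP155Block:         zero,
		EIP158Block:         zero,
		ByzantiumBlock:      zero,
		ConstantinopleBlock: zero,
		PetersburgBlock:     zero,
		IstanbulBlock:       zero,
		BerlinBlock:         zero,
		LondonBlock:         zero,
		ArrowGlacierBlock:   zero,
		// The new EIP-3675 switch block and the total difficulty that
		// triggers the consensus upgrade; both values are made up here.
		MergeForkBlock:          big.NewInt(100),
		TerminalTotalDifficulty: big.NewInt(500000),
	}
	fmt.Println(config.CheckConfigForkOrder()) // <nil>: ordering is valid
}
```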
@@ -386,7 +387,7 @@ func (c *ChainConfig) String() string { default: engine = "unknown" } - return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, MergeFork: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -402,6 +403,7 @@ func (c *ChainConfig) String() string { c.BerlinBlock, c.LondonBlock, c.ArrowGlacierBlock, + c.MergeForkBlock, engine, ) } @@ -522,6 +524,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "berlinBlock", block: c.BerlinBlock}, {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, + {name: "mergeStartBlock", block: c.MergeForkBlock, optional: true}, } { if lastFork.name != "" { // Next one must be higher number @@ -594,6 +597,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi if isForkIncompatible(c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock, head) { return newCompatError("Arrow Glacier fork block", c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock) } + if isForkIncompatible(c.MergeForkBlock, newcfg.MergeForkBlock, head) { + return newCompatError("Merge Start fork block", c.MergeForkBlock, newcfg.MergeForkBlock) + } return nil } diff --git a/params/protocol_params.go b/params/protocol_params.go index 7abb2441b..5f154597a 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -19,9 +19,10 @@ package params import "math/big" const ( - GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations. - MinGasLimit uint64 = 5000 // Minimum the gas limit may ever be. - GenesisGasLimit uint64 = 4712388 // Gas limit of the Genesis block. + GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations. + MinGasLimit uint64 = 5000 // Minimum the gas limit may ever be. + MaxGasLimit uint64 = 0x7fffffffffffffff // Maximum the gas limit (2^63-1). + GenesisGasLimit uint64 = 4712388 // Gas limit of the Genesis block. MaximumExtraDataSize uint64 = 32 // Maximum size extra data may be after Genesis. ExpByteGas uint64 = 10 // Times ceil(log256(exponent)) for the EXP instruction. @@ -35,8 +36,8 @@ const ( LogDataGas uint64 = 8 // Per byte in a LOG* operation's data. CallStipend uint64 = 2300 // Free gas given at beginning of call. - Sha3Gas uint64 = 30 // Once per SHA3 operation. - Sha3WordGas uint64 = 6 // Once per word of the SHA3 operation's data. + Keccak256Gas uint64 = 30 // Once per KECCAK256 operation. + Keccak256WordGas uint64 = 6 // Once per word of the KECCAK256 operation's data. SstoreSetGas uint64 = 20000 // Once per SSTORE operation. SstoreResetGas uint64 = 5000 // Once per SSTORE operation if the zeroness changes from zero. 
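The SHA3 gas constants are renamed to their KECCAK256 equivalents without changing their values, so charges stay identical. As a worked example, the EVM's standard charge for hashing n bytes (the flat-plus-per-word rule itself is long-standing EVM behaviour, not introduced by this diff) works out as in this standalone sketch:

```go
package main

import "fmt"

// Constants mirror params.Keccak256Gas and params.Keccak256WordGas,
// inlined so the sketch runs standalone.
const (
	keccak256Gas     uint64 = 30 // once per KECCAK256 operation
	keccak256WordGas uint64 = 6  // once per 32-byte word of input
)

// keccak256OpGas returns the gas charged by the KECCAK256 opcode
// for an input of n bytes: a flat cost plus a per-word cost.
func keccak256OpGas(n uint64) uint64 {
	words := (n + 31) / 32 // round up to whole 32-byte words
	return keccak256Gas + keccak256WordGas*words
}

func main() {
	fmt.Println(keccak256OpGas(0))  // 30
	fmt.Println(keccak256OpGas(32)) // 36
	fmt.Println(keccak256OpGas(33)) // 42: 33 bytes occupy two words
}
```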
diff --git a/params/version.go b/params/version.go index 4a5b9835f..d45fee55a 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 10 // Minor version component of the current release - VersionPatch = 13 // Patch version component of the current release + VersionPatch = 14 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/plugins/wrappers/backendwrapper/backendwrapper.go b/plugins/wrappers/backendwrapper/backendwrapper.go index 1c89de3fa..9c0c1b7e2 100644 --- a/plugins/wrappers/backendwrapper/backendwrapper.go +++ b/plugins/wrappers/backendwrapper/backendwrapper.go @@ -206,11 +206,50 @@ func (p *progress) CurrentBlock() uint64 { func (p *progress) HighestBlock() uint64 { return p.p.HighestBlock } + func (p *progress) PulledStates() uint64 { - return p.p.PulledStates + log.Warn("PulledStates is no longer supported by Geth") + return 0 } func (p *progress) KnownStates() uint64 { - return p.p.KnownStates + log.Warn("KnownStates is no longer supported by Geth") + return 0 +} +func (p *progress) SyncedAccounts() uint64 { + return p.p.SyncedAccounts +} +func (p *progress) SyncedAccountBytes() uint64 { + return p.p.SyncedAccountBytes +} +func (p *progress) SyncedBytecodes() uint64 { + return p.p.SyncedBytecodes +} +func (p *progress) SyncedBytecodeBytes() uint64 { + return p.p.SyncedBytecodeBytes +} +func (p *progress) SyncedStorage() uint64 { + return p.p.SyncedStorage +} +func (p *progress) SyncedStorageBytes() uint64 { + return p.p.SyncedStorageBytes +} +func (p *progress) HealedTrienodes() uint64 { + return p.p.HealedTrienodes +} +func (p *progress) HealedTrienodeBytes() uint64 { + return p.p.HealedTrienodeBytes +} +func (p *progress) HealedBytecodes() uint64 { + return p.p.HealedBytecodes +} +func (p *progress) HealedBytecodeBytes() uint64 { + return p.p.HealedBytecodeBytes +} +func (p *progress) HealingTrienodes() uint64 { + return p.p.HealingTrienodes +} +func (p *progress) HealingBytecode() uint64 { + return p.p.HealingBytecode } func (d *dl) Progress() core.Progress { diff --git a/plugins/wrappers/wrappers.go b/plugins/wrappers/wrappers.go index e50280c12..0b0389f4a 100644 --- a/plugins/wrappers/wrappers.go +++ b/plugins/wrappers/wrappers.go @@ -44,7 +44,7 @@ func (w *WrappedContract) GetOp(n uint64) core.OpCode { } func (w *WrappedContract) GetByte(n uint64) byte { - return w.c.GetByte(n) + return byte(w.c.GetOp(n)) } func (w *WrappedContract) Caller() core.Address { diff --git a/rpc/errors.go b/rpc/errors.go index 4c06a745f..75425b925 100644 --- a/rpc/errors.go +++ b/rpc/errors.go @@ -54,6 +54,7 @@ var ( _ Error = new(invalidRequestError) _ Error = new(invalidMessageError) _ Error = new(invalidParamsError) + _ Error = new(CustomError) ) const defaultErrorCode = -32000 @@ -101,3 +102,12 @@ type invalidParamsError struct{ message string } func (e *invalidParamsError) ErrorCode() int { return -32602 } func (e *invalidParamsError) Error() string { return e.message } + +type CustomError struct { + Code int + ValidationError string +} + +func (e *CustomError) ErrorCode() int { return e.Code } + +func (e *CustomError) Error() string { return e.ValidationError } diff --git a/rpc/handler.go b/rpc/handler.go index 14513ca10..776a887d7 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -240,7 +240,7 @@ func (h *handler) handleImmediate(msg *jsonrpcMessage) bool { return false case 
msg.isResponse(): h.handleResponse(msg) - h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "t", time.Since(start)) + h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start)) return true default: return false @@ -292,12 +292,12 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess switch { case msg.isNotification(): h.handleCall(ctx, msg) - h.log.Debug("Served "+msg.Method, "t", time.Since(start)) + h.log.Debug("Served "+msg.Method, "duration", time.Since(start)) return nil case msg.isCall(): resp := h.handleCall(ctx, msg) var ctx []interface{} - ctx = append(ctx, "reqid", idForLog{msg.ID}, "t", time.Since(start)) + ctx = append(ctx, "reqid", idForLog{msg.ID}, "duration", time.Since(start)) if resp.Error != nil { ctx = append(ctx, "err", resp.Error.Message) if resp.Error.Data != nil { diff --git a/signer/core/api.go b/signer/core/api.go index fb68018a6..48b54b8f4 100644 --- a/signer/core/api.go +++ b/signer/core/api.go @@ -57,7 +57,7 @@ type ExternalAPI interface { // SignData - request to sign the given data (plus prefix) SignData(ctx context.Context, contentType string, addr common.MixedcaseAddress, data interface{}) (hexutil.Bytes, error) // SignTypedData - request to sign the given structured data (plus prefix) - SignTypedData(ctx context.Context, addr common.MixedcaseAddress, data TypedData) (hexutil.Bytes, error) + SignTypedData(ctx context.Context, addr common.MixedcaseAddress, data apitypes.TypedData) (hexutil.Bytes, error) // EcRecover - recover public key from given message and signature EcRecover(ctx context.Context, data hexutil.Bytes, sig hexutil.Bytes) (common.Address, error) // Version info about the APIs @@ -235,7 +235,7 @@ type ( ContentType string `json:"content_type"` Address common.MixedcaseAddress `json:"address"` Rawdata []byte `json:"raw_data"` - Messages []*NameValueType `json:"messages"` + Messages []*apitypes.NameValueType `json:"messages"` Callinfo []apitypes.ValidationInfo `json:"call_info"` Hash hexutil.Bytes `json:"hash"` Meta Metadata `json:"meta"` diff --git a/signer/core/signed_data_internal_test.go b/signer/core/apitypes/signed_data_internal_test.go similarity index 99% rename from signer/core/signed_data_internal_test.go rename to signer/core/apitypes/signed_data_internal_test.go index 9768ee0b3..121cc00de 100644 --- a/signer/core/signed_data_internal_test.go +++ b/signer/core/apitypes/signed_data_internal_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package core +package apitypes import ( "bytes" diff --git a/signer/core/apitypes/types.go b/signer/core/apitypes/types.go index 625959219..15ab15341 100644 --- a/signer/core/apitypes/types.go +++ b/signer/core/apitypes/types.go @@ -17,16 +17,29 @@ package apitypes import ( + "bytes" "encoding/json" + "errors" "fmt" "math/big" + "reflect" + "regexp" + "sort" + "strconv" "strings" + "unicode" + "unicode/utf8" + "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" ) +var typedDataReferenceTypeRegexp = regexp.MustCompile(`^[A-Z](\w*)(\[\])?$`) + type ValidationInfo struct { Typ string `json:"type"` Message string `json:"message"` @@ -154,3 +167,708 @@ func (args *SendTxArgs) ToTransaction() *types.Transaction { } return types.NewTx(data) } + +type SigFormat struct { + Mime string + ByteVersion byte +} + +var ( + IntendedValidator = SigFormat{ + accounts.MimetypeDataWithValidator, + 0x00, + } + DataTyped = SigFormat{ + accounts.MimetypeTypedData, + 0x01, + } + ApplicationClique = SigFormat{ + accounts.MimetypeClique, + 0x02, + } + TextPlain = SigFormat{ + accounts.MimetypeTextPlain, + 0x45, + } +) + +type ValidatorData struct { + Address common.Address + Message hexutil.Bytes +} + +// TypedData is a type to encapsulate EIP-712 typed messages +type TypedData struct { + Types Types `json:"types"` + PrimaryType string `json:"primaryType"` + Domain TypedDataDomain `json:"domain"` + Message TypedDataMessage `json:"message"` +} + +// Type is the inner type of an EIP-712 message +type Type struct { + Name string `json:"name"` + Type string `json:"type"` +} + +func (t *Type) isArray() bool { + return strings.HasSuffix(t.Type, "[]") +} + +// typeName returns the canonical name of the type. If the type is 'Person[]', then +// this method returns 'Person' +func (t *Type) typeName() string { + if strings.HasSuffix(t.Type, "[]") { + return strings.TrimSuffix(t.Type, "[]") + } + return t.Type +} + +func (t *Type) isReferenceType() bool { + if len(t.Type) == 0 { + return false + } + // Reference types must have a leading uppercase character + r, _ := utf8.DecodeRuneInString(t.Type) + return unicode.IsUpper(r) +} + +type Types map[string][]Type + +type TypePriority struct { + Type string + Value uint +} + +type TypedDataMessage = map[string]interface{} + +// TypedDataDomain represents the domain part of an EIP-712 message. 
+type TypedDataDomain struct { + Name string `json:"name"` + Version string `json:"version"` + ChainId *math.HexOrDecimal256 `json:"chainId"` + VerifyingContract string `json:"verifyingContract"` + Salt string `json:"salt"` +} + +// HashStruct generates a keccak256 hash of the encoding of the provided data +func (typedData *TypedData) HashStruct(primaryType string, data TypedDataMessage) (hexutil.Bytes, error) { + encodedData, err := typedData.EncodeData(primaryType, data, 1) + if err != nil { + return nil, err + } + return crypto.Keccak256(encodedData), nil +} + +// Dependencies returns an array of custom types ordered by their hierarchical reference tree +func (typedData *TypedData) Dependencies(primaryType string, found []string) []string { + includes := func(arr []string, str string) bool { + for _, obj := range arr { + if obj == str { + return true + } + } + return false + } + + if includes(found, primaryType) { + return found + } + if typedData.Types[primaryType] == nil { + return found + } + found = append(found, primaryType) + for _, field := range typedData.Types[primaryType] { + for _, dep := range typedData.Dependencies(field.Type, found) { + if !includes(found, dep) { + found = append(found, dep) + } + } + } + return found +} + +// EncodeType generates the following encoding: +// `name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖ "," ‖ … ‖ memberₙ ")"` +// +// each member is written as `type ‖ " " ‖ name` encodings cascade down and are sorted by name +func (typedData *TypedData) EncodeType(primaryType string) hexutil.Bytes { + // Get dependencies primary first, then alphabetical + deps := typedData.Dependencies(primaryType, []string{}) + if len(deps) > 0 { + slicedDeps := deps[1:] + sort.Strings(slicedDeps) + deps = append([]string{primaryType}, slicedDeps...) + } + + // Format as a string with fields + var buffer bytes.Buffer + for _, dep := range deps { + buffer.WriteString(dep) + buffer.WriteString("(") + for _, obj := range typedData.Types[dep] { + buffer.WriteString(obj.Type) + buffer.WriteString(" ") + buffer.WriteString(obj.Name) + buffer.WriteString(",") + } + buffer.Truncate(buffer.Len() - 1) + buffer.WriteString(")") + } + return buffer.Bytes() +} + +// TypeHash creates the keccak256 hash of the data +func (typedData *TypedData) TypeHash(primaryType string) hexutil.Bytes { + return crypto.Keccak256(typedData.EncodeType(primaryType)) +} + +// EncodeData generates the following encoding: +// `enc(value₁) ‖ enc(value₂) ‖ … ‖ enc(valueₙ)` +// +// each encoded member is 32-byte long +func (typedData *TypedData) EncodeData(primaryType string, data map[string]interface{}, depth int) (hexutil.Bytes, error) { + if err := typedData.validate(); err != nil { + return nil, err + } + + buffer := bytes.Buffer{} + + // Verify extra data + if exp, got := len(typedData.Types[primaryType]), len(data); exp < got { + return nil, fmt.Errorf("there is extra data provided in the message (%d < %d)", exp, got) + } + + // Add typehash + buffer.Write(typedData.TypeHash(primaryType)) + + // Add field contents. Structs and arrays have special handlers. 
+ for _, field := range typedData.Types[primaryType] { + encType := field.Type + encValue := data[field.Name] + if encType[len(encType)-1:] == "]" { + arrayValue, ok := encValue.([]interface{}) + if !ok { + return nil, dataMismatchError(encType, encValue) + } + + arrayBuffer := bytes.Buffer{} + parsedType := strings.Split(encType, "[")[0] + for _, item := range arrayValue { + if typedData.Types[parsedType] != nil { + mapValue, ok := item.(map[string]interface{}) + if !ok { + return nil, dataMismatchError(parsedType, item) + } + encodedData, err := typedData.EncodeData(parsedType, mapValue, depth+1) + if err != nil { + return nil, err + } + arrayBuffer.Write(encodedData) + } else { + bytesValue, err := typedData.EncodePrimitiveValue(parsedType, item, depth) + if err != nil { + return nil, err + } + arrayBuffer.Write(bytesValue) + } + } + + buffer.Write(crypto.Keccak256(arrayBuffer.Bytes())) + } else if typedData.Types[field.Type] != nil { + mapValue, ok := encValue.(map[string]interface{}) + if !ok { + return nil, dataMismatchError(encType, encValue) + } + encodedData, err := typedData.EncodeData(field.Type, mapValue, depth+1) + if err != nil { + return nil, err + } + buffer.Write(crypto.Keccak256(encodedData)) + } else { + byteValue, err := typedData.EncodePrimitiveValue(encType, encValue, depth) + if err != nil { + return nil, err + } + buffer.Write(byteValue) + } + } + return buffer.Bytes(), nil +} + +// Attempt to parse bytes in different formats: byte array, hex string, hexutil.Bytes. +func parseBytes(encType interface{}) ([]byte, bool) { + switch v := encType.(type) { + case []byte: + return v, true + case hexutil.Bytes: + return v, true + case string: + bytes, err := hexutil.Decode(v) + if err != nil { + return nil, false + } + return bytes, true + default: + return nil, false + } +} + +func parseInteger(encType string, encValue interface{}) (*big.Int, error) { + var ( + length int + signed = strings.HasPrefix(encType, "int") + b *big.Int + ) + if encType == "int" || encType == "uint" { + length = 256 + } else { + lengthStr := "" + if strings.HasPrefix(encType, "uint") { + lengthStr = strings.TrimPrefix(encType, "uint") + } else { + lengthStr = strings.TrimPrefix(encType, "int") + } + atoiSize, err := strconv.Atoi(lengthStr) + if err != nil { + return nil, fmt.Errorf("invalid size on integer: %v", lengthStr) + } + length = atoiSize + } + switch v := encValue.(type) { + case *math.HexOrDecimal256: + b = (*big.Int)(v) + case string: + var hexIntValue math.HexOrDecimal256 + if err := hexIntValue.UnmarshalText([]byte(v)); err != nil { + return nil, err + } + b = (*big.Int)(&hexIntValue) + case float64: + // JSON parses non-strings as float64. 
Fail if we cannot + // convert it losslessly + if float64(int64(v)) == v { + b = big.NewInt(int64(v)) + } else { + return nil, fmt.Errorf("invalid float value %v for type %v", v, encType) + } + } + if b == nil { + return nil, fmt.Errorf("invalid integer value %v/%v for type %v", encValue, reflect.TypeOf(encValue), encType) + } + if b.BitLen() > length { + return nil, fmt.Errorf("integer larger than '%v'", encType) + } + if !signed && b.Sign() == -1 { + return nil, fmt.Errorf("invalid negative value for unsigned type %v", encType) + } + return b, nil +} + +// EncodePrimitiveValue deals with the primitive values found +// while searching through the typed data +func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interface{}, depth int) ([]byte, error) { + switch encType { + case "address": + stringValue, ok := encValue.(string) + if !ok || !common.IsHexAddress(stringValue) { + return nil, dataMismatchError(encType, encValue) + } + retval := make([]byte, 32) + copy(retval[12:], common.HexToAddress(stringValue).Bytes()) + return retval, nil + case "bool": + boolValue, ok := encValue.(bool) + if !ok { + return nil, dataMismatchError(encType, encValue) + } + if boolValue { + return math.PaddedBigBytes(common.Big1, 32), nil + } + return math.PaddedBigBytes(common.Big0, 32), nil + case "string": + strVal, ok := encValue.(string) + if !ok { + return nil, dataMismatchError(encType, encValue) + } + return crypto.Keccak256([]byte(strVal)), nil + case "bytes": + bytesValue, ok := parseBytes(encValue) + if !ok { + return nil, dataMismatchError(encType, encValue) + } + return crypto.Keccak256(bytesValue), nil + } + if strings.HasPrefix(encType, "bytes") { + lengthStr := strings.TrimPrefix(encType, "bytes") + length, err := strconv.Atoi(lengthStr) + if err != nil { + return nil, fmt.Errorf("invalid size on bytes: %v", lengthStr) + } + if length < 0 || length > 32 { + return nil, fmt.Errorf("invalid size on bytes: %d", length) + } + if byteValue, ok := parseBytes(encValue); !ok || len(byteValue) != length { + return nil, dataMismatchError(encType, encValue) + } else { + // Right-pad the bits + dst := make([]byte, 32) + copy(dst, byteValue) + return dst, nil + } + } + if strings.HasPrefix(encType, "int") || strings.HasPrefix(encType, "uint") { + b, err := parseInteger(encType, encValue) + if err != nil { + return nil, err + } + return math.U256Bytes(b), nil + } + return nil, fmt.Errorf("unrecognized type '%s'", encType) + +} + +// dataMismatchError generates an error for a mismatch between +// the provided type and data +func dataMismatchError(encType string, encValue interface{}) error { + return fmt.Errorf("provided data '%v' doesn't match type '%s'", encValue, encType) +} + +// validate makes sure the types are sound +func (typedData *TypedData) validate() error { + if err := typedData.Types.validate(); err != nil { + return err + } + if err := typedData.Domain.validate(); err != nil { + return err + } + return nil +} + +// Map generates a map version of the typed data +func (typedData *TypedData) Map() map[string]interface{} { + dataMap := map[string]interface{}{ + "types": typedData.Types, + "domain": typedData.Domain.Map(), + "primaryType": typedData.PrimaryType, + "message": typedData.Message, + } + return dataMap +} + +// Format returns a representation of typedData, which can be easily displayed by a user-interface +// without in-depth knowledge about 712 rules +func (typedData *TypedData) Format() ([]*NameValueType, error) { + domain, err := 
typedData.formatData("EIP712Domain", typedData.Domain.Map()) + if err != nil { + return nil, err + } + ptype, err := typedData.formatData(typedData.PrimaryType, typedData.Message) + if err != nil { + return nil, err + } + var nvts []*NameValueType + nvts = append(nvts, &NameValueType{ + Name: "EIP712Domain", + Value: domain, + Typ: "domain", + }) + nvts = append(nvts, &NameValueType{ + Name: typedData.PrimaryType, + Value: ptype, + Typ: "primary type", + }) + return nvts, nil +} + +func (typedData *TypedData) formatData(primaryType string, data map[string]interface{}) ([]*NameValueType, error) { + var output []*NameValueType + + // Add field contents. Structs and arrays have special handlers. + for _, field := range typedData.Types[primaryType] { + encName := field.Name + encValue := data[encName] + item := &NameValueType{ + Name: encName, + Typ: field.Type, + } + if field.isArray() { + arrayValue, _ := encValue.([]interface{}) + parsedType := field.typeName() + for _, v := range arrayValue { + if typedData.Types[parsedType] != nil { + mapValue, _ := v.(map[string]interface{}) + mapOutput, err := typedData.formatData(parsedType, mapValue) + if err != nil { + return nil, err + } + item.Value = mapOutput + } else { + primitiveOutput, err := formatPrimitiveValue(field.Type, encValue) + if err != nil { + return nil, err + } + item.Value = primitiveOutput + } + } + } else if typedData.Types[field.Type] != nil { + if mapValue, ok := encValue.(map[string]interface{}); ok { + mapOutput, err := typedData.formatData(field.Type, mapValue) + if err != nil { + return nil, err + } + item.Value = mapOutput + } else { + item.Value = "" + } + } else { + primitiveOutput, err := formatPrimitiveValue(field.Type, encValue) + if err != nil { + return nil, err + } + item.Value = primitiveOutput + } + output = append(output, item) + } + return output, nil +} + +func formatPrimitiveValue(encType string, encValue interface{}) (string, error) { + switch encType { + case "address": + if stringValue, ok := encValue.(string); !ok { + return "", fmt.Errorf("could not format value %v as address", encValue) + } else { + return common.HexToAddress(stringValue).String(), nil + } + case "bool": + if boolValue, ok := encValue.(bool); !ok { + return "", fmt.Errorf("could not format value %v as bool", encValue) + } else { + return fmt.Sprintf("%t", boolValue), nil + } + case "bytes", "string": + return fmt.Sprintf("%s", encValue), nil + } + if strings.HasPrefix(encType, "bytes") { + return fmt.Sprintf("%s", encValue), nil + + } + if strings.HasPrefix(encType, "uint") || strings.HasPrefix(encType, "int") { + if b, err := parseInteger(encType, encValue); err != nil { + return "", err + } else { + return fmt.Sprintf("%d (0x%x)", b, b), nil + } + } + return "", fmt.Errorf("unhandled type %v", encType) +} + +// Validate checks if the types object is conformant to the specs +func (t Types) validate() error { + for typeKey, typeArr := range t { + if len(typeKey) == 0 { + return fmt.Errorf("empty type key") + } + for i, typeObj := range typeArr { + if len(typeObj.Type) == 0 { + return fmt.Errorf("type %q:%d: empty Type", typeKey, i) + } + if len(typeObj.Name) == 0 { + return fmt.Errorf("type %q:%d: empty Name", typeKey, i) + } + if typeKey == typeObj.Type { + return fmt.Errorf("type %q cannot reference itself", typeObj.Type) + } + if typeObj.isReferenceType() { + if _, exist := t[typeObj.typeName()]; !exist { + return fmt.Errorf("reference type %q is undefined", typeObj.Type) + } + if 
!typedDataReferenceTypeRegexp.MatchString(typeObj.Type) { + return fmt.Errorf("unknown reference type %q", typeObj.Type) + } + } else if !isPrimitiveTypeValid(typeObj.Type) { + return fmt.Errorf("unknown type %q", typeObj.Type) + } + } + } + return nil +} + +// Checks if the primitive value is valid +func isPrimitiveTypeValid(primitiveType string) bool { + if primitiveType == "address" || + primitiveType == "address[]" || + primitiveType == "bool" || + primitiveType == "bool[]" || + primitiveType == "string" || + primitiveType == "string[]" { + return true + } + if primitiveType == "bytes" || + primitiveType == "bytes[]" || + primitiveType == "bytes1" || + primitiveType == "bytes1[]" || + primitiveType == "bytes2" || + primitiveType == "bytes2[]" || + primitiveType == "bytes3" || + primitiveType == "bytes3[]" || + primitiveType == "bytes4" || + primitiveType == "bytes4[]" || + primitiveType == "bytes5" || + primitiveType == "bytes5[]" || + primitiveType == "bytes6" || + primitiveType == "bytes6[]" || + primitiveType == "bytes7" || + primitiveType == "bytes7[]" || + primitiveType == "bytes8" || + primitiveType == "bytes8[]" || + primitiveType == "bytes9" || + primitiveType == "bytes9[]" || + primitiveType == "bytes10" || + primitiveType == "bytes10[]" || + primitiveType == "bytes11" || + primitiveType == "bytes11[]" || + primitiveType == "bytes12" || + primitiveType == "bytes12[]" || + primitiveType == "bytes13" || + primitiveType == "bytes13[]" || + primitiveType == "bytes14" || + primitiveType == "bytes14[]" || + primitiveType == "bytes15" || + primitiveType == "bytes15[]" || + primitiveType == "bytes16" || + primitiveType == "bytes16[]" || + primitiveType == "bytes17" || + primitiveType == "bytes17[]" || + primitiveType == "bytes18" || + primitiveType == "bytes18[]" || + primitiveType == "bytes19" || + primitiveType == "bytes19[]" || + primitiveType == "bytes20" || + primitiveType == "bytes20[]" || + primitiveType == "bytes21" || + primitiveType == "bytes21[]" || + primitiveType == "bytes22" || + primitiveType == "bytes22[]" || + primitiveType == "bytes23" || + primitiveType == "bytes23[]" || + primitiveType == "bytes24" || + primitiveType == "bytes24[]" || + primitiveType == "bytes25" || + primitiveType == "bytes25[]" || + primitiveType == "bytes26" || + primitiveType == "bytes26[]" || + primitiveType == "bytes27" || + primitiveType == "bytes27[]" || + primitiveType == "bytes28" || + primitiveType == "bytes28[]" || + primitiveType == "bytes29" || + primitiveType == "bytes29[]" || + primitiveType == "bytes30" || + primitiveType == "bytes30[]" || + primitiveType == "bytes31" || + primitiveType == "bytes31[]" || + primitiveType == "bytes32" || + primitiveType == "bytes32[]" { + return true + } + if primitiveType == "int" || + primitiveType == "int[]" || + primitiveType == "int8" || + primitiveType == "int8[]" || + primitiveType == "int16" || + primitiveType == "int16[]" || + primitiveType == "int32" || + primitiveType == "int32[]" || + primitiveType == "int64" || + primitiveType == "int64[]" || + primitiveType == "int128" || + primitiveType == "int128[]" || + primitiveType == "int256" || + primitiveType == "int256[]" { + return true + } + if primitiveType == "uint" || + primitiveType == "uint[]" || + primitiveType == "uint8" || + primitiveType == "uint8[]" || + primitiveType == "uint16" || + primitiveType == "uint16[]" || + primitiveType == "uint32" || + primitiveType == "uint32[]" || + primitiveType == "uint64" || + primitiveType == "uint64[]" || + primitiveType == "uint128" || + 
primitiveType == "uint128[]" || + primitiveType == "uint256" || + primitiveType == "uint256[]" { + return true + } + return false +} + +// validate checks if the given domain is valid, i.e. contains at least +// the minimum viable keys and values +func (domain *TypedDataDomain) validate() error { + if domain.ChainId == nil && len(domain.Name) == 0 && len(domain.Version) == 0 && len(domain.VerifyingContract) == 0 && len(domain.Salt) == 0 { + return errors.New("domain is undefined") + } + + return nil +} + +// Map is a helper function to generate a map version of the domain +func (domain *TypedDataDomain) Map() map[string]interface{} { + dataMap := map[string]interface{}{} + + if domain.ChainId != nil { + dataMap["chainId"] = domain.ChainId + } + + if len(domain.Name) > 0 { + dataMap["name"] = domain.Name + } + + if len(domain.Version) > 0 { + dataMap["version"] = domain.Version + } + + if len(domain.VerifyingContract) > 0 { + dataMap["verifyingContract"] = domain.VerifyingContract + } + + if len(domain.Salt) > 0 { + dataMap["salt"] = domain.Salt + } + return dataMap +} + +// NameValueType is a very simple struct with Name, Value and Type. It's meant for simple +// json structures used to communicate signing-info about typed data with the UI +type NameValueType struct { + Name string `json:"name"` + Value interface{} `json:"value"` + Typ string `json:"type"` +} + +// Pprint returns a pretty-printed version of nvt +func (nvt *NameValueType) Pprint(depth int) string { + output := bytes.Buffer{} + output.WriteString(strings.Repeat("\u00a0", depth*2)) + output.WriteString(fmt.Sprintf("%s [%s]: ", nvt.Name, nvt.Typ)) + if nvts, ok := nvt.Value.([]*NameValueType); ok { + output.WriteString("\n") + for _, next := range nvts { + sublevel := next.Pprint(depth + 1) + output.WriteString(sublevel) + } + } else { + if nvt.Value != nil { + output.WriteString(fmt.Sprintf("%q\n", nvt.Value)) + } else { + output.WriteString("\n") + } + } + return output.String() +} diff --git a/signer/core/auditlog.go b/signer/core/auditlog.go index 84877ee71..663d6d131 100644 --- a/signer/core/auditlog.go +++ b/signer/core/auditlog.go @@ -89,7 +89,7 @@ func (l *AuditLogger) SignGnosisSafeTx(ctx context.Context, addr common.Mixedcas return res, e } -func (l *AuditLogger) SignTypedData(ctx context.Context, addr common.MixedcaseAddress, data TypedData) (hexutil.Bytes, error) { +func (l *AuditLogger) SignTypedData(ctx context.Context, addr common.MixedcaseAddress, data apitypes.TypedData) (hexutil.Bytes, error) { l.log.Info("SignTypedData", "type", "request", "metadata", MetadataFromContext(ctx).String(), "addr", addr.String(), "data", data) b, e := l.api.SignTypedData(ctx, addr, data) diff --git a/signer/core/gnosis_safe.go b/signer/core/gnosis_safe.go index bdf7f837a..016b1fff3 100644 --- a/signer/core/gnosis_safe.go +++ b/signer/core/gnosis_safe.go @@ -34,15 +34,15 @@ type GnosisSafeTx struct { } // ToTypedData converts the tx to a EIP-712 Typed Data structure for signing -func (tx *GnosisSafeTx) ToTypedData() TypedData { +func (tx *GnosisSafeTx) ToTypedData() apitypes.TypedData { var data hexutil.Bytes if tx.Data != nil { data = *tx.Data } - gnosisTypedData := TypedData{ - Types: Types{ - "EIP712Domain": []Type{{Name: "verifyingContract", Type: "address"}}, - "SafeTx": []Type{ + gnosisTypedData := apitypes.TypedData{ + Types: apitypes.Types{ + "EIP712Domain": []apitypes.Type{{Name: "verifyingContract", Type: "address"}}, + "SafeTx": []apitypes.Type{ {Name: "to", Type: "address"}, {Name: "value", Type: "uint256"}, {Name: 
"data", Type: "bytes"}, @@ -55,11 +55,11 @@ func (tx *GnosisSafeTx) ToTypedData() TypedData { {Name: "nonce", Type: "uint256"}, }, }, - Domain: TypedDataDomain{ + Domain: apitypes.TypedDataDomain{ VerifyingContract: tx.Safe.Address().Hex(), }, PrimaryType: "SafeTx", - Message: TypedDataMessage{ + Message: apitypes.TypedDataMessage{ "to": tx.To.Address().Hex(), "value": tx.Value.String(), "data": data, diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go index daa84313d..03494c098 100644 --- a/signer/core/signed_data.go +++ b/signer/core/signed_data.go @@ -17,24 +17,14 @@ package core import ( - "bytes" "context" "errors" "fmt" - "math/big" "mime" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -42,88 +32,6 @@ import ( "github.com/ethereum/go-ethereum/signer/core/apitypes" ) -type SigFormat struct { - Mime string - ByteVersion byte -} - -var ( - IntendedValidator = SigFormat{ - accounts.MimetypeDataWithValidator, - 0x00, - } - DataTyped = SigFormat{ - accounts.MimetypeTypedData, - 0x01, - } - ApplicationClique = SigFormat{ - accounts.MimetypeClique, - 0x02, - } - TextPlain = SigFormat{ - accounts.MimetypeTextPlain, - 0x45, - } -) - -type ValidatorData struct { - Address common.Address - Message hexutil.Bytes -} - -type TypedData struct { - Types Types `json:"types"` - PrimaryType string `json:"primaryType"` - Domain TypedDataDomain `json:"domain"` - Message TypedDataMessage `json:"message"` -} - -type Type struct { - Name string `json:"name"` - Type string `json:"type"` -} - -func (t *Type) isArray() bool { - return strings.HasSuffix(t.Type, "[]") -} - -// typeName returns the canonical name of the type. 
If the type is 'Person[]', then -// this method returns 'Person' -func (t *Type) typeName() string { - if strings.HasSuffix(t.Type, "[]") { - return strings.TrimSuffix(t.Type, "[]") - } - return t.Type -} - -func (t *Type) isReferenceType() bool { - if len(t.Type) == 0 { - return false - } - // Reference types must have a leading uppercase character - r, _ := utf8.DecodeRuneInString(t.Type) - return unicode.IsUpper(r) -} - -type Types map[string][]Type - -type TypePriority struct { - Type string - Value uint -} - -type TypedDataMessage = map[string]interface{} - -type TypedDataDomain struct { - Name string `json:"name"` - Version string `json:"version"` - ChainId *math.HexOrDecimal256 `json:"chainId"` - VerifyingContract string `json:"verifyingContract"` - Salt string `json:"salt"` -} - -var typedDataReferenceTypeRegexp = regexp.MustCompile(`^[A-Z](\w*)(\[\])?$`) - // sign receives a request and produces a signature // // Note, the produced signature conforms to the secp256k1 curve R, S and V values, @@ -195,14 +103,14 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType } switch mediaType { - case IntendedValidator.Mime: + case apitypes.IntendedValidator.Mime: // Data with an intended validator validatorData, err := UnmarshalValidatorData(data) if err != nil { return nil, useEthereumV, err } sighash, msg := SignTextValidator(validatorData) - messages := []*NameValueType{ + messages := []*apitypes.NameValueType{ { Name: "This is a request to sign data intended for a particular validator (see EIP 191 version 0)", Typ: "description", @@ -225,11 +133,11 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType }, } req = &SignDataRequest{ContentType: mediaType, Rawdata: []byte(msg), Messages: messages, Hash: sighash} - case ApplicationClique.Mime: + case apitypes.ApplicationClique.Mime: // Clique is the Ethereum PoA standard stringData, ok := data.(string) if !ok { - return nil, useEthereumV, fmt.Errorf("input for %v must be an hex-encoded string", ApplicationClique.Mime) + return nil, useEthereumV, fmt.Errorf("input for %v must be an hex-encoded string", apitypes.ApplicationClique.Mime) } cliqueData, err := hexutil.Decode(stringData) if err != nil { @@ -251,7 +159,7 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType if err != nil { return nil, useEthereumV, err } - messages := []*NameValueType{ + messages := []*apitypes.NameValueType{ { Name: "Clique header", Typ: "clique", @@ -272,7 +180,7 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType return nil, useEthereumV, err } else { sighash, msg := accounts.TextAndHash(textData) - messages := []*NameValueType{ + messages := []*apitypes.NameValueType{ { Name: "message", Typ: accounts.MimetypeTextPlain, @@ -291,7 +199,7 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType // SignTextWithValidator signs the given message which can be further recovered // with the given validator. // hash = keccak256("\x19\x00"${address}${data}). 
-func SignTextValidator(validatorData ValidatorData) (hexutil.Bytes, string) { +func SignTextValidator(validatorData apitypes.ValidatorData) (hexutil.Bytes, string) { msg := fmt.Sprintf("\x19\x00%s%s", string(validatorData.Address.Bytes()), string(validatorData.Message)) return crypto.Keccak256([]byte(msg)), msg } @@ -318,7 +226,7 @@ func cliqueHeaderHashAndRlp(header *types.Header) (hash, rlp []byte, err error) // It returns // - the signature, // - and/or any error -func (api *SignerAPI) SignTypedData(ctx context.Context, addr common.MixedcaseAddress, typedData TypedData) (hexutil.Bytes, error) { +func (api *SignerAPI) SignTypedData(ctx context.Context, addr common.MixedcaseAddress, typedData apitypes.TypedData) (hexutil.Bytes, error) { signature, _, err := api.signTypedData(ctx, addr, typedData, nil) return signature, err } @@ -326,7 +234,7 @@ func (api *SignerAPI) SignTypedData(ctx context.Context, addr common.MixedcaseAd // signTypedData is identical to the capitalized version, except that it also returns the hash (preimage) // - the signature preimage (hash) func (api *SignerAPI) signTypedData(ctx context.Context, addr common.MixedcaseAddress, - typedData TypedData, validationMessages *apitypes.ValidationMessages) (hexutil.Bytes, hexutil.Bytes, error) { + typedData apitypes.TypedData, validationMessages *apitypes.ValidationMessages) (hexutil.Bytes, hexutil.Bytes, error) { domainSeparator, err := typedData.HashStruct("EIP712Domain", typedData.Domain.Map()) if err != nil { return nil, nil, err @@ -342,7 +250,7 @@ func (api *SignerAPI) signTypedData(ctx context.Context, addr common.MixedcaseAd return nil, nil, err } req := &SignDataRequest{ - ContentType: DataTyped.Mime, + ContentType: apitypes.DataTyped.Mime, Rawdata: rawData, Messages: messages, Hash: sighash, @@ -358,289 +266,6 @@ func (api *SignerAPI) signTypedData(ctx context.Context, addr common.MixedcaseAd return signature, sighash, nil } -// HashStruct generates a keccak256 hash of the encoding of the provided data -func (typedData *TypedData) HashStruct(primaryType string, data TypedDataMessage) (hexutil.Bytes, error) { - encodedData, err := typedData.EncodeData(primaryType, data, 1) - if err != nil { - return nil, err - } - return crypto.Keccak256(encodedData), nil -} - -// Dependencies returns an array of custom types ordered by their hierarchical reference tree -func (typedData *TypedData) Dependencies(primaryType string, found []string) []string { - includes := func(arr []string, str string) bool { - for _, obj := range arr { - if obj == str { - return true - } - } - return false - } - - if includes(found, primaryType) { - return found - } - if typedData.Types[primaryType] == nil { - return found - } - found = append(found, primaryType) - for _, field := range typedData.Types[primaryType] { - for _, dep := range typedData.Dependencies(field.Type, found) { - if !includes(found, dep) { - found = append(found, dep) - } - } - } - return found -} - -// EncodeType generates the following encoding: -// `name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖ "," ‖ … ‖ memberₙ ")"` -// -// each member is written as `type ‖ " " ‖ name` encodings cascade down and are sorted by name -func (typedData *TypedData) EncodeType(primaryType string) hexutil.Bytes { - // Get dependencies primary first, then alphabetical - deps := typedData.Dependencies(primaryType, []string{}) - if len(deps) > 0 { - slicedDeps := deps[1:] - sort.Strings(slicedDeps) - deps = append([]string{primaryType}, slicedDeps...) 
- } - - // Format as a string with fields - var buffer bytes.Buffer - for _, dep := range deps { - buffer.WriteString(dep) - buffer.WriteString("(") - for _, obj := range typedData.Types[dep] { - buffer.WriteString(obj.Type) - buffer.WriteString(" ") - buffer.WriteString(obj.Name) - buffer.WriteString(",") - } - buffer.Truncate(buffer.Len() - 1) - buffer.WriteString(")") - } - return buffer.Bytes() -} - -// TypeHash creates the keccak256 hash of the data -func (typedData *TypedData) TypeHash(primaryType string) hexutil.Bytes { - return crypto.Keccak256(typedData.EncodeType(primaryType)) -} - -// EncodeData generates the following encoding: -// `enc(value₁) ‖ enc(value₂) ‖ … ‖ enc(valueₙ)` -// -// each encoded member is 32-byte long -func (typedData *TypedData) EncodeData(primaryType string, data map[string]interface{}, depth int) (hexutil.Bytes, error) { - if err := typedData.validate(); err != nil { - return nil, err - } - - buffer := bytes.Buffer{} - - // Verify extra data - if exp, got := len(typedData.Types[primaryType]), len(data); exp < got { - return nil, fmt.Errorf("there is extra data provided in the message (%d < %d)", exp, got) - } - - // Add typehash - buffer.Write(typedData.TypeHash(primaryType)) - - // Add field contents. Structs and arrays have special handlers. - for _, field := range typedData.Types[primaryType] { - encType := field.Type - encValue := data[field.Name] - if encType[len(encType)-1:] == "]" { - arrayValue, ok := encValue.([]interface{}) - if !ok { - return nil, dataMismatchError(encType, encValue) - } - - arrayBuffer := bytes.Buffer{} - parsedType := strings.Split(encType, "[")[0] - for _, item := range arrayValue { - if typedData.Types[parsedType] != nil { - mapValue, ok := item.(map[string]interface{}) - if !ok { - return nil, dataMismatchError(parsedType, item) - } - encodedData, err := typedData.EncodeData(parsedType, mapValue, depth+1) - if err != nil { - return nil, err - } - arrayBuffer.Write(encodedData) - } else { - bytesValue, err := typedData.EncodePrimitiveValue(parsedType, item, depth) - if err != nil { - return nil, err - } - arrayBuffer.Write(bytesValue) - } - } - - buffer.Write(crypto.Keccak256(arrayBuffer.Bytes())) - } else if typedData.Types[field.Type] != nil { - mapValue, ok := encValue.(map[string]interface{}) - if !ok { - return nil, dataMismatchError(encType, encValue) - } - encodedData, err := typedData.EncodeData(field.Type, mapValue, depth+1) - if err != nil { - return nil, err - } - buffer.Write(crypto.Keccak256(encodedData)) - } else { - byteValue, err := typedData.EncodePrimitiveValue(encType, encValue, depth) - if err != nil { - return nil, err - } - buffer.Write(byteValue) - } - } - return buffer.Bytes(), nil -} - -// Attempt to parse bytes in different formats: byte array, hex string, hexutil.Bytes. 
-func parseBytes(encType interface{}) ([]byte, bool) { - switch v := encType.(type) { - case []byte: - return v, true - case hexutil.Bytes: - return v, true - case string: - bytes, err := hexutil.Decode(v) - if err != nil { - return nil, false - } - return bytes, true - default: - return nil, false - } -} - -func parseInteger(encType string, encValue interface{}) (*big.Int, error) { - var ( - length int - signed = strings.HasPrefix(encType, "int") - b *big.Int - ) - if encType == "int" || encType == "uint" { - length = 256 - } else { - lengthStr := "" - if strings.HasPrefix(encType, "uint") { - lengthStr = strings.TrimPrefix(encType, "uint") - } else { - lengthStr = strings.TrimPrefix(encType, "int") - } - atoiSize, err := strconv.Atoi(lengthStr) - if err != nil { - return nil, fmt.Errorf("invalid size on integer: %v", lengthStr) - } - length = atoiSize - } - switch v := encValue.(type) { - case *math.HexOrDecimal256: - b = (*big.Int)(v) - case string: - var hexIntValue math.HexOrDecimal256 - if err := hexIntValue.UnmarshalText([]byte(v)); err != nil { - return nil, err - } - b = (*big.Int)(&hexIntValue) - case float64: - // JSON parses non-strings as float64. Fail if we cannot - // convert it losslessly - if float64(int64(v)) == v { - b = big.NewInt(int64(v)) - } else { - return nil, fmt.Errorf("invalid float value %v for type %v", v, encType) - } - } - if b == nil { - return nil, fmt.Errorf("invalid integer value %v/%v for type %v", encValue, reflect.TypeOf(encValue), encType) - } - if b.BitLen() > length { - return nil, fmt.Errorf("integer larger than '%v'", encType) - } - if !signed && b.Sign() == -1 { - return nil, fmt.Errorf("invalid negative value for unsigned type %v", encType) - } - return b, nil -} - -// EncodePrimitiveValue deals with the primitive values found -// while searching through the typed data -func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interface{}, depth int) ([]byte, error) { - switch encType { - case "address": - stringValue, ok := encValue.(string) - if !ok || !common.IsHexAddress(stringValue) { - return nil, dataMismatchError(encType, encValue) - } - retval := make([]byte, 32) - copy(retval[12:], common.HexToAddress(stringValue).Bytes()) - return retval, nil - case "bool": - boolValue, ok := encValue.(bool) - if !ok { - return nil, dataMismatchError(encType, encValue) - } - if boolValue { - return math.PaddedBigBytes(common.Big1, 32), nil - } - return math.PaddedBigBytes(common.Big0, 32), nil - case "string": - strVal, ok := encValue.(string) - if !ok { - return nil, dataMismatchError(encType, encValue) - } - return crypto.Keccak256([]byte(strVal)), nil - case "bytes": - bytesValue, ok := parseBytes(encValue) - if !ok { - return nil, dataMismatchError(encType, encValue) - } - return crypto.Keccak256(bytesValue), nil - } - if strings.HasPrefix(encType, "bytes") { - lengthStr := strings.TrimPrefix(encType, "bytes") - length, err := strconv.Atoi(lengthStr) - if err != nil { - return nil, fmt.Errorf("invalid size on bytes: %v", lengthStr) - } - if length < 0 || length > 32 { - return nil, fmt.Errorf("invalid size on bytes: %d", length) - } - if byteValue, ok := parseBytes(encValue); !ok || len(byteValue) != length { - return nil, dataMismatchError(encType, encValue) - } else { - // Right-pad the bits - dst := make([]byte, 32) - copy(dst, byteValue) - return dst, nil - } - } - if strings.HasPrefix(encType, "int") || strings.HasPrefix(encType, "uint") { - b, err := parseInteger(encType, encValue) - if err != nil { - return nil, err - } - 
return math.U256Bytes(b), nil - } - return nil, fmt.Errorf("unrecognized type '%s'", encType) - -} - -// dataMismatchError generates an error for a mismatch between -// the provided type and data -func dataMismatchError(encType string, encValue interface{}) error { - return fmt.Errorf("provided data '%v' doesn't match type '%s'", encValue, encType) -} - // EcRecover recovers the address associated with the given sig. // Only compatible with `text/plain` func (api *SignerAPI) EcRecover(ctx context.Context, data hexutil.Bytes, sig hexutil.Bytes) (common.Address, error) { @@ -671,376 +296,37 @@ func (api *SignerAPI) EcRecover(ctx context.Context, data hexutil.Bytes, sig hex } // UnmarshalValidatorData converts the bytes input to typed data -func UnmarshalValidatorData(data interface{}) (ValidatorData, error) { +func UnmarshalValidatorData(data interface{}) (apitypes.ValidatorData, error) { raw, ok := data.(map[string]interface{}) if !ok { - return ValidatorData{}, errors.New("validator input is not a map[string]interface{}") + return apitypes.ValidatorData{}, errors.New("validator input is not a map[string]interface{}") } addr, ok := raw["address"].(string) if !ok { - return ValidatorData{}, errors.New("validator address is not sent as a string") + return apitypes.ValidatorData{}, errors.New("validator address is not sent as a string") } addrBytes, err := hexutil.Decode(addr) if err != nil { - return ValidatorData{}, err + return apitypes.ValidatorData{}, err } if !ok || len(addrBytes) == 0 { - return ValidatorData{}, errors.New("validator address is undefined") + return apitypes.ValidatorData{}, errors.New("validator address is undefined") } message, ok := raw["message"].(string) if !ok { - return ValidatorData{}, errors.New("message is not sent as a string") + return apitypes.ValidatorData{}, errors.New("message is not sent as a string") } messageBytes, err := hexutil.Decode(message) if err != nil { - return ValidatorData{}, err + return apitypes.ValidatorData{}, err } if !ok || len(messageBytes) == 0 { - return ValidatorData{}, errors.New("message is undefined") + return apitypes.ValidatorData{}, errors.New("message is undefined") } - return ValidatorData{ + return apitypes.ValidatorData{ Address: common.BytesToAddress(addrBytes), Message: messageBytes, }, nil } - -// validate makes sure the types are sound -func (typedData *TypedData) validate() error { - if err := typedData.Types.validate(); err != nil { - return err - } - if err := typedData.Domain.validate(); err != nil { - return err - } - return nil -} - -// Map generates a map version of the typed data -func (typedData *TypedData) Map() map[string]interface{} { - dataMap := map[string]interface{}{ - "types": typedData.Types, - "domain": typedData.Domain.Map(), - "primaryType": typedData.PrimaryType, - "message": typedData.Message, - } - return dataMap -} - -// Format returns a representation of typedData, which can be easily displayed by a user-interface -// without in-depth knowledge about 712 rules -func (typedData *TypedData) Format() ([]*NameValueType, error) { - domain, err := typedData.formatData("EIP712Domain", typedData.Domain.Map()) - if err != nil { - return nil, err - } - ptype, err := typedData.formatData(typedData.PrimaryType, typedData.Message) - if err != nil { - return nil, err - } - var nvts []*NameValueType - nvts = append(nvts, &NameValueType{ - Name: "EIP712Domain", - Value: domain, - Typ: "domain", - }) - nvts = append(nvts, &NameValueType{ - Name: typedData.PrimaryType, - Value: ptype, - Typ: "primary type", - 
}) - return nvts, nil -} - -func (typedData *TypedData) formatData(primaryType string, data map[string]interface{}) ([]*NameValueType, error) { - var output []*NameValueType - - // Add field contents. Structs and arrays have special handlers. - for _, field := range typedData.Types[primaryType] { - encName := field.Name - encValue := data[encName] - item := &NameValueType{ - Name: encName, - Typ: field.Type, - } - if field.isArray() { - arrayValue, _ := encValue.([]interface{}) - parsedType := field.typeName() - for _, v := range arrayValue { - if typedData.Types[parsedType] != nil { - mapValue, _ := v.(map[string]interface{}) - mapOutput, err := typedData.formatData(parsedType, mapValue) - if err != nil { - return nil, err - } - item.Value = mapOutput - } else { - primitiveOutput, err := formatPrimitiveValue(field.Type, encValue) - if err != nil { - return nil, err - } - item.Value = primitiveOutput - } - } - } else if typedData.Types[field.Type] != nil { - if mapValue, ok := encValue.(map[string]interface{}); ok { - mapOutput, err := typedData.formatData(field.Type, mapValue) - if err != nil { - return nil, err - } - item.Value = mapOutput - } else { - item.Value = "" - } - } else { - primitiveOutput, err := formatPrimitiveValue(field.Type, encValue) - if err != nil { - return nil, err - } - item.Value = primitiveOutput - } - output = append(output, item) - } - return output, nil -} - -func formatPrimitiveValue(encType string, encValue interface{}) (string, error) { - switch encType { - case "address": - if stringValue, ok := encValue.(string); !ok { - return "", fmt.Errorf("could not format value %v as address", encValue) - } else { - return common.HexToAddress(stringValue).String(), nil - } - case "bool": - if boolValue, ok := encValue.(bool); !ok { - return "", fmt.Errorf("could not format value %v as bool", encValue) - } else { - return fmt.Sprintf("%t", boolValue), nil - } - case "bytes", "string": - return fmt.Sprintf("%s", encValue), nil - } - if strings.HasPrefix(encType, "bytes") { - return fmt.Sprintf("%s", encValue), nil - - } - if strings.HasPrefix(encType, "uint") || strings.HasPrefix(encType, "int") { - if b, err := parseInteger(encType, encValue); err != nil { - return "", err - } else { - return fmt.Sprintf("%d (0x%x)", b, b), nil - } - } - return "", fmt.Errorf("unhandled type %v", encType) -} - -// NameValueType is a very simple struct with Name, Value and Type. 
It's meant for simple -// json structures used to communicate signing-info about typed data with the UI -type NameValueType struct { - Name string `json:"name"` - Value interface{} `json:"value"` - Typ string `json:"type"` -} - -// Pprint returns a pretty-printed version of nvt -func (nvt *NameValueType) Pprint(depth int) string { - output := bytes.Buffer{} - output.WriteString(strings.Repeat("\u00a0", depth*2)) - output.WriteString(fmt.Sprintf("%s [%s]: ", nvt.Name, nvt.Typ)) - if nvts, ok := nvt.Value.([]*NameValueType); ok { - output.WriteString("\n") - for _, next := range nvts { - sublevel := next.Pprint(depth + 1) - output.WriteString(sublevel) - } - } else { - if nvt.Value != nil { - output.WriteString(fmt.Sprintf("%q\n", nvt.Value)) - } else { - output.WriteString("\n") - } - } - return output.String() -} - -// Validate checks if the types object is conformant to the specs -func (t Types) validate() error { - for typeKey, typeArr := range t { - if len(typeKey) == 0 { - return fmt.Errorf("empty type key") - } - for i, typeObj := range typeArr { - if len(typeObj.Type) == 0 { - return fmt.Errorf("type %q:%d: empty Type", typeKey, i) - } - if len(typeObj.Name) == 0 { - return fmt.Errorf("type %q:%d: empty Name", typeKey, i) - } - if typeKey == typeObj.Type { - return fmt.Errorf("type %q cannot reference itself", typeObj.Type) - } - if typeObj.isReferenceType() { - if _, exist := t[typeObj.typeName()]; !exist { - return fmt.Errorf("reference type %q is undefined", typeObj.Type) - } - if !typedDataReferenceTypeRegexp.MatchString(typeObj.Type) { - return fmt.Errorf("unknown reference type %q", typeObj.Type) - } - } else if !isPrimitiveTypeValid(typeObj.Type) { - return fmt.Errorf("unknown type %q", typeObj.Type) - } - } - } - return nil -} - -// Checks if the primitive value is valid -func isPrimitiveTypeValid(primitiveType string) bool { - if primitiveType == "address" || - primitiveType == "address[]" || - primitiveType == "bool" || - primitiveType == "bool[]" || - primitiveType == "string" || - primitiveType == "string[]" { - return true - } - if primitiveType == "bytes" || - primitiveType == "bytes[]" || - primitiveType == "bytes1" || - primitiveType == "bytes1[]" || - primitiveType == "bytes2" || - primitiveType == "bytes2[]" || - primitiveType == "bytes3" || - primitiveType == "bytes3[]" || - primitiveType == "bytes4" || - primitiveType == "bytes4[]" || - primitiveType == "bytes5" || - primitiveType == "bytes5[]" || - primitiveType == "bytes6" || - primitiveType == "bytes6[]" || - primitiveType == "bytes7" || - primitiveType == "bytes7[]" || - primitiveType == "bytes8" || - primitiveType == "bytes8[]" || - primitiveType == "bytes9" || - primitiveType == "bytes9[]" || - primitiveType == "bytes10" || - primitiveType == "bytes10[]" || - primitiveType == "bytes11" || - primitiveType == "bytes11[]" || - primitiveType == "bytes12" || - primitiveType == "bytes12[]" || - primitiveType == "bytes13" || - primitiveType == "bytes13[]" || - primitiveType == "bytes14" || - primitiveType == "bytes14[]" || - primitiveType == "bytes15" || - primitiveType == "bytes15[]" || - primitiveType == "bytes16" || - primitiveType == "bytes16[]" || - primitiveType == "bytes17" || - primitiveType == "bytes17[]" || - primitiveType == "bytes18" || - primitiveType == "bytes18[]" || - primitiveType == "bytes19" || - primitiveType == "bytes19[]" || - primitiveType == "bytes20" || - primitiveType == "bytes20[]" || - primitiveType == "bytes21" || - primitiveType == "bytes21[]" || - primitiveType == "bytes22" || - 
primitiveType == "bytes22[]" || - primitiveType == "bytes23" || - primitiveType == "bytes23[]" || - primitiveType == "bytes24" || - primitiveType == "bytes24[]" || - primitiveType == "bytes25" || - primitiveType == "bytes25[]" || - primitiveType == "bytes26" || - primitiveType == "bytes26[]" || - primitiveType == "bytes27" || - primitiveType == "bytes27[]" || - primitiveType == "bytes28" || - primitiveType == "bytes28[]" || - primitiveType == "bytes29" || - primitiveType == "bytes29[]" || - primitiveType == "bytes30" || - primitiveType == "bytes30[]" || - primitiveType == "bytes31" || - primitiveType == "bytes31[]" || - primitiveType == "bytes32" || - primitiveType == "bytes32[]" { - return true - } - if primitiveType == "int" || - primitiveType == "int[]" || - primitiveType == "int8" || - primitiveType == "int8[]" || - primitiveType == "int16" || - primitiveType == "int16[]" || - primitiveType == "int32" || - primitiveType == "int32[]" || - primitiveType == "int64" || - primitiveType == "int64[]" || - primitiveType == "int128" || - primitiveType == "int128[]" || - primitiveType == "int256" || - primitiveType == "int256[]" { - return true - } - if primitiveType == "uint" || - primitiveType == "uint[]" || - primitiveType == "uint8" || - primitiveType == "uint8[]" || - primitiveType == "uint16" || - primitiveType == "uint16[]" || - primitiveType == "uint32" || - primitiveType == "uint32[]" || - primitiveType == "uint64" || - primitiveType == "uint64[]" || - primitiveType == "uint128" || - primitiveType == "uint128[]" || - primitiveType == "uint256" || - primitiveType == "uint256[]" { - return true - } - return false -} - -// validate checks if the given domain is valid, i.e. contains at least -// the minimum viable keys and values -func (domain *TypedDataDomain) validate() error { - if domain.ChainId == nil && len(domain.Name) == 0 && len(domain.Version) == 0 && len(domain.VerifyingContract) == 0 && len(domain.Salt) == 0 { - return errors.New("domain is undefined") - } - - return nil -} - -// Map is a helper function to generate a map version of the domain -func (domain *TypedDataDomain) Map() map[string]interface{} { - dataMap := map[string]interface{}{} - - if domain.ChainId != nil { - dataMap["chainId"] = domain.ChainId - } - - if len(domain.Name) > 0 { - dataMap["name"] = domain.Name - } - - if len(domain.Version) > 0 { - dataMap["version"] = domain.Version - } - - if len(domain.VerifyingContract) > 0 { - dataMap["verifyingContract"] = domain.VerifyingContract - } - - if len(domain.Salt) > 0 { - dataMap["salt"] = domain.Salt - } - return dataMap -} diff --git a/signer/core/signed_data_test.go b/signer/core/signed_data_test.go index 23b7b9897..1d972d296 100644 --- a/signer/core/signed_data_test.go +++ b/signer/core/signed_data_test.go @@ -32,9 +32,10 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/signer/core" + "github.com/ethereum/go-ethereum/signer/core/apitypes" ) -var typesStandard = core.Types{ +var typesStandard = apitypes.Types{ "EIP712Domain": { { Name: "name", @@ -153,12 +154,12 @@ var jsonTypedData = ` const primaryType = "Mail" -var domainStandard = core.TypedDataDomain{ - "Ether Mail", - "1", - math.NewHexOrDecimal256(1), - "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC", - "", +var domainStandard = apitypes.TypedDataDomain{ + Name: "Ether Mail", + Version: "1", + ChainId: math.NewHexOrDecimal256(1), + VerifyingContract: "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC", + Salt: "", } var 
messageStandard = map[string]interface{}{ @@ -173,7 +174,7 @@ var messageStandard = map[string]interface{}{ "contents": "Hello, Bob!", } -var typedData = core.TypedData{ +var typedData = apitypes.TypedData{ Types: typesStandard, PrimaryType: primaryType, Domain: domainStandard, @@ -194,7 +195,7 @@ func TestSignData(t *testing.T) { control.approveCh <- "Y" control.inputCh <- "wrongpassword" - signature, err := api.SignData(context.Background(), core.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) + signature, err := api.SignData(context.Background(), apitypes.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) if signature != nil { t.Errorf("Expected nil-data, got %x", signature) } @@ -202,7 +203,7 @@ func TestSignData(t *testing.T) { t.Errorf("Expected ErrLocked! '%v'", err) } control.approveCh <- "No way" - signature, err = api.SignData(context.Background(), core.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) + signature, err = api.SignData(context.Background(), apitypes.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) if signature != nil { t.Errorf("Expected nil-data, got %x", signature) } @@ -212,7 +213,7 @@ func TestSignData(t *testing.T) { // text/plain control.approveCh <- "Y" control.inputCh <- "a_long_password" - signature, err = api.SignData(context.Background(), core.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) + signature, err = api.SignData(context.Background(), apitypes.TextPlain.Mime, a, hexutil.Encode([]byte("EHLO world"))) if err != nil { t.Fatal(err) } @@ -232,13 +233,13 @@ func TestSignData(t *testing.T) { } func TestDomainChainId(t *testing.T) { - withoutChainID := core.TypedData{ - Types: core.Types{ - "EIP712Domain": []core.Type{ + withoutChainID := apitypes.TypedData{ + Types: apitypes.Types{ + "EIP712Domain": []apitypes.Type{ {Name: "name", Type: "string"}, }, }, - Domain: core.TypedDataDomain{ + Domain: apitypes.TypedDataDomain{ Name: "test", }, } @@ -250,14 +251,14 @@ func TestDomainChainId(t *testing.T) { if _, err := withoutChainID.HashStruct("EIP712Domain", withoutChainID.Domain.Map()); err != nil { t.Errorf("Expected the typedData to encode the domain successfully, got %v", err) } - withChainID := core.TypedData{ - Types: core.Types{ - "EIP712Domain": []core.Type{ + withChainID := apitypes.TypedData{ + Types: apitypes.Types{ + "EIP712Domain": []apitypes.Type{ {Name: "name", Type: "string"}, {Name: "chainId", Type: "uint256"}, }, }, - Domain: core.TypedDataDomain{ + Domain: apitypes.TypedDataDomain{ Name: "test", ChainId: math.NewHexOrDecimal256(1), }, @@ -323,7 +324,7 @@ func TestEncodeData(t *testing.T) { } func TestFormatter(t *testing.T) { - var d core.TypedData + var d apitypes.TypedData err := json.Unmarshal([]byte(jsonTypedData), &d) if err != nil { t.Fatalf("unmarshalling failed '%v'", err) @@ -337,7 +338,7 @@ func TestFormatter(t *testing.T) { t.Logf("'%v'\n", string(j)) } -func sign(typedData core.TypedData) ([]byte, []byte, error) { +func sign(typedData apitypes.TypedData) ([]byte, []byte, error) { domainSeparator, err := typedData.HashStruct("EIP712Domain", typedData.Domain.Map()) if err != nil { return nil, nil, err @@ -366,7 +367,7 @@ func TestJsonFiles(t *testing.T) { t.Errorf("Failed to read file %v: %v", fInfo.Name(), err) continue } - var typedData core.TypedData + var typedData apitypes.TypedData err = json.Unmarshal(data, &typedData) if err != nil { t.Errorf("Test %d, file %v, json unmarshalling failed: %v", i, fInfo.Name(), err) @@ -398,7 +399,7 @@ func TestFuzzerFiles(t *testing.T) { 
t.Errorf("Failed to read file %v: %v", fInfo.Name(), err) continue } - var typedData core.TypedData + var typedData apitypes.TypedData err = json.Unmarshal(data, &typedData) if err != nil { t.Errorf("Test %d, file %v, json unmarshalling failed: %v", i, fInfo.Name(), err) @@ -498,7 +499,7 @@ var gnosisTx = ` // TestGnosisTypedData tests the scenario where a user submits a full EIP-712 // struct without using the gnosis-specific endpoint func TestGnosisTypedData(t *testing.T) { - var td core.TypedData + var td apitypes.TypedData err := json.Unmarshal([]byte(gnosisTypedData), &td) if err != nil { t.Fatalf("unmarshalling failed '%v'", err) diff --git a/signer/rules/rules_test.go b/signer/rules/rules_test.go index d506ef2db..0ab246eea 100644 --- a/signer/rules/rules_test.go +++ b/signer/rules/rules_test.go @@ -605,7 +605,7 @@ function ApproveSignData(r){ t.Logf("address %v %v\n", addr.String(), addr.Original()) - nvt := []*core.NameValueType{ + nvt := []*apitypes.NameValueType{ { Name: "message", Typ: "text/plain", diff --git a/tests/fuzzers/snap/debug/main.go b/tests/fuzzers/snap/debug/main.go new file mode 100644 index 000000000..d0d1b4930 --- /dev/null +++ b/tests/fuzzers/snap/debug/main.go @@ -0,0 +1,39 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/ethereum/go-ethereum/tests/fuzzers/snap" +) + +func main() { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, "Usage: debug \n") + os.Exit(1) + } + crasher := os.Args[1] + data, err := ioutil.ReadFile(crasher) + if err != nil { + fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err) + os.Exit(1) + } + snap.FuzzTrieNodes(data) +} diff --git a/tests/fuzzers/snap/fuzz_handler.go b/tests/fuzzers/snap/fuzz_handler.go new file mode 100644 index 000000000..1ae61df29 --- /dev/null +++ b/tests/fuzzers/snap/fuzz_handler.go @@ -0,0 +1,164 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package snap + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/protocols/snap" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + fuzz "github.com/google/gofuzz" +) + +var trieRoot common.Hash + +func getChain() *core.BlockChain { + db := rawdb.NewMemoryDatabase() + ga := make(core.GenesisAlloc, 1000) + var a = make([]byte, 20) + var mkStorage = func(k, v int) (common.Hash, common.Hash) { + var kB = make([]byte, 32) + var vB = make([]byte, 32) + binary.LittleEndian.PutUint64(kB, uint64(k)) + binary.LittleEndian.PutUint64(vB, uint64(v)) + return common.BytesToHash(kB), common.BytesToHash(vB) + } + storage := make(map[common.Hash]common.Hash) + for i := 0; i < 10; i++ { + k, v := mkStorage(i, i) + storage[k] = v + } + for i := 0; i < 1000; i++ { + binary.LittleEndian.PutUint64(a, uint64(i+0xff)) + acc := core.GenesisAccount{Balance: big.NewInt(int64(i))} + if i%2 == 1 { + acc.Storage = storage + } + ga[common.BytesToAddress(a)] = acc + } + gspec := core.Genesis{ + Config: params.TestChainConfig, + Alloc: ga, + } + genesis := gspec.MustCommit(db) + blocks, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 2, + func(i int, gen *core.BlockGen) {}) + cacheConf := &core.CacheConfig{ + TrieCleanLimit: 0, + TrieDirtyLimit: 0, + TrieTimeLimit: 5 * time.Minute, + TrieCleanNoPrefetch: true, + TrieCleanRejournal: 0, + SnapshotLimit: 100, + SnapshotWait: true, + } + trieRoot = blocks[len(blocks)-1].Root() + bc, _ := core.NewBlockChain(db, cacheConf, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + if _, err := bc.InsertChain(blocks); err != nil { + panic(err) + } + return bc +} + +type dummyBackend struct { + chain *core.BlockChain +} + +func (d *dummyBackend) Chain() *core.BlockChain { return d.chain } +func (d *dummyBackend) RunPeer(*snap.Peer, snap.Handler) error { return nil } +func (d *dummyBackend) PeerInfo(enode.ID) interface{} { return "Foo" } +func (d *dummyBackend) Handle(*snap.Peer, snap.Packet) error { return nil } + +type dummyRW struct { + code uint64 + data []byte + writeCount int +} + +func (d *dummyRW) ReadMsg() (p2p.Msg, error) { + return p2p.Msg{ + Code: d.code, + Payload: bytes.NewReader(d.data), + ReceivedAt: time.Now(), + Size: uint32(len(d.data)), + }, nil +} + +func (d *dummyRW) WriteMsg(msg p2p.Msg) error { + d.writeCount++ + return nil +} + +func doFuzz(input []byte, obj interface{}, code int) int { + if len(input) > 1024*4 { + return -1 + } + bc := getChain() + defer bc.Stop() + backend := &dummyBackend{bc} + fuzz.NewFromGoFuzz(input).Fuzz(obj) + var data []byte + switch p := obj.(type) { + case *snap.GetTrieNodesPacket: + p.Root = trieRoot + data, _ = rlp.EncodeToBytes(obj) + default: + data, _ = rlp.EncodeToBytes(obj) + } + cli := &dummyRW{ + code: uint64(code), + data: data, + } + peer := snap.NewFakePeer(65, "gazonk01", cli) + err := snap.HandleMessage(backend, peer) + switch { + case err == nil && cli.writeCount != 1: + panic(fmt.Sprintf("Expected 1 response, got %d", cli.writeCount)) + case err != nil && cli.writeCount != 0: + panic(fmt.Sprintf("Expected 0 response, got %d", cli.writeCount)) + } + return 1 +} + +// To run a fuzzer, do 
+// $ CGO_ENABLED=0 go-fuzz-build -func FuzzTrieNodes +// $ go-fuzz + +func FuzzARange(input []byte) int { + return doFuzz(input, &snap.GetAccountRangePacket{}, snap.GetAccountRangeMsg) +} +func FuzzSRange(input []byte) int { + return doFuzz(input, &snap.GetStorageRangesPacket{}, snap.GetStorageRangesMsg) +} +func FuzzByteCodes(input []byte) int { + return doFuzz(input, &snap.GetByteCodesPacket{}, snap.GetByteCodesMsg) +} +func FuzzTrieNodes(input []byte) int { + return doFuzz(input, &snap.GetTrieNodesPacket{}, snap.GetTrieNodesMsg) +} diff --git a/tests/solidity/contracts/OpCodes.sol b/tests/solidity/contracts/OpCodes.sol index 9e3a0ebb0..2a41a5fc2 100644 --- a/tests/solidity/contracts/OpCodes.sol +++ b/tests/solidity/contracts/OpCodes.sol @@ -206,7 +206,7 @@ contract OpCodes { assembly { let x := mload(0x40) //Find empty storage location using "free memory pointer" - mstore(x,sig) //Place signature at begining of empty storage + mstore(x,sig) //Place signature at beginning of empty storage mstore(add(x,0x04),a) // first address parameter. just after signature mstore(add(x,0x24),a) // 2nd address parameter - first padded. add 32 bytes (not 20 bytes) mstore(0x40,add(x,0x64)) // this is missing in other examples. Set free pointer before function call. so it is used by called function. @@ -225,7 +225,7 @@ contract OpCodes { //callcode assembly { let x := mload(0x40) //Find empty storage location using "free memory pointer" - mstore(x,sig) //Place signature at begining of empty storage + mstore(x,sig) //Place signature at beginning of empty storage mstore(add(x,0x04),a) // first address parameter. just after signature mstore(add(x,0x24),a) // 2nd address parameter - first padded. add 32 bytes (not 20 bytes) mstore(0x40,add(x,0x64)) // this is missing in other examples. Set free pointer before function call. so it is used by called function. @@ -244,7 +244,7 @@ contract OpCodes { //delegatecall assembly { let x := mload(0x40) //Find empty storage location using "free memory pointer" - mstore(x,sig) //Place signature at begining of empty storage + mstore(x,sig) //Place signature at beginning of empty storage mstore(add(x,0x04),a) // first address parameter. just after signature mstore(add(x,0x24),a) // 2nd address parameter - first padded. add 32 bytes (not 20 bytes) mstore(0x40,add(x,0x64)) // this is missing in other examples. Set free pointer before function call. so it is used by called function. diff --git a/tests/state_test.go b/tests/state_test.go index 9554e7563..78ecda040 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers/logger" ) func TestState(t *testing.T) { @@ -115,7 +116,7 @@ func withTrace(t *testing.T, gasLimit uint64, test func(vm.Config) error) { } buf := new(bytes.Buffer) w := bufio.NewWriter(buf) - tracer := vm.NewJSONLogger(&vm.LogConfig{}, w) + tracer := logger.NewJSONLogger(&logger.Config{}, w) config.Debug, config.Tracer = true, tracer err2 := test(config) if !reflect.DeepEqual(err, err2) { diff --git a/trie/iterator.go b/trie/iterator.go index 406f216c2..654772aa1 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -275,7 +275,7 @@ func (it *nodeIterator) seek(prefix []byte) error { } } -// init initializes the the iterator. +// init initializes the iterator. 
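The `tests/state_test.go` hunk earlier in this section follows the tracer relocation: the structured JSON logger now lives in `eth/tracers/logger` rather than `core/vm`. A small sketch of the new wiring, assuming the go-ethereum version this diff targets; it writes the trace to stdout instead of the test's buffer:

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
)

func main() {
	// logger.NewJSONLogger replaces the old vm.NewJSONLogger; the config
	// struct was renamed from vm.LogConfig to logger.Config.
	tracer := logger.NewJSONLogger(&logger.Config{}, os.Stdout)

	var config vm.Config
	config.Debug, config.Tracer = true, tracer
	// Hand config to the EVM / state-test runner exactly as before.
	_ = config
}
```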
 func (it *nodeIterator) init() (*nodeIteratorState, error) {
 	root := it.trie.Hash()
 	state := &nodeIteratorState{node: it.trie.root, index: -1}
diff --git a/trie/stacktrie.go b/trie/stacktrie.go
index f9ff10b62..76258c311 100644
--- a/trie/stacktrie.go
+++ b/trie/stacktrie.go
@@ -54,12 +54,11 @@ func returnToPool(st *StackTrie) {
 // in order. Once it determines that a subtree will no longer be inserted
 // into, it will hash it and free up the memory it uses.
 type StackTrie struct {
-	nodeType  uint8                // node type (as in branch, ext, leaf)
-	val       []byte               // value contained by this node if it's a leaf
-	key       []byte               // key chunk covered by this (full|ext) node
-	keyOffset int                  // offset of the key chunk inside a full key
-	children  [16]*StackTrie       // list of children (for fullnodes and exts)
-	db        ethdb.KeyValueWriter // Pointer to the commit db, can be nil
+	nodeType uint8                // node type (as in branch, ext, leaf)
+	val      []byte               // value contained by this node if it's a leaf
+	key      []byte               // key chunk covered by this (leaf|ext) node
+	children [16]*StackTrie       // list of children (for branch and exts)
+	db       ethdb.KeyValueWriter // Pointer to the commit db, can be nil
 }
 
 // NewStackTrie allocates and initializes an empty trie.
@@ -90,15 +89,13 @@ func (st *StackTrie) MarshalBinary() (data []byte, err error) {
 		w = bufio.NewWriter(&b)
 	)
 	if err := gob.NewEncoder(w).Encode(struct {
-		Nodetype  uint8
-		Val       []byte
-		Key       []byte
-		KeyOffset uint8
+		Nodetype uint8
+		Val      []byte
+		Key      []byte
 	}{
 		st.nodeType,
 		st.val,
 		st.key,
-		uint8(st.keyOffset),
 	}); err != nil {
 		return nil, err
 	}
@@ -126,16 +123,14 @@ func (st *StackTrie) UnmarshalBinary(data []byte) error {
 func (st *StackTrie) unmarshalBinary(r io.Reader) error {
 	var dec struct {
-		Nodetype  uint8
-		Val       []byte
-		Key       []byte
-		KeyOffset uint8
+		Nodetype uint8
+		Val      []byte
+		Key      []byte
 	}
 	gob.NewDecoder(r).Decode(&dec)
 	st.nodeType = dec.Nodetype
 	st.val = dec.Val
 	st.key = dec.Key
-	st.keyOffset = int(dec.KeyOffset)
 
 	var hasChild = make([]byte, 1)
 	for i := range st.children {
@@ -160,20 +155,18 @@ func (st *StackTrie) setDb(db ethdb.KeyValueWriter) {
 	}
 }
 
-func newLeaf(ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
+func newLeaf(key, val []byte, db ethdb.KeyValueWriter) *StackTrie {
 	st := stackTrieFromPool(db)
 	st.nodeType = leafNode
-	st.keyOffset = ko
-	st.key = append(st.key, key[ko:]...)
+	st.key = append(st.key, key...)
 	st.val = val
 	return st
 }
 
-func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
+func newExt(key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie {
 	st := stackTrieFromPool(db)
 	st.nodeType = extNode
-	st.keyOffset = ko
-	st.key = append(st.key, key[ko:]...)
+	st.key = append(st.key, key...)
 	st.children[0] = child
 	return st
 }
@@ -211,17 +204,18 @@ func (st *StackTrie) Reset() {
 		st.children[i] = nil
 	}
 	st.nodeType = emptyNode
-	st.keyOffset = 0
 }
 
-// Helper function that, given a full key, determines the index
-// at which the chunk pointed by st.keyOffset is different from
-// the same chunk in the full key.
+// Helper function that, given a full key, determines the index
+// at which this node's key chunk starts to differ from the
+// corresponding part of the full key.
 func (st *StackTrie) getDiffIndex(key []byte) int {
-	diffindex := 0
-	for ; diffindex < len(st.key) && st.key[diffindex] == key[st.keyOffset+diffindex]; diffindex++ {
+	for idx, nibble := range st.key {
+		if nibble != key[idx] {
+			return idx
+		}
 	}
-	return diffindex
+	return len(st.key)
 }
 
 // Helper function that inserts a (key, value) pair into
@@ -229,7 +223,7 @@ func (st *StackTrie) getDiffIndex(key []byte) int {
 func (st *StackTrie) insert(key, value []byte) {
 	switch st.nodeType {
 	case branchNode: /* Branch */
-		idx := int(key[st.keyOffset])
+		idx := int(key[0])
 		// Unresolve elder siblings
 		for i := idx - 1; i >= 0; i-- {
 			if st.children[i] != nil {
@@ -241,10 +235,10 @@
 		}
 		// Add new child
 		if st.children[idx] == nil {
-			st.children[idx] = stackTrieFromPool(st.db)
-			st.children[idx].keyOffset = st.keyOffset + 1
+			st.children[idx] = newLeaf(key[1:], value, st.db)
+		} else {
+			st.children[idx].insert(key[1:], value)
 		}
-		st.children[idx].insert(key, value)
 	case extNode: /* Ext */
 		// Compare both key chunks and see where they differ
 		diffidx := st.getDiffIndex(key)
@@ -257,7 +251,7 @@
 		if diffidx == len(st.key) {
 			// Ext key and key segment are identical, recurse into
 			// the child node.
-			st.children[0].insert(key, value)
+			st.children[0].insert(key[diffidx:], value)
 			return
 		}
 		// Save the original part. Depending if the break is
@@ -266,7 +260,7 @@
 		// node directly.
 		var n *StackTrie
 		if diffidx < len(st.key)-1 {
-			n = newExt(diffidx+1, st.key, st.children[0], st.db)
+			n = newExt(st.key[diffidx+1:], st.children[0], st.db)
 		} else {
 			// Break on the last byte, no need to insert
 			// an extension node: reuse the current node
@@ -288,15 +282,14 @@
 			// node.
 			st.children[0] = stackTrieFromPool(st.db)
 			st.children[0].nodeType = branchNode
-			st.children[0].keyOffset = st.keyOffset + diffidx
 			p = st.children[0]
 		}
 		// Create a leaf for the inserted part
-		o := newLeaf(st.keyOffset+diffidx+1, key, value, st.db)
+		o := newLeaf(key[diffidx+1:], value, st.db)
 		// Insert both child leaves where they belong:
 		origIdx := st.key[diffidx]
-		newIdx := key[diffidx+st.keyOffset]
+		newIdx := key[diffidx]
 		p.children[origIdx] = n
 		p.children[newIdx] = o
 		st.key = st.key[:diffidx]
@@ -330,7 +323,6 @@
 			st.nodeType = extNode
 			st.children[0] = NewStackTrie(st.db)
 			st.children[0].nodeType = branchNode
-			st.children[0].keyOffset = st.keyOffset + diffidx
 			p = st.children[0]
 		}
 
@@ -339,11 +331,11 @@
 		// The child leave will be hashed directly in order to
 		// free up some memory.
 		origIdx := st.key[diffidx]
-		p.children[origIdx] = newLeaf(diffidx+1, st.key, st.val, st.db)
+		p.children[origIdx] = newLeaf(st.key[diffidx+1:], st.val, st.db)
 		p.children[origIdx].hash()
 
-		newIdx := key[diffidx+st.keyOffset]
-		p.children[newIdx] = newLeaf(p.keyOffset+1, key, value, st.db)
+		newIdx := key[diffidx]
+		p.children[newIdx] = newLeaf(key[diffidx+1:], value, st.db)
 
 		// Finally, cut off the key part that has been passed
 		// over to the children.
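The `stacktrie.go` changes above remove the `keyOffset` bookkeeping altogether: every recursive `insert` now slices the consumed prefix off the key before descending, so a node always compares its chunk against the incoming key from position zero. A standalone toy illustration of that invariant (not the library's API, just the comparison logic):

```go
package main

import "fmt"

// diffIndex mirrors the new getDiffIndex: with relative keys, a node's
// chunk is compared against the incoming key with no offset arithmetic.
func diffIndex(nodeKey, key []byte) int {
	for i, nibble := range nodeKey {
		if nibble != key[i] {
			return i
		}
	}
	return len(nodeKey)
}

func main() {
	nodeKey := []byte{0x1, 0x2, 0x3}  // chunk held by an ext/leaf node
	key := []byte{0x1, 0x2, 0x7, 0x8} // key being inserted, already relative
	i := diffIndex(nodeKey, key)
	fmt.Println(i) // 2: the keys share a two-nibble prefix
	// key[i] picks the branch slot; key[i+1:] is what the new leaf
	// stores -- matching the newLeaf(key[diffidx+1:], ...) calls above.
	fmt.Println(key[i], key[i+1:]) // 7 [8]
}
```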
@@ -351,7 +343,7 @@ func (st *StackTrie) insert(key, value []byte) { st.val = nil case emptyNode: /* Empty */ st.nodeType = leafNode - st.key = key[st.keyOffset:] + st.key = key st.val = value case hashedNode: panic("trying to insert into hash") diff --git a/trie/sync.go b/trie/sync.go index 3a6076ff8..d6e435f93 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -128,11 +128,10 @@ type Sync struct { codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash queue *prque.Prque // Priority queue with the pending requests fetches map[int]int // Number of active fetches per trie node depth - bloom *SyncBloom // Bloom filter for fast state existence checks } // NewSync creates a new trie data download scheduler. -func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync { +func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback) *Sync { ts := &Sync{ database: database, membatch: newSyncMemBatch(), @@ -140,7 +139,6 @@ func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallb codeReqs: make(map[common.Hash]*request), queue: prque.New(nil), fetches: make(map[int]int), - bloom: bloom, } ts.AddSubTrie(root, nil, common.Hash{}, callback) return ts @@ -155,16 +153,11 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, cal if s.membatch.hasNode(root) { return } - if s.bloom == nil || s.bloom.Contains(root[:]) { - // Bloom filter says this might be a duplicate, double check. - // If database says yes, then at least the trie node is present - // and we hold the assumption that it's NOT legacy contract code. - blob := rawdb.ReadTrieNode(s.database, root) - if len(blob) > 0 { - return - } - // False positive, bump fault meter - bloomFaultMeter.Mark(1) + // If database says this is a duplicate, then at least the trie node is + // present, and we hold the assumption that it's NOT legacy contract code. + blob := rawdb.ReadTrieNode(s.database, root) + if len(blob) > 0 { + return } // Assemble the new sub-trie sync request req := &request{ @@ -195,18 +188,13 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) { if s.membatch.hasCode(hash) { return } - if s.bloom == nil || s.bloom.Contains(hash[:]) { - // Bloom filter says this might be a duplicate, double check. - // If database says yes, the blob is present for sure. - // Note we only check the existence with new code scheme, fast - // sync is expected to run with a fresh new node. Even there - // exists the code with legacy format, fetch and store with - // new scheme anyway. - if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 { - return - } - // False positive, bump fault meter - bloomFaultMeter.Mark(1) + // If database says duplicate, the blob is present for sure. + // Note we only check the existence with new code scheme, fast + // sync is expected to run with a fresh new node. Even there + // exists the code with legacy format, fetch and store with + // new scheme anyway. 
+	if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
+		return
 	}
 	// Assemble the new sub-trie sync request
 	req := &request{
@@ -313,15 +301,9 @@ func (s *Sync) Commit(dbw ethdb.Batch) error {
 	// Dump the membatch into a database dbw
 	for key, value := range s.membatch.nodes {
 		rawdb.WriteTrieNode(dbw, key, value)
-		if s.bloom != nil {
-			s.bloom.Add(key[:])
-		}
 	}
 	for key, value := range s.membatch.codes {
 		rawdb.WriteCode(dbw, key, value)
-		if s.bloom != nil {
-			s.bloom.Add(key[:])
-		}
 	}
 	// Drop the membatch data and return
 	s.membatch = newSyncMemBatch()
@@ -417,15 +399,10 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
 			if s.membatch.hasNode(hash) {
 				continue
 			}
-			if s.bloom == nil || s.bloom.Contains(node) {
-				// Bloom filter says this might be a duplicate, double check.
-				// If database says yes, then at least the trie node is present
-				// and we hold the assumption that it's NOT legacy contract code.
-				if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
-					continue
-				}
-				// False positive, bump fault meter
-				bloomFaultMeter.Mark(1)
+			// If database says duplicate, then at least the trie node is present
+			// and we hold the assumption that it's NOT legacy contract code.
+			if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
+				continue
 			}
 			// Locally unknown node, schedule for retrieval
 			requests = append(requests, &request{
diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go
deleted file mode 100644
index 91e5e6711..000000000
--- a/trie/sync_bloom.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package trie
-
-import (
-	"encoding/binary"
-	"fmt"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/rawdb"
-	"github.com/ethereum/go-ethereum/ethdb"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/metrics"
-	bloomfilter "github.com/holiman/bloomfilter/v2"
-)
-
-var (
-	bloomAddMeter   = metrics.NewRegisteredMeter("trie/bloom/add", nil)
-	bloomLoadMeter  = metrics.NewRegisteredMeter("trie/bloom/load", nil)
-	bloomTestMeter  = metrics.NewRegisteredMeter("trie/bloom/test", nil)
-	bloomMissMeter  = metrics.NewRegisteredMeter("trie/bloom/miss", nil)
-	bloomFaultMeter = metrics.NewRegisteredMeter("trie/bloom/fault", nil)
-	bloomErrorGauge = metrics.NewRegisteredGauge("trie/bloom/error", nil)
-)
-
-// SyncBloom is a bloom filter used during fast sync to quickly decide if a trie
-// node or contract code already exists on disk or not. It self populates from the
-// provided disk database on creation in a background thread and will only start
-// returning live results once that's finished.
-type SyncBloom struct { - bloom *bloomfilter.Filter - inited uint32 - closer sync.Once - closed uint32 - pend sync.WaitGroup - closeCh chan struct{} -} - -// NewSyncBloom creates a new bloom filter of the given size (in megabytes) and -// initializes it from the database. The bloom is hard coded to use 3 filters. -func NewSyncBloom(memory uint64, database ethdb.Iteratee) *SyncBloom { - // Create the bloom filter to track known trie nodes - bloom, err := bloomfilter.New(memory*1024*1024*8, 4) - if err != nil { - panic(fmt.Sprintf("failed to create bloom: %v", err)) - } - log.Info("Allocated fast sync bloom", "size", common.StorageSize(memory*1024*1024)) - - // Assemble the fast sync bloom and init it from previous sessions - b := &SyncBloom{ - bloom: bloom, - closeCh: make(chan struct{}), - } - b.pend.Add(2) - go func() { - defer b.pend.Done() - b.init(database) - }() - go func() { - defer b.pend.Done() - b.meter() - }() - return b -} - -// init iterates over the database, pushing every trie hash into the bloom filter. -func (b *SyncBloom) init(database ethdb.Iteratee) { - // Iterate over the database, but restart every now and again to avoid holding - // a persistent snapshot since fast sync can push a ton of data concurrently, - // bloating the disk. - // - // Note, this is fine, because everything inserted into leveldb by fast sync is - // also pushed into the bloom directly, so we're not missing anything when the - // iterator is swapped out for a new one. - it := database.NewIterator(nil, nil) - - var ( - start = time.Now() - swap = time.Now() - ) - for it.Next() && atomic.LoadUint32(&b.closed) == 0 { - // If the database entry is a trie node, add it to the bloom - key := it.Key() - if len(key) == common.HashLength { - b.bloom.AddHash(binary.BigEndian.Uint64(key)) - bloomLoadMeter.Mark(1) - } else if ok, hash := rawdb.IsCodeKey(key); ok { - // If the database entry is a contract code, add it to the bloom - b.bloom.AddHash(binary.BigEndian.Uint64(hash)) - bloomLoadMeter.Mark(1) - } - // If enough time elapsed since the last iterator swap, restart - if time.Since(swap) > 8*time.Second { - key := common.CopyBytes(it.Key()) - - it.Release() - it = database.NewIterator(nil, key) - - log.Info("Initializing state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability(), "elapsed", common.PrettyDuration(time.Since(start))) - swap = time.Now() - } - } - it.Release() - - // Mark the bloom filter inited and return - log.Info("Initialized state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability(), "elapsed", common.PrettyDuration(time.Since(start))) - atomic.StoreUint32(&b.inited, 1) -} - -// meter periodically recalculates the false positive error rate of the bloom -// filter and reports it in a metric. -func (b *SyncBloom) meter() { - // check every second - tick := time.NewTicker(1 * time.Second) - defer tick.Stop() - - for { - select { - case <-tick.C: - // Report the current error ration. No floats, lame, scale it up. - bloomErrorGauge.Update(int64(b.bloom.FalsePosititveProbability() * 100000)) - case <-b.closeCh: - return - } - } -} - -// Close terminates any background initializer still running and releases all the -// memory allocated for the bloom. 
-func (b *SyncBloom) Close() error { - b.closer.Do(func() { - // Ensure the initializer is stopped - atomic.StoreUint32(&b.closed, 1) - close(b.closeCh) - b.pend.Wait() - - // Wipe the bloom, but mark it "uninited" just in case someone attempts an access - log.Info("Deallocated state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability()) - - atomic.StoreUint32(&b.inited, 0) - b.bloom = nil - }) - return nil -} - -// Add inserts a new trie node hash into the bloom filter. -func (b *SyncBloom) Add(hash []byte) { - if atomic.LoadUint32(&b.closed) == 1 { - return - } - b.bloom.AddHash(binary.BigEndian.Uint64(hash)) - bloomAddMeter.Mark(1) -} - -// Contains tests if the bloom filter contains the given hash: -// - false: the bloom definitely does not contain hash -// - true: the bloom maybe contains hash -// -// While the bloom is being initialized, any query will return true. -func (b *SyncBloom) Contains(hash []byte) bool { - bloomTestMeter.Mark(1) - if atomic.LoadUint32(&b.inited) == 0 { - // We didn't load all the trie nodes from the previous run of Geth yet. As - // such, we can't say for sure if a hash is not present for anything. Until - // the init is done, we're faking "possible presence" for everything. - return true - } - // Bloom initialized, check the real one and report any successful misses - maybe := b.bloom.ContainsHash(binary.BigEndian.Uint64(hash)) - if !maybe { - bloomMissMeter.Mark(1) - } - return maybe -} diff --git a/trie/sync_test.go b/trie/sync_test.go index cb3283875..970730b67 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -95,7 +95,7 @@ func TestEmptySync(t *testing.T) { emptyB, _ := New(emptyRoot, dbB) for i, trie := range []*Trie{emptyA, emptyB} { - sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New())) + sync := NewSync(trie.Hash(), memorydb.New(), nil) if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 { t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes) } @@ -116,7 +116,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool) { // Create a destination trie and sync with the scheduler diskdb := memorydb.New() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) + sched := NewSync(srcTrie.Hash(), diskdb, nil) nodes, paths, codes := sched.Missing(count) var ( @@ -177,7 +177,7 @@ func TestIterativeDelayedSync(t *testing.T) { // Create a destination trie and sync with the scheduler diskdb := memorydb.New() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) + sched := NewSync(srcTrie.Hash(), diskdb, nil) nodes, _, codes := sched.Missing(10000) queue := append(append([]common.Hash{}, nodes...), codes...) 
@@ -223,7 +223,7 @@ func testIterativeRandomSync(t *testing.T, count int) { // Create a destination trie and sync with the scheduler diskdb := memorydb.New() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) + sched := NewSync(srcTrie.Hash(), diskdb, nil) queue := make(map[common.Hash]struct{}) nodes, _, codes := sched.Missing(count) @@ -271,7 +271,7 @@ func TestIterativeRandomDelayedSync(t *testing.T) { // Create a destination trie and sync with the scheduler diskdb := memorydb.New() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) + sched := NewSync(srcTrie.Hash(), diskdb, nil) queue := make(map[common.Hash]struct{}) nodes, _, codes := sched.Missing(10000) @@ -324,7 +324,7 @@ func TestDuplicateAvoidanceSync(t *testing.T) { // Create a destination trie and sync with the scheduler diskdb := memorydb.New() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) + sched := NewSync(srcTrie.Hash(), diskdb, nil) nodes, _, codes := sched.Missing(0) queue := append(append([]common.Hash{}, nodes...), codes...) @@ -371,7 +371,7 @@ func TestIncompleteSync(t *testing.T) { // Create a destination trie and sync with the scheduler diskdb := memorydb.New() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) + sched := NewSync(srcTrie.Hash(), diskdb, nil) var added []common.Hash @@ -431,7 +431,7 @@ func TestSyncOrdering(t *testing.T) { // Create a destination trie and sync with the scheduler, tracking the requests diskdb := memorydb.New() triedb := NewDatabase(diskdb) - sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) + sched := NewSync(srcTrie.Hash(), diskdb, nil) nodes, paths, _ := sched.Missing(1) queue := append([]common.Hash{}, nodes...)
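With the sync bloom deleted, every `trie.NewSync` call site above simply drops the trailing filter argument; duplicate checks now go straight to the key-value store. A minimal sketch of the new call shape, using the same in-memory database as the tests and a zero root purely as a placeholder:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := memorydb.New()

	// Previously: trie.NewSync(root, db, nil, trie.NewSyncBloom(1, db)).
	// The bloom (and its background warm-up pass over the database) is
	// gone; existence checks now read the key-value store directly.
	sched := trie.NewSync(common.Hash{}, db, nil)

	// A zero root is never resolvable; real callers pass a state root.
	nodes, paths, codes := sched.Missing(1)
	fmt.Println(len(nodes), len(paths), len(codes))
}
```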