SCANIATM 8 anos atrás
commit
830c6487c3
100 arquivos alterados com 31768 adições e 0 exclusões
  1. 19 0
      AUTHORS
  2. 426 0
      CHANGES
  3. 278 0
      COPYING
  4. 144 0
      FAQ
  5. 156 0
      FEATURES
  6. 439 0
      INSTALL
  7. 430 0
      README
  8. 23 0
      README.md
  9. 105 0
      SUPPORT
  10. 3 0
      TODO
  11. 58 0
      addons/mrtg/flux-mrtg-update.sh
  12. 17 0
      addons/mrtg/flux-mrtg.conf
  13. 456 0
      addons/mrtg/fluxpoller.pl
  14. 129 0
      addons/mrtg/mrtg.flux.cfg
  15. 151 0
      addons/mrtg/mrtg.flux.examples.cfg
  16. BIN
      clients/tornado/BitTornado-0.3.15.tar.bz2
  17. BIN
      clients/tornado/BitTornado-0.3.17.tar.bz2
  18. BIN
      clients/tornado/BitTornado-0.3.18.tar.bz2
  19. 46 0
      clients/transmission/INSTALL
  20. BIN
      clients/transmission/Transmission-0.72_tfCLI-svn3318.tar.bz2
  21. BIN
      clients/transmission/Transmission-0.96_tfCLI-svn3355.tar.bz2
  22. BIN
      clients/transmission/Transmission-1.06_tfCLI-svn3356.tar.bz2
  23. 287 0
      doc/azureus.txt
  24. 5610 0
      doc/manual.txt
  25. 5 0
      html/.buildpath
  26. 124 0
      html/admin.php
  27. 2 0
      html/bin/.htaccess
  28. 243 0
      html/bin/check/check-cli.php
  29. 357 0
      html/bin/check/check.pl
  30. 340 0
      html/bin/clients/fluazu/dopal/COPYING
  31. 9 0
      html/bin/clients/fluazu/dopal/README
  32. 64 0
      html/bin/clients/fluazu/dopal/__init__.py
  33. 352 0
      html/bin/clients/fluazu/dopal/aztypes.py
  34. 4817 0
      html/bin/clients/fluazu/dopal/class_defs.py
  35. 318 0
      html/bin/clients/fluazu/dopal/classes.py
  36. 510 0
      html/bin/clients/fluazu/dopal/convert.py
  37. 1021 0
      html/bin/clients/fluazu/dopal/core.py
  38. 167 0
      html/bin/clients/fluazu/dopal/debug.py
  39. 466 0
      html/bin/clients/fluazu/dopal/errors.py
  40. 92 0
      html/bin/clients/fluazu/dopal/interact.py
  41. 34 0
      html/bin/clients/fluazu/dopal/logutils.py
  42. 79 0
      html/bin/clients/fluazu/dopal/main.py
  43. 326 0
      html/bin/clients/fluazu/dopal/obj_impl.py
  44. 565 0
      html/bin/clients/fluazu/dopal/objects.py
  45. 60 0
      html/bin/clients/fluazu/dopal/persistency.py
  46. 1410 0
      html/bin/clients/fluazu/dopal/scripting.py
  47. 186 0
      html/bin/clients/fluazu/dopal/utils.py
  48. 207 0
      html/bin/clients/fluazu/dopal/xmlutils.py
  49. 77 0
      html/bin/clients/fluazu/fluazu.py
  50. 792 0
      html/bin/clients/fluazu/fluazu/FluAzuD.py
  51. 128 0
      html/bin/clients/fluazu/fluazu/StatFile.py
  52. 605 0
      html/bin/clients/fluazu/fluazu/Transfer.py
  53. 125 0
      html/bin/clients/fluazu/fluazu/TransferFile.py
  54. 26 0
      html/bin/clients/fluazu/fluazu/__init__.py
  55. 59 0
      html/bin/clients/fluazu/fluazu/output.py
  56. 1356 0
      html/bin/clients/mainline/BTL/CMap.py
  57. 1273 0
      html/bin/clients/mainline/BTL/CMultiMap.py
  58. 333 0
      html/bin/clients/mainline/BTL/ConnectionRateLimitReactor.py
  59. 415 0
      html/bin/clients/mainline/BTL/ConvertedMetainfo.py
  60. 56 0
      html/bin/clients/mainline/BTL/CurrentRateMeasure.py
  61. 302 0
      html/bin/clients/mainline/BTL/DictWithLists.py
  62. 95 0
      html/bin/clients/mainline/BTL/EventLoop.py
  63. 221 0
      html/bin/clients/mainline/BTL/HostIP.py
  64. 49 0
      html/bin/clients/mainline/BTL/IPTools.py
  65. 27 0
      html/bin/clients/mainline/BTL/LIFOQueue.py
  66. 111 0
      html/bin/clients/mainline/BTL/Lists.py
  67. 59 0
      html/bin/clients/mainline/BTL/Luciana.py
  68. 19 0
      html/bin/clients/mainline/BTL/Map.py
  69. 1062 0
      html/bin/clients/mainline/BTL/PMap.py
  70. 62 0
      html/bin/clients/mainline/BTL/SaneThreadedResolver.py
  71. 26 0
      html/bin/clients/mainline/BTL/ThreadProxy.py
  72. 69 0
      html/bin/clients/mainline/BTL/TimeLeftEstimator.py
  73. 21 0
      html/bin/clients/mainline/BTL/__init__.py
  74. BIN
      html/bin/clients/mainline/BTL/addrmap.dat
  75. 36 0
      html/bin/clients/mainline/BTL/asyncexecutor.py
  76. 70 0
      html/bin/clients/mainline/BTL/atexit_threads.py
  77. 77 0
      html/bin/clients/mainline/BTL/auth_xmlrpc.py
  78. 310 0
      html/bin/clients/mainline/BTL/bdistutils.py
  79. 131 0
      html/bin/clients/mainline/BTL/bencode.py
  80. 96 0
      html/bin/clients/mainline/BTL/bitfield.py
  81. 85 0
      html/bin/clients/mainline/BTL/brpc.py
  82. 133 0
      html/bin/clients/mainline/BTL/brpclib.py
  83. 88 0
      html/bin/clients/mainline/BTL/btformats.py
  84. 103 0
      html/bin/clients/mainline/BTL/btl_string.py
  85. 180 0
      html/bin/clients/mainline/BTL/buffer.py
  86. 76 0
      html/bin/clients/mainline/BTL/cache.py
  87. 370 0
      html/bin/clients/mainline/BTL/cache_map.py
  88. 196 0
      html/bin/clients/mainline/BTL/circular_list.py
  89. 80 0
      html/bin/clients/mainline/BTL/connection_cache.py
  90. 79 0
      html/bin/clients/mainline/BTL/coro.py
  91. 157 0
      html/bin/clients/mainline/BTL/cpu_meter.py
  92. 157 0
      html/bin/clients/mainline/BTL/crypto_message.py
  93. 222 0
      html/bin/clients/mainline/BTL/daemon.py
  94. 19 0
      html/bin/clients/mainline/BTL/decorate.py
  95. 177 0
      html/bin/clients/mainline/BTL/defer.py
  96. 219 0
      html/bin/clients/mainline/BTL/dlock.py
  97. 367 0
      html/bin/clients/mainline/BTL/ebencode.py
  98. 85 0
      html/bin/clients/mainline/BTL/ebrpc.py
  99. 133 0
      html/bin/clients/mainline/BTL/ebrpclib.py
  100. 223 0
      html/bin/clients/mainline/BTL/epollreactor.py

+ 19 - 0
AUTHORS

@@ -0,0 +1,19 @@
+=======================================================================
+$Id: AUTHORS 3131 2007-06-26 18:16:09Z b4rt $
+=======================================================================
+AUTHORS for torrentflux-b4rt <http://tf-b4rt.berlios.de/>
+=======================================================================
+
+torrentflux-b4rt is written and maintained by:
+
+* b4rt <b4rt@users.berlios.de>
+
+* lordnor <lordnor@users.berlios.de>
+
+* msn_exploder <msn_exploder@users.berlios.de>
+
+* munk <munk@users.berlios.de>
+
+* warion <warion@users.berlios.de>
+
+* danez <danez@users.berlios.de>

+ 426 - 0
CHANGES

@@ -0,0 +1,426 @@
+================================================================================
+$Id: CHANGES 3366 2008-03-22 16:26:45Z b4rt $
+================================================================================
+
+
+torrentflux-b4rt 1.0 beta2
+http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=3366
+
+
+***** changes *****
+
+- Transmission-Transfer-Client updates
+
+- documentation updates (work in progress)
+
+- defaultX theme updates
+
+- icon updates
+
+- sqlite: better catch connection errors (e.g. when db-creation fails because 
+  of missing rights) (3304)
+
+- ajax update now uses %-value for updating the bar (3239)
+
+- index: modify run/seed tooltips to be different between advanced & quick
+  modes (3234)
+
+- torrent search: small tweak to display mininova results sorted by seed (3231)
+
+- MaintenanceAndRepair: added fluazu-maintenance (analog fluxd, delete leftovers
+  if found but no daemon running) (3214)
+
+- index: make "processing request"-div work on IE (3171)
+
+- admin/server:
+  * split code/html in file validation (3212, 3208)
+  * extend validation-check for transmissioncli, to check if file is really
+    tfb-specific version (3210, 3206)
+  * for incoming path, allow subdirs (3169)
+
+- formatBytestokBMBGBTB now also returns filessize in Bytes if
+  size < 1024 (3154)
+
+- admin/stats: move 'Use Compression' option directly before
+  'Compression Level' (3145)
+
+- SimpleHTTP:
+  * don't send empty cookie line (3250)
+  * added torrentspy support (3194)
+  * added demonoid support and fixed isohunt support (3172)
+  * handle https (requires php with openssl module) (3123)
+
+
+
+***** fixes *****
+
+- various theme-related bugfixes
+
+- netstat:
+  * Fixes a bug in detecting ports in use on BSD, which caused 
+    transmission to attempt to bind to the same port every time you 
+    started a new torrent. (3321)
+  * Fixed bug in netstatConnectionsSum for FreeBSD, altered system calls
+    to be more resource-efficient by having sockstat output the right
+    stuff and leave less for grep/awk to parse. (3322)
+  * Fixed a bug in the netstat code where fbsd machines might see the 
+    port of a running transmission transfer as 80, since transmission
+    opens both port 80 and a high-numbered port from the minport-maxport
+    range. (3323)
+  * Fixed a bug that would cause transmission not to select the next 
+    available port if, and only if, there was no outgoing connection 
+    from the client on the previous port number, and you are using BSD. 
+    Now, we will always select a valid port number. (3326)
+
+- sqlite: fix some SQL queries with joins (use field aliases to get same 
+  behavior as other dbs) Thanks to Korax1 on IRC (3307)
+
+- fluxd rssad filter edit-page: fix superfluous HTML-encoding of 
+  filter entries (3305)
+
+- bandwidth-bar: fixed style-issue if up or download is above 100% (3295)
+
+- ui: emit appropriate charset depending on selected language (3293)
+
+- nzbperl:
+  * fix status messages (3301)
+  * add --keepbrokenbin when "Bad File Action" is "Download Anyway" (3298)
+  * fix 150 kB/s speed limit (3288)
+  * fix missing newline in output (3287)
+
+- xfer-display:
+  * fix B/MB mismatch when calculating % (3286)
+  * fix bar width in themes other than defaultX (3286)
+
+- fluxcli.php: fix xfer command (amounts in MB were handled as B) (3260)
+
+- wget-Transfer-Client: ps --pid switch does not exist on FreeBSD, use -p
+  instead (3237)
+
+- fluxd:
+  * fix "cannot determine peer address" on FreeBSD (3290, 3291, 3292, 3296)
+  * small fix in sub "set" (3233)
+
+- SimpleHTTP: accept URLs containing '+' (pass them thru, don't re-encode
+  them) (3232)
+
+- transfer-control: avoid pid file access if not needed (3222)
+
+- setup.php: small fix to detect sqlite correctly (3221)
+
+- admin/server: fix php-cli checks freezing if php binary path is a cgi/fcgi
+  php (3213)
+
+- fluazu/dopal: make it work even with -OO flag on all Python versions (3205)
+
+- prevent tf_log table from growing too much and loadavg from increasing
+  insanely (3193)
+
+- fluxd/fluxcli: maintenance with torrent-restart did not work (3179)
+
+- inserting empty cookie id (cid) value caused errors on PostgreSQL (3178)
+
+- dir view:
+  * support php-version without unicode- and/or xml-support (3225)
+  * convert UTF-8 names to ISO-8859-1 (3170)
+
+- torrent search:
+  * Search engine: fix for isohunt search engine, thanks to coneybeare (3311)
+  * TorrentSpy: switch to a download URL that works for now (3282)
+  * fix parsing of mininova results so the number of comments isn't displayed
+    as part of the torrent name (3231)
+  * broken if Transfer File Download was disabled (3166)
+
+- unified use of HTTP no-cache headers (pragma & cache-control) (3164)
+
+- added a line to allow for .prio files -- people were getting massive amounts
+  of "Invalid Transfer" errors without this line (3162)
+
+- index: new injected torrents now have &nbsp; again in columns t.up,
+  t.down (3160)
+
+- some php-versions report ini_get() as "on" or "off" (3159)
+
+- the size of a dir returned by dirsize() is now returned as float instead of
+  string (3158)
+
+- index: new torrents were showing "B" for total up/down (3157)
+
+- fluxd/fluxinet: stop warnings about undefined variables in fluxinet.pm (3151)
+
+- index: in estimated time '00' fields were skipped even if not at front (3149)
+
+- stats.php: display usage when called with no _GET args and autologin cookie is
+  being used (3139)
+
+- fluxd/rssad:
+  * in admin pages, modify rssad filter templates so the correct page title is
+    displayed when adding/editing/saving/deleting filters (3150)
+  * links containing html entities were not handled properly (3134, 3130)
+
+- transmissioncli: use progress to indicate if torrent is finished or not (3128)
+
+- missing HTML-encoding: torrent comment (3125), superadmin changelog (3122)
+
+
+
+***** updates *****
+
+- Torrent Transfer-Clients:
+  * Transmission v1.06 -- modified for tf-b4rt
+  * Transmission v0.96 -- modified for tf-b4rt
+  * Mainline (BitTorrent) v5.2.0
+
+- ADOdb V4.98
+
+
+--------------------------------------------------------------------------------
+--------------------------------------------------------------------------------
+
+
+  torrentflux-b4rt 1.0 beta1
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=3116
+
+
+***** changes *****
+
+- refactoring, code-cleanup and code-tidy in various parts
+
+- UI-cleanup and -tidy (including Readability, Usability and Consistency)
+
+- added Inline-Help
+
+- added a manual (doc/manual.txt, work in progress)
+
+- changed time- and date-format in language-files to the format used in the
+  rest of tfb (3111)
+
+- enlarge sharekill input fields (3 -> 4 chars)
+
+- extended checks to validate the selected theme
+
+- check-cli.php: added check that current interpreter is really the
+  cli-version of PHP (3081)
+
+- new "processing request"-divs in default theme (3064)
+
+- cleaned up code so more themes are able to use ajax (3061)
+
+- admin - serverSettings: extend the validation-check for PHP-binary.
+  also check if file is really the cli-version of PHP (3056)
+
+- added <label> elements to all form controls
+
+- new dispatcher single-transfer-ops wipe and deleteWithData (3017 + 3018)
+  * wipe:
+    http://tfb/dispatcher.php?action=wipe&transfer=tname.text
+  * deleteWithData:
+    http://tflux/dispatcher.php?action=deleteWithData&transfer=tname.text
+
+- Transfer-Profiles (2978, 2979, 2980)
+  details: https://tf-b4rt.berlios.de/forum/index.php/topic,614.0.html
+
+- image.php: removed referer-check from internal images (2918)
+
+- fluxd-refactoring (includes the use of config-files and pluggable
+  service-modules)
+
+- SimpleHTTP: detect early and log error if URL is not a valid HTTP URL (2820)
+
+- Added HTTP redirection following functionality. Follow up to n (5)
+  redirections (300, 301, 302, 303, 307 HTTP status codes).
+
+- removed some of the (in-)sanity checks to prevent invocation from web
+  from the php-cli-scripts (2768)
+
+- new icons and images
+
+- fluazu: minors (non-functional, eg print more exceptions) and code-tidy
+  fluazu-version: 0.06
+
+- various checks for requirements in various components
+
+
+***** fixes *****
+
+- various minor fixes in various parts
+
+- spelling and typos in various strings
+
+- fixed: undefined variable when updating search-engine-settings and nothing
+  is selected in form (3113)
+
+- fixed: endless-redirect-loop when db-config-file, setup- and upgrade-scripts
+  are missing (3110)
+
+- fixed: setup.php and upgrade.php login-checks (3109)
+
+- fixed: when using filepriority the *.prio file was written incorrectly.
+  (last folder was left out) (3074)
+
+- fixed: transfer-control page always showed "Last used Settings" profile,
+  instead of "default" if transfer had never been started before (3067)
+
+- fixed: the links in transfer-list to transfer-window was set twice in all
+  themes (<a><a></a></a>) (3063)
+
+- fixes in Authentication Type "Form Auth + Cookie" (3052 + 3053)
+  * flush users autologin-cookie when "performAuthentication" fails
+  * flush users autologin-cookie when user changed password
+
+- dtree.js: superfluous "</a>" was generated for root of file-prio tree (3033)
+
+- fixed: transferlist-template: seed button was showing for non uploadable
+  clients (3022)
+
+- fixed: filemanager: access dot-dir via direct url (3011)
+
+- fixes in AJAX-code (3005 + 3006 + 3008)
+  * ajax silent-mode was not working properly because of missing {}.
+  * added some checks so it is not possible to start a second update while
+    first still not finished.
+  * added an unload method, because it was possible to press F5 and before
+    reloading was started, an ajax request could be started because timer was
+	still running, which then produced errors in FF.
+  * if ajax_update() was called the second time, the ajax_HTTPRequest existed
+    already and so it aborted old request, which results in readystat = 0,
+	which calls the Callback.
+  * changed ajax_pageUpdate so it only updates the "span_update" if needed.
+
+- fixed: template-var transfer_exists was not set in transfer-templates (3002)
+
+- superadmin pages: display webapp page-title, instead of always
+  'torrentflux-b4rt' (2991)
+
+- Modified backup list function to only list tfb backup archives found in
+  .backups folder, not every single file in the folder. (2985)
+
+- fixed webapp-lock-bug (cache was not flushed on set) (2977)
+
+- close adodb-connection to database on script-shutdown (2961)
+
+- common.js: don't return false from functions used in href attributes (2953)
+  (fixes browser displaying "false" after clicking on transfer-properties
+  or server-monitor while a request was already in progress)
+
+- fixed postgresql sql- and query-files (2952)
+  (use signed 4-byte ints where signed 2-byte ints are not enough)
+
+- fixed: "transmissioncli does not notice the tracker on shutdown" (2915)
+  details: https://tf-b4rt.berlios.de/forum/index.php/topic,587.0.html
+
+- sorttable.js: handle empty cells better (fixes bad behavior, and javascript
+  error under IE, when progress column has some) (2911)
+
+- fluxcli: if invoked synchronously by fluxd, don't call fluxd back (2908)
+  * Fluxd.php: delay-load modules list
+  * fluxcli: don't initialize FluxdQmgr if not needed
+  details: http://tf-b4rt.berlios.de/forum/index.php/topic,586.0.html
+
+- fixed a bug in xfer-stats-page (it was not possible to view monthly and
+  weekly stats for all users) (2895)
+
+- fluxd-Maintenance module: "Maintenance Transfer-Restart" setting
+  (fluxd_Maintenance_trestart) was ignored, like it was always true (2867)
+
+- clienthandler-stop: give transfer 1 more second time after waitForTransfer
+  ("Possible Hung Process" was logged as "just after waitForTransfer" there
+  is just the pid-file gone but not nec. the process (this log was irritating
+  as they were not "hung" but just in a graceful shutdown)) (2857)
+
+- fluxd.pl: fixed error message when pid-file exists but daemon is not
+  running (2838)
+
+- missing HTML- and URL-escaping
+
+- missing Shell-, SQL- and HTML-encoding of various strings
+
+- added the pattern modifier D in some preg_match calls (2824)
+
+- fixed problem that only the first file is uploaded when using the dynamic
+  multi-upload-form on index-page in browsers not IE (2811)
+
+- Fixed a bug in hide/show seeding torrents which caused a hidden torrent's
+  upload usage not to be shown in the bandwidth bars. (2805)
+
+- fix in TorrentSpy search engine: letters P/R (Pwd/Reg. needed) appeared
+  above the table instead of inline (2801)
+
+- add/edit/display links: fixed URL- or HTML-encoding of URLs and sitenames
+  edit links: fixed typos in default & tf templates (2800)
+
+- rss-links: fixed URL- or HTML-encoding of URLs (2799)
+
+- missing encoding of URLs in case they contain special chars (2798)
+
+- fix for upload of metafiles when their URLs contain special chars (2791)
+
+- wget-inject did not clean the transfer-filename (2789)
+
+- transfer profiles: when transfer_profiles was 1 (only admins), admins did
+  not see public profiles (only private) (2769)
+
+- fluazu: fixed problem not catching connect-problems on startup (2761)
+
+- fixed: js-string-trim (2758)
+
+- fluxd: fixed problems with paths with spaces (2757)
+
+- calculate share-ratio from amount downloaded while leeching and not size
+  (2741 + 2742)
+
+- fixed problems in checkDirectory (2739 + 2740)
+
+
+***** updates *****
+
+- Transmission-Transfer-Client:
+  * Transmission 0.80-svn (svn2076) / tfCLI svn3084
+  * Transmission 0.72 / tfCLI svn2950
+
+- ADOdb V4.95a
+
+- vLIB 4.1.0
+
+- Search-Engines:
+  * Pirate Bay 1.06
+
+
+--------------------------------------------------------------------------------
+--------------------------------------------------------------------------------
+
+  torrentflux-b4rt 1.0 alpha7
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=2735
+
+--------------------------------------------------------------------------------
+
+  torrentflux-b4rt 1.0 alpha6
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=2454
+
+--------------------------------------------------------------------------------
+
+  torrentflux-b4rt 1.0 alpha5-1
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=2007
+
+  torrentflux-b4rt 1.0 alpha5
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=1965
+
+--------------------------------------------------------------------------------
+
+  torrentflux-b4rt 1.0 alpha4
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=1800
+
+--------------------------------------------------------------------------------
+
+  torrentflux-b4rt 1.0 alpha3
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=1688
+
+--------------------------------------------------------------------------------
+
+  torrentflux-b4rt 1.0 alpha2
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=1522
+
+--------------------------------------------------------------------------------
+
+  torrentflux-b4rt 1.0 alpha1
+  http://svn.berlios.de/wsvn/tf-b4rt/trunk/?op=log&rev=1452

+ 278 - 0
COPYING

@@ -0,0 +1,278 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+

+ 144 - 0
FAQ

@@ -0,0 +1,144 @@
+=======================================================================
+$Id: FAQ 2921 2007-04-18 15:00:45Z munk $
+vim: set comments=fb\:o,fb\:#,fb\:-,fb\:*,fb\:A.,fb\:Q. et tw=72 sw=4:
+=======================================================================
+
+
+/**********************************************************************
+Torrentflux-b4rt 1.0 Frequently Asked Questions / FAQ file
+**********************************************************************/
+
+	This file contains a list of Frequently Asked Questions (FAQ)
+	together with corresponding Frequently Given Answers (FGA).
+
+	The file is split into the following sections:
+
+    o Torrentflux-b4rt compatibility with other operating systems
+    o Fluxd
+    o Transmission
+
+
+/**********************************************************************
+Torrentflux-b4rt compatibility with other operating systems
+**********************************************************************/
+
+Q.	Will torrentflux-b4rt run on a Linksys NSLU2 'slug'?
+
+A.	There are reports from users that torrentflux-b4rt will run on the
+	slug with the OpenSlug operating system:
+
+	http://www.nslu2-linux.org/wiki/OpenSlug/HomePage
+
+	However, a version of 'ps' must be installed that works with
+	torrentflux-b4rt such as the procps ipkg / package.  See this post
+	for more details:
+
+	http://tf-b4rt.berlios.de/forum/index.php/topic,134.msg2262.html#msg2262
+
+
+
+/**********************************************************************
+Fluxd
+**********************************************************************/
+
+Q.	I get the following error when trying to start fluxd:
+
+	Error : initializing FluxDB : loglevel not defined
+
+	What does it mean and how can I solve the problem?
+
+A.	This error indicates that your PHP commandline binary (php-cli) is
+	not built with support for the database you're using.  You can check
+	the supported modules you have built into your php-cli binary by
+	issuing the command 'php -m' in a shell:
+
+    root@users /root# php -m
+    [PHP Modules]
+    mysql
+    pcre
+    session
+    sockets
+
+	These 4 modules listed above - mysql (or one of the other supported
+	database types), pcre, session and sockets - are all required to run
+	Fluxd correctly.  If you don't see these, you should reinstall your
+	php-cli binary with support for the missing options.
+
+	Notes:
+	======
+	It may be the case that you have installed php-cli with support for
+	all the above options but for some reason the php.ini file has not
+	been modified to actually enable the extensions correctly.  Check
+	that your php.ini file has the following in it:
+
+	extension=/path/to/mysql.so
+	extension=/path/to/pcre.so
+	extension=/path/to/session.so
+	extension=/path/to/sockets.so
+
+	where '/path/to' is the path to the extension libs.
+
+	Another useful troubleshooting tip for this problem is to run
+	'php -i' on the commandline and search for 'ini' in the output:
+
+	root@users /root# php -i | grep ini
+	Configuration File (php.ini) Path => /usr/local/etc/php.ini
+	Scan this dir for additional .ini files => /usr/local/etc/php
+	additional .ini files parsed => /usr/local/etc/php/extensions.ini
+	phpini => /usr/local/etc/php.ini
+
+	This gives you an idea where all the config files for your php
+	binary are sourced from so that you can check and confirm everything
+	is in place.
+
+
+/**********************************************************************
+Transmission
+**********************************************************************/
+
+Q.	How do I install the modified Torrentflux-b4rt version of
+	Transmission on BSD (FreeBSD/NetBSD/OpenBSD/etc)?
+
+A.	The BSD operating systems use 'make' as part of the base operating
+	system to perform various operations such as updating the base
+	system, installing 3rd party software/ports and other stuff.  As a
+	result, running 'make' to build/install the modified transmission
+	client on BSD will cause problems such as:
+
+	[3:58:30] root@users# make
+	"mk/common.mk", line 9: Missing dependency operator
+	"mk/common.mk", line 11: Missing dependency operator
+	"mk/common.mk", line 14: Need an operator
+	"mk/common.mk", line 16: Need an operator
+	"mk/common.mk", line 17: Need an operator
+	"mk/common.mk", line 19: Missing dependency operator
+	Error expanding embedded variable.
+
+	The solution is to use a different version of make such as GNU's
+	gmake.  This utility should be available from the ports tree for
+	your OS.
+
+	Once you have gmake installed, run the following commands to
+	build and install the modified transmission client for use with
+	torrentflux-b4rt:
+
+	# Change into the dir containing the modified transmission client:
+	root@users /root# cd ~tfbdev/svn/tf-b4rt/trunk/clients/transmission/
+
+	# Set env variables for CXXFLAGS, LDFLAGS and CFLAGS.
+	# Note: this command should be all on one line, remove '\' below:
+	root@users /home/tfbdev/svn/tf-b4rt/trunk/clients/transmission# \
+    setenv CXXFLAGS -I/usr/local/include \
+    && setenv LDFLAGS "-L/usr/local/lib -lgnugetopt" \
+    && setenv CFLAGS -I/usr/local/include
+	root@users /home/tfbdev/svn/tf-b4rt/trunk/clients/transmission# \
+
+	# Finally, configure, build and install the client
+	# Again, note this is on one line, remove the '\':
+	root@users /home/tfbdev/svn/tf-b4rt/trunk/clients/transmission# \
+    ./configure && gmake && gmake install
+
+
+	See here for more background and info on the problem:
+
+    https://tf-b4rt.berlios.de/forum/index.php/topic,266.0.html

+ 156 - 0
FEATURES

@@ -0,0 +1,156 @@
+=======================================================================
+$Id: FEATURES 2861 2007-04-11 22:24:26Z munk $
+=======================================================================
+
+/**********************************************************************
+Torrentflux-b4rt FEATURES file
+**********************************************************************/
+
+	Some of the most popular features of torrentflux-b4rt are listed
+	below.
+
+	NOTE! This list is definitely NOT exhaustive; there are
+	a massive number of features that can be configured via the
+	torrentflux-b4rt admin panel!
+
+	o Supports multiple internet transfer protocols:
+
+		- BitTorrent - supported client(s) include:
+
+			* Original BitTorrent/Mainline - supports trackerless
+			  torrents and encryption
+
+			* BitTornado - uses slightly less resources than the original
+			  BT, allows file priority for downloading files selectively
+
+			* Transmission - much smaller memory footprint without much
+			  loss in functionality
+
+			* Azureus - control a number of transfers from a single
+			  control process, tighter control on total max bandwidth
+			  for all torrents
+
+		- HTTP/FTP - supported client(s) include:
+
+			* wget - standard lightweight file transfer utility on
+			  Linux, supported on many other platforms also
+
+		- Usenet - supported client(s) include:
+
+			* nzbperl - perl based application allowing multi-connection
+			  news server downloads from nzb files with functionality
+			  for bandwidth throttling.
+
+	o Unified transfer control:
+
+		- Perform stop/start/resume/kill/delete operations on individual
+		  transfers, all transfers or a selection of transfers
+
+        - Change settings of running transfers on the fly - down/up
+		  rates, what ratio to stop seeding at, how many connections to
+		  use at the same time, ...
+
+	o Individual Transfers 'remember' their own settings, allowing you
+	  to use different settings for different Transfers.
+
+	o View torrent Transfer information:
+
+		- cumulative transfer statistics are available for every
+		  transfer in the transfer list. tfb keeps track of how much
+		  data has been transferred up and down even when you stop and
+		  restart the transfer.  This allows you to keep track of what
+		  your actual ratio is rather than what the current session
+		  ratio is.
+
+		- logfiles are maintained for each and every torrent, allowing
+		  you to easily see when problems occur with your transfers.
+
+		- current upload/download bandwidth rate as well as the number
+		  of seeds/peers and estimated time left is viewable in the
+		  transfer list
+
+	o Transfer statistics and logging:
+
+		- View detailed Transfer statistics and information, including:
+
+			* per transfer error logging for easy troubleshooting
+			* upload/download totals for each user, by day/month/year
+			* number of seeders/leechers for a torrent in a graphical
+			  display
+
+	o Uploading and injection of metafiles (.torrent, .wget, .nzb files):
+
+		- Upload single or multiple metafiles from your local machine
+		  to the web server
+
+		- Upload metafiles directly to your web server from another
+		  web server
+
+        - Multiple operations in "fluxcli.php" allow inject and more
+          from command-line (cron, etc.)
+          eg.: "inject", "watch", "rss"
+
+	o fluxcli.php - a complete command-line version of torrentflux-b4rt:
+
+		- As mentioned, can perform all the tasks available in the
+		  torrentflux-b4rt frontend but from the commandline.  Makes it
+		  ideal for running from a cron job.
+
+		- Schedule cron jobs to check RSS feeds on a regular basis and
+		  download them to a directory.
+
+		- Schedule cron jobs to watch folders for new torrent files and
+		  then autostart/inject them
+
+		- Check up on the status of transfers directly from a Unix shell
+
+	o Fluxd - background perl daemon to perform scheduled tasks:
+
+		- Qmgr module handles queueing of transfers with per-user and
+		  global limits.  Add transfers to the queue and Qmgr will
+		  automatically start one transfer after another finishes.
+
+		- automate fetching of torrent files from RSS feeds
+
+		- watch a list of directories for new upload of torrent files
+		  and automatically start those torrents running
+
+	o Integrated Filemanager:
+
+		- Support for a large number of additional third party
+		  utilities/functionality, including:
+
+			* archive file extraction from the browser (zip/rar)
+
+			* vlc streaming controllable from browser
+
+			* download of completed transfers directly from browser
+
+			* reading of .nfo files directly in the browser
+
+			* creation of torrent files directly in the browser
+
+	o AJAX updates for maximum info with minimal bandwidth:
+
+		- Display of transfer lists can be easily configured to use AJAX
+		  to update transfer stats in real time.  This saves on
+		  bandwidth since only the transfer list needs to be sent across
+		  the network, not the whole web page.
+
+		- Individual transfer windows can also use AJAX to update stats
+		  in real time.
+
+	o Templating engine:
+
+		- The torrentflux-b4rt GUI is template driven using the vLib
+		  template engine:
+
+			http://vlib.clausvb.de/vlibtemplate.php
+
+		  This allows developers to completely redesign the look and
+		  feel of torrentflux-b4rt without having to worry about the
+		  underlying PHP codebase.
+
+		- Torrentflux-b4rt also incorporates template caching to speed
+		  up the load time of pages.  This feature can be enabled via
+		  the Administration control panel.

+ 439 - 0
INSTALL

@@ -0,0 +1,439 @@
+=================================================================
+$Id: INSTALL 3364 2008-03-22 14:48:39Z b4rt $
+=================================================================
+
+torrentflux-b4rt 1.0
+
+http://tf-b4rt.berlios.de
+
+
+-----------------------------------------------------------------
+System Requirements
+-----------------------------------------------------------------
+
+-- A Linux or FreeBSD Box installed and working properly.
+   (tested on Debian, Ubuntu, Gentoo, RedHat, Fedora and others)
+
+-- Web Server.
+   * Apache (http://www.apache.org)
+   * LightTPD (http://www.lighttpd.net)
+
+-- SQL-Database. Supported are :
+   * MySQL (http://www.mysql.com)
+   * SQLite (http://www.sqlite.org)
+   * PostgreSQL (http://www.postgresql.org)
+
+-- PHP Version 4.3.x or higher with Session and PCRE support
+   enabled. Socket-Support is required for fluxd.
+   (http://www.php.net)
+
+-- Python 2.2 or higher for BitTornado and Python 2.4 or
+   higher for Mainline (BitTorrent).
+   (http://www.python.org)
+
+-- Azureus 2.5.0.0 or higher with the XML/HTTP Plugin to use
+   an Azureus-Server as Client within tfb (with fluazu).
+   * Azureus (http://azureus.sourceforge.net)
+   * XML/HTTP Plugin
+    (http://azureus.sourceforge.net/plugin_details.php?plugin=xml_http_if)
+
+-- Perl 5.6 or higher for fluxd, nzbperl, ttools.pl and
+   fluxpoller.pl (see Section "Perl-Modules" for Perl-Module-
+   Requirements)
+   (http://www.perl.org)
+
+-- Safe Mode must be turned off.
+   (php.ini : safe_mode = Off)
+   Allow Url fopen must be allowed.
+   (php.ini : allow_url_fopen = On)
+
+-- SELinux should be turned off.
+
+
+-----------------------------------------------------------------
+Installation with "setup.php"
+-----------------------------------------------------------------
+
+1. Untar the package into a directory then copy the contents of
+   the "html" folder into your web site directory.
+
+   tar -jxvf torrentflux-b4rt_1.0.tar.bz2
+
+2. ensure the files + dirs in the docroot are readable by your
+   webserver-user. if it is not the case one possible solution is
+   to "chown" the files to your webserver-user. this may be done
+   with a line like that :
+   chown -R username /path/to/torrentflux-docroot
+   change dir + username to fit your system so a possible line
+   would be :
+   chown -R www-data /var/www
+
+3. ensure the dir "inc/config/" in your torrentflux-docroot is
+   writable by your webserver-user.
+
+4. open "setup.php" in a browser and complete it step by step.
+
+5. IMPORTANT:  The first time you access the application, you
+   will be prompted for a user/password -- this is when you will
+   set the SUPER ADMIN user and password by what you enter.  For
+   example, you pull the site up for the first time, you will
+   be prompted to login and if you put in user: 'goombah' and
+   password: 'iama' then your super admin user account will
+   be set to goombah/iama.
+
+6. On your first login you should check all the admin settings
+   pages and configure your installation.
+
+
+-----------------------------------------------------------------
+Manual Installation
+-----------------------------------------------------------------
+
+1. Untar the package into a directory then copy the contents of
+   the "html" folder into your web site directory.
+
+   tar -jxvf torrentflux-b4rt_1.0.tar.bz2
+
+2. Create a database and import the sql-script.
+   You may specify the type of the database you want to use in
+   the file "config.db.php", but the examples here use MySQL.
+
+   mysqladmin create torrentflux
+
+   "mysql_torrentflux-b4rt-1.0.sql" contains the commands to
+   build the table structure and the default data. Import the
+   script into your torrentflux database.
+
+   mysql torrentflux < mysql_torrentflux-b4rt-1.0.sql
+   Or load the script with PHPMyAdmin (web interface)
+
+3. rename the file "config.db.php.dist" in the dir "inc/config/"
+   to "config.db.php" and set the database-settings to use your
+   torrentflux-database.
+   You may specify type of database you want to use, but
+   the examples here use MySQL.
+
+4. ensure the files + dirs in the docroot are readable by your
+   webserver-user. if it is not the case one possible solution is
+   to "chown" the files to your webserver-user. this may be done
+   with a line like that :
+   chown -R username /path/to/torrentflux-docroot
+   change dir + username to fit your system so a possible line
+   would be :
+   chown -R www-data /var/www
+
+5. Remove or rename the file /path/to/torrentflux-docroot/setup.php.
+   This is a security precaution to ensure a malicious user does not
+   attempt to overwrite your torrentflux-b4rt installation.  Your
+   installation will not run with setup.php in the document root folder.
+
+6. IMPORTANT:  The first time you access the application, you
+   will be prompted for a user/password -- this is when you will
+   set the SUPER ADMIN user and password by what you enter.  For
+   example, you pull the site up for the first time, you will
+   be prompted to login and if you put in user: 'goombah' and
+   password: 'iama' then your super admin user account will
+   be set to goombah/iama.
+
+7. On your first login you should check all the admin settings
+   pages and configure your installation.
+
+
+-----------------------------------------------------------------
+Upgrade from TorrentFlux 2.1-b4rt-9x with "upgrade.php"
+-----------------------------------------------------------------
+
+1. If QueueManager is running dequeue all torrents and stop it.
+
+2. Stop all transfers and delete them. (data-delete/reset/wipe is
+   not needed, delete is enough)
+
+3. Delete content of docroot of the v9x-installation. (recursive)
+
+4. Untar the package into a directory then copy the contents of
+   the "html" folder into your web site directory.
+
+   tar -jxvf torrentflux-b4rt_1.0.tar.bz2
+
+5. ensure the files + dirs in the docroot are readable by your
+   webserver-user. if it is not the case one possible solution is
+   to "chown" the files to your webserver-user. this may be done
+   with a line like that :
+   chown -R username /path/to/torrentflux-docroot
+   change dir + username to fit your system so a possible line
+   would be :
+   chown -R www-data /var/www
+
+6. ensure the dir "inc/config/" in your torrentflux-docroot is
+   writable by your webserver-user.
+
+7. copy the file "upgrade.php" from the dir "upgrade/v9x_1.0/" to
+   your torrentflux-docroot. choose the appropriate version.
+
+8. open the just copied file "upgrade.php" in a browser and
+   complete it step by step.
+
+9. Remove or rename the file /path/to/torrentflux-docroot/setup.php.
+   This is a security precaution to ensure a malicious user does not
+   attempt to overwrite your torrentflux-b4rt installation.  Your
+   installation will not run with setup.php in the document root folder.
+
+10. On your first login after the upgrade you should check all the
+    admin settings pages and configure your installation.
+
+
+-----------------------------------------------------------------
+Manual Upgrade from TorrentFlux 2.1-b4rt-98
+-----------------------------------------------------------------
+
+1. If QueueManager is running dequeue all torrents and stop it.
+
+2. Stop all transfers and delete them. (data-delete/reset/wipe is
+   not needed, delete is enough)
+
+3. Delete content of docroot of the v98-installation. (recursive)
+
+4. Untar the package into a directory then copy the contents of
+   the "html" folder into your web site directory.
+
+   tar -jxvf torrentflux-b4rt_1.0.tar.bz2
+
+5. Import the Update-sql-script.
+   You may specify the type of the database you want to use in
+   the file "config.db.php", but the examples here use MySQL.
+
+   "mysql_update_tf-b4rt-98.to.torrentflux-b4rt-1.0.sql"
+   contains the commands to update the table structure and the
+   default data.
+   Import the script into your torrentflux database.
+
+   mysql torrentflux < mysql_update_tf-b4rt-98.to.torrentflux-b4rt-1.0.sql
+   Or load the script with PHPMyAdmin (web interface)
+
+6. rename the file "config.db.php.dist" in the dir "inc/config/"
+   to "config.db.php" and set the database-settings to use your
+   torrentflux-database.
+   You may specify type of database you want to use, but
+   the examples here use MySQL.
+
+7. ensure the files + dirs in the docroot are readable by your
+   webserver-user. if it is not the case one possible solution is
+   to "chown" the files to your webserver-user. this may be done
+   with a line like that :
+   chown -R username /path/to/torrentflux-docroot
+   change dir + username to fit your system so a possible line
+   would be :
+   chown -R www-data /var/www
+
+8. rename the dir ".torrents" in your path to ".transfers".
+   on a default install this would be :
+   /usr/local/torrent/.torrents/
+    ->
+   /usr/local/torrent/.transfers/
+
+9. Remove or rename the file /path/to/torrentflux-docroot/setup.php.
+   This is a security precaution to ensure a malicious user does not
+   attempt to overwrite your torrentflux-b4rt installation.  Your
+   installation will not run with setup.php in the document root folder.
+
+10. On your first login after the upgrade you should check all the
+    admin settings pages and configure your installation.
+
+
+-----------------------------------------------------------------
+Upgrade from official TorrentFlux 2.1/2.2/2.3 with "upgrade.php"
+-----------------------------------------------------------------
+
+1. If QueueManager is running dequeue all torrents and stop it.
+
+2. Stop all transfers.
+
+3. Delete content of docroot of the 2.1/2.2/2.3 installation.
+  (recursive)
+
+4. Untar the package into a directory then copy the contents of
+   the "html" folder into your web site directory.
+
+   tar -jxvf torrentflux-b4rt_1.0.tar.bz2
+
+5. ensure the files + dirs in the docroot are readable by your
+   webserver-user. if it is not the case one possible solution is
+   to "chown" the files to your webserver-user. this may be done
+   with a line like that :
+   chown -R username /path/to/torrentflux-docroot
+   change dir + username to fit your system so a possible line
+   would be :
+   chown -R www-data /var/www
+
+6. ensure the dir "inc/config/" in your torrentflux-docroot is
+   writable by your webserver-user.
+
+7. copy the file "upgrade.php" from the dir "upgrade/TF21_1.0/" or
+   "upgrade/TF22_1.0/" or "upgrade/TF23_1.0/" to your docroot.
+   (choose the version of your current installation)
+
+8. open the just copied file "upgrade.php" in a browser and
+   complete it step by step.
+
+9. Remove or rename the file /path/to/torrentflux-docroot/setup.php.
+   This is a security precaution to ensure a malicious user does not
+   attempt to overwrite your torrentflux-b4rt installation.  Your
+   installation will not run with setup.php in the document root folder.
+
+10. On your first login after the upgrade you should check all the
+    admin settings pages and configure your installation.
+
+
+-----------------------------------------------------------------
+Manual Upgrade from official TorrentFlux 2.1/2.2/2.3
+-----------------------------------------------------------------
+
+1. If QueueManager is running dequeue all torrents and stop it.
+
+2. Stop all transfers.
+
+3. Delete content of docroot of the 2.1/2.2/2.3 installation.
+  (recursive)
+
+4. Untar the package into a directory then copy the contents of
+   the "html" folder into your web site directory.
+
+   tar -jxvf torrentflux-b4rt_1.0.tar.bz2
+
+5. Import the Update-sql-script.
+   You may specify the type of the database you want to use in
+   the file "config.db.php", but the examples here use MySQL for
+   update from TorrentFlux 2.3.
+
+   "mysql_update_torrentflux23.to.torrentflux-b4rt-1.0.sql"
+   contains the commands to update the table structure and the
+   default data.
+   Import the script into your torrentflux database.
+
+   mysql torrentflux < mysql_update_torrentflux23.to.torrentflux-b4rt-1.0.sql
+   Or load the script with PHPMyAdmin (web interface)
+
+6. rename the file "config.db.php.dist" in the dir "inc/config/"
+   to "config.db.php" and set the database-settings to use your
+   torrentflux-database.
+   You may specify type of database you want to use, but
+   the examples here use MySQL.
+
+7. ensure the files + dirs in the docroot are readable by your
+   webserver-user. if it is not the case one possible solution is
+   to "chown" the files to your webserver-user. this may be done
+   with a line like that :
+   chown -R username /path/to/torrentflux-docroot
+   change dir + username to fit your system so a possible line
+   would be :
+   chown -R www-data /var/www
+
+8. rename the dir ".torrents" in your path to ".transfers".
+   on a default install this would be :
+   /usr/local/torrent/.torrents/
+    ->
+   /usr/local/torrent/.transfers/
+
+9. rename stat-files and prio-files.
+   Note: it is also possible to just delete all transfers before upgrading and
+         then re-inject after upgrade. (then this point can be skipped)
+   change to your path-dir. (the dir just renamed in 8.) on a
+   default install this would be :
+   cd /usr/local/torrent/.transfers/
+   execute these commands (requires perl) in that dir :
+   ls -1 *.torrent | perl -e 'while(<STDIN>){chomp;$o=lc(((substr($_,0,(length($_))-7))))."stat";$n=$_.".stat";print $o."->".$n;print `mv $o $n;echo`;}'
+   ls -1 *.prio | perl -e 'while(<STDIN>){chomp;$o=$_;$n=((substr($_,0,(length($_))-4)))."torrent.prio";print $o."->".$n;print `mv $o $n;echo`;}'
+
+10. Remove or rename the file /path/to/torrentflux-docroot/setup.php.
+    This is a security precaution to ensure a malicious user does not
+    attempt to overwrite your torrentflux-b4rt installation.  Your
+    installation will not run with setup.php in the document root folder.
+
+11. On your first login after the upgrade you should check all the
+    admin settings pages and configure your installation.
+
+
+-----------------------------------------------------------------
+Additional / Optional
+-----------------------------------------------------------------
+
+-- PyCrypto (Python Cryptography Toolkit) is needed to run
+   BitTornado with encryption. (avail. since BitTornado 0.3.18)
+   PyCrypto is also required for Mainline (BitTorrent) Client.
+   PyCrypto can be found at :
+   http://www.amk.ca/python/code/crypto
+
+-- Twisted is needed to run Mainline (BitTorrent) Client.
+   It can be found at :
+   http://twistedmatrix.com
+   Twisted requires at least Zope Interface 3.0.1 or higher. It
+   can also work with ZopeX3 version 3.0.0c1 or higher.
+   Zope Interface is included in Twisted-source-tarball or can be
+   found at :
+   http://www.zope.org/Products/ZopeInterface
+
+-- UUDeview is needed for nzbperl.pl. UUDeview can be found at :
+   http://fpx.de/fp/Software/UUDeview/
+
+-- unrar (for Linux) and rarbsd (for FreeBSD) can be found at :
+   http://www.rarlab.com/rar/
+
+-- cksfv (needed for SFV check) can be found at :
+   http://zakalwe.virtuaalipalvelin.net/~shd/foss/cksfv/files/devel/
+
+-- vlc (needed for Streaming Feature) can be found at :
+   http://www.videolan.org/
+
+-- To compile Transmission on Mac OS X you need the Developer
+   Tools from Apple.
+
+-- To use wget on Mac OS X install :
+   http://www.statusq.org/images/wget.zip
+
+
+-----------------------------------------------------------------
+Perl-Modules
+-----------------------------------------------------------------
+
+-- fluxd.pl (part of fluxd)
+   * IO::Select       (perl -MCPAN -e "install IO::Select")
+   * IO::Socket::UNIX (perl -MCPAN -e "install IO::Socket::UNIX")
+   * IO::Socket::INET (perl -MCPAN -e "install IO::Socket::INET")
+   * POSIX            (perl -MCPAN -e "install POSIX")
+
+-- FluxDB.pm (part of fluxd) (only in dbi-mode)
+   * all database-types :
+     DBI              (perl -MCPAN -e "install Bundle::DBI")
+   * MySQL :
+     DBD::mysql       (perl -MCPAN -e "install DBD::mysql")
+   * SQLite :
+     DBD::SQLite      (perl -MCPAN -e "install DBD::SQLite")
+   * PostgreSQL :
+     DBD::Pg          (perl -MCPAN -e "install DBD::Pg")
+
+-- Fluxinet.pm (part of fluxd)
+   * IO::Select       (perl -MCPAN -e "install IO::Select")
+   * IO::Socket::INET (perl -MCPAN -e "install IO::Socket::INET")
+
+-- tfnzbperl.pl (usenet transfer-client)
+   Required :
+   * IO::File
+   * IO::Select
+   * IO::Socket::INET
+   * File::Basename
+   * Getopt::Long
+   * Cwd
+   * XML::Simple
+   * XML::DOM
+   Optional :
+   * threads
+   * Thread::Queue
+
+-- ttools.pl (metainfoclient)
+   * Digest::SHA1     (perl -MCPAN -e "install Digest::SHA1")
+   * LWP::UserAgent   (perl -MCPAN -e "install LWP::UserAgent")
+
+-----------------------------------------------------------------
+Known Issues
+-----------------------------------------------------------------
+Please check at: http://tf-b4rt.berlios.de

+ 430 - 0
README

@@ -0,0 +1,430 @@
+=======================================================================
+$Id: README 3235 2007-10-19 11:22:17Z munk $
+vim: set comments=fb\:o,fb\:#,fb\:-,fb\:*,fb\:A.,fb\:Q. et tw=72 sw=4:
+=======================================================================
+
+/**********************************************************************
+Torrentflux-b4rt 1.0 README file
+**********************************************************************/
+
+    Contents:
+    =========
+    1. Introduction
+    2. Features
+    3. Requirements
+        3.1 Minimum Requirements
+        3.2 Additional/Optional Feature Requirements
+        3.3 Fluxd Requirements
+    4. Installation
+    5. Support
+
+
+/**********************************************************************
+1. Introduction
+**********************************************************************/
+
+    Torrentflux-b4rt is a web based transfer control client.
+    Torrentflux-b4rt allows you to control your bittorrent transfers
+    from anywhere using a highly configurable web based front end.
+
+    Torrentflux-b4rt is very easy to install on a web server and includes
+    a simple setup script which can be accessed from a web browser.
+    Just upload the files to your web server, run the setup script and
+    your torrentflux-b4rt installation is ready to go.
+
+    Torrentflux-b4rt was originally based on the TorrentFlux BitTorrent
+    controller written by Qrome, although it has recently undergone a major
+    rewrite to allow transparent integration with a number of transfer
+    clients and protocols:
+
+    Torrentflux-b4rt has builtin support for transfers via:
+
+        o BitTorrent - supported client(s) include:
+
+            * Original BitTorrent/Mainline - supports trackerless
+              torrents and encryption
+
+            * BitTornado - uses slightly fewer resources than the original
+              BT, allows file priority for downloading files selectively
+
+            * Transmission - much smaller memory footprint without much
+              loss in functionality
+
+            * Azureus - control a number of transfers from a single
+              control process, tighter control on total max bandwidth
+              for all torrents
+
+        o HTTP/FTP - supported client(s) include:
+
+            * wget - standard lightweight file transfer utility on
+              Linux, supported on many other platforms also
+
+        o Usenet - supported client(s) include:
+
+            * nzbperl - perl based application allowing multi-connection
+              news server downloads from nzb files with functionality
+              for bandwidth throttling.
+
+
+    Torrentflux-b4rt incorporates a huge number of advanced options to
+    allow you finer grained control of your transfers. The
+    administration panel in torrentflux-b4rt includes a large number of
+    options to allow you fine grained control over every aspect of your
+    bittorrent/transfers usage.
+
+    Torrentflux-b4rt is also a multi-user application, allowing you to
+    create individual accounts and assign restrictions to those accounts
+    as you wish.
+
+
+/**********************************************************************
+2. Features
+**********************************************************************/
+
+    Please see the FEATURES file for a comprehensive list of
+    Torrentflux-b4rt features.
+
+
+/**********************************************************************
+3. Requirements
+**********************************************************************/
+
+    Due to the extensible nature of torrentflux-b4rt, there are a
+    variety of 3rd party tools/utilities and extra functionality that
+    can be enabled from within the administration control panel.
+    Torrentflux-b4rt also features a service daemon called Fluxd which
+    can be used to schedule rss feed downloads, keep watch over running
+    torrents and restart them if they die.
+
+    The 3rd party utils/functionality and Fluxd all have their own
+    requirements.  For this reason the requirements section is split
+    into the following sections:
+
+        3.1 Minimum Requirements
+        3.2 Additional/Optional Feature Requirements
+        3.3 Fluxd Requirements
+
+    ALWAYS install the required items from the package installation
+    manager for your OS whenever possible.  This will save a lot of time
+    and headaches, and ensure all the dependencies are installed
+    correctly. See your OS documentation for details about the software
+    management systems available on your OS.
+
+
+/**********************************************************************
+3.1 Minimum Requirements
+**********************************************************************/
+
+    This section lists the minimum requirements required to operate
+    torrentflux-b4rt with the basic core functionality.  This list may
+    seem daunting, but most Unix like OSs have many of these features
+    available as part of their software distribution system.
+
+    /******************************************************************
+    Notes:
+    =====
+
+    - The following lists are NOT exhaustive - if you use a Unix like
+      OS/webserver/database/etc not listed below, basic functionality of
+      torrentflux-b4rt may well work.  Success reports installing
+      torrentflux-b4rt on other OSs other than those below above are
+      welcome on the forum:
+
+            http://tf-b4rt.berlios.de/forum/
+
+    - IT IS STRONGLY ADVISED THAT YOU INSTALL THE COMPONENTS LISTED
+      BELOW FROM THE SOFTWARE DISTRIBUTION SYSTEM INCLUDED WITH YOUR OS
+      (ie apt-get, rpm package management, FreeBSD ports system, etc).
+    ******************************************************************/
+
+
+    The core minimum requirements follow below:
+
+    o A Unix like OS (no Win32 support).
+      Current tested OSs include:
+
+        * Linux:
+            o Debian, Ubuntu, Gentoo, RedHat, Fedora, NSLU2,
+              ClarkConnect - amongst others
+
+            Note:
+            =====
+            SELinux should be turned off.
+
+        * BSD:
+            o FreeBSD, OpenBSD, NetBSD
+
+        * Apple:
+            o Mac OS X
+
+
+    o A Web Server.
+      Current tested webservers include:
+
+        * Apache
+            http://www.apache.org/
+
+        * LightTPD
+            http://www.lighttpd.net/
+
+
+    o An SQL-Database.
+      Currently Supported databases:
+
+        * MySQL
+            http://www.mysql.com/
+
+        * SQLite
+            http://www.sqlite.org/
+
+        * PostgreSQL
+            http://www.postgresql.org/
+
+
+    o PHP in 'web mode' (ie mod_php or php-cgi - preferably mod_php) >=
+      4.3.x built with database (MySQL, SQLite or PostgreSQL), session,
+      sockets and PCRE support enabled.  For image recognition/captcha
+      support for logins, GD support must be enabled in PHP.
+
+    o PHP in 'commandline mode' (php-cli) >= 4.3.x built with database
+      (MySQL, SQLite or PostgreSQL), session, sockets and PCRE support
+      enabled.
+
+        http://www.php.net/
+
+        PHP Notes:
+        ==========
+        o Safe Mode must be turned off:
+            * php.ini: safe_mode = Off
+
+        o Allow Url fopen must be allowed:
+            * php.ini: allow_url_fopen = On
+
+        o open_basedir must be empty:
+            * php.ini: open_basedir = ''
+
+            Note: some web control panels like cPanel use
+            open_basedir to restrict users as to what they can do with
+            PHP.  torrentflux-b4rt does NOT officially support
+            installations on shared hosting platforms due to the
+            complications that arise (such as the use of open_basedir
+            amongst many other things).  However if you do choose to
+            attempt to install torrentflux-b4rt on a shared host running
+            a control panel such as cPanel, be aware of the problems
+            open_basedir can cause.
+
+            Please see:
+
+            http://php.net/manual/en/features.safe-mode.php#ini.open-basedir
+
+            for more info.
+
+        o As mentioned above, both the PHP web module and the
+          commandline / cli binary must be installed, both with support
+          for the database you plan to use and with support for PCRE and
+          sessions.  Check the package management system
+          documentation/website for your OS for more info on installing
+          PHP with support for web and cli.
+
+
+    o Python is required for the BitTorrent, BitTornado and Azureus
+      bittorrent clients:
+
+        * Python >= 2.2 for BitTornado
+        * Python >= 2.4 for BitTorrent Mainline (the original 'BitTorrent' client)
+        * Python >= 2.2.1 for Azureus
+
+
+    o Perl 5.6 or higher for fluxd, nzbperl.pl, ttools.pl and
+      fluxpoller.pl.  See section 3.2 below for extra info.
+
+
+    Once more(!), it is highly advisable to install these requirements
+    using the recommended package management system for your OS.
+
+
+/**********************************************************************
+3.2 Additional/Optional Feature Requirements
+**********************************************************************/
+
+    The following is a list of additional requirements needed before
+    enabling additional features and options in torrentflux-b4rt.  By
+    default these options or features are disabled and can be enabled
+    via the administration panel - please ensure the requirements are
+    installed before enabling additional features.
+
+    As mentioned above, it is strongly advised that these requirements
+    are installed from your OS's software distribution system to ensure
+    all dependencies are installed correctly.
+
+
+    Additional/optional feature requirements follow below:
+
+    o BitTorrent Mainline Client requirements:
+
+        * PyCrypto (Python Cryptography Toolkit)
+            http://sourceforge.net/projects/pycrypto/
+
+        * Twisted Python libs
+            http://twistedmatrix.com/trac/
+
+            Note:
+            =====
+            Twisted also requires Zope Interface >=3.0.1
+                http://www.zope.org/Products/ZopeInterface/
+
+            see Twisted installation docs for more info.
+
+
+    o Azureus Client requirements:
+
+        * Azureus 2.5.0.0 or higher with the XML/HTTP Plugin
+            http://azureus.sourceforge.net
+            http://azureus.sourceforge.net/plugin_details.php?plugin=xml_http_if
+
+        see azureus installation doc (doc/azureus.txt) for more info.
+
+
+    o wget transfer functionality requirements:
+        * wget
+            http://www.gnu.org/software/wget/
+
+
+    o nzbperl.pl requirements:
+
+        * UUDeview
+            http://fpx.de/fp/Software/UUDeview/
+
+        The following perl modules are also required for nzbperl:
+
+        * IO::File
+        * IO::Select
+        * IO::Socket::INET
+        * File::Basename
+        * Getopt::Long
+        * Cwd
+        * XML::Simple
+        * XML::DOM
+
+        Optional nzbperl perl modules:
+
+        * threads
+        * Thread::Queue
+
+
+    o Archive (rar/zip) extraction functionality requirements:
+
+        * unrar
+            http://www.rarlab.com/rar/
+
+
+    o SFV file integrity checking functionality requirements:
+
+        * cksfv
+            http://zakalwe.fi/~shd/foss/cksfv/
+
+
+    o vlc streaming functionality requirements:
+        * vlc
+            http://www.videolan.org/
+
+
+    o ttools.pl is an alternative BitTorrent metadata client that
+      supports metadata scraping.  It can be enabled in the
+      Administration panel.  It requires the following modules:
+
+        * Digest::SHA1     (perl -MCPAN -e "install Digest::SHA1")
+        * LWP::UserAgent   (perl -MCPAN -e "install LWP::UserAgent")
+
+    o Mac OS X specific:
+
+        o Compilation of Transmission on Mac OS X requirements:
+            * Apple Developer Tools
+                http://developer.apple.com/tools/
+
+        o wget functionality on Mac OS X requirements:
+            * wget
+                http://www.statusq.org/images/wget.zip
+
+
+/**********************************************************************
+3.3 Fluxd Requirements
+**********************************************************************/
+
+    The fluxd daemon works as part of torrentflux-b4rt to run in the
+    background on your server performing various tasks that you
+    schedule.  Fluxd is started from the Administration control panel -
+    by default it is not running.
+
+    Fluxd is written in the Perl scripting language and requires a
+    number of perl modules to run.  In the following list, the perl
+    command is given for you to install the corresponding module from
+    CPAN.  HOWEVER, most of these modules should be available from your
+    OS's package management system and it is strongly advised that you
+    install the modules from there rather than directly using the perl
+    CPAN system.
+
+    The required perl modules are as follows:
+
+    o Core fluxd functionality:
+
+        * IO::Select       (perl -MCPAN -e "install IO::Select")
+        * IO::Socket::UNIX (perl -MCPAN -e "install IO::Socket::UNIX")
+        * IO::Socket::INET (perl -MCPAN -e "install IO::Socket::INET")
+        * POSIX            (perl -MCPAN -e "install POSIX")
+
+    IMPORTANT:
+    ==========
+    Additionally, Fluxd uses a PHP based helper script called
+    fluxcli.php and for this to run correctly the commandline (cli)
+    version of PHP must be installed with the correct extensions
+    (database, PCRE, socket and session support).  See the notes above
+    in section 3.1 for more information.
+
+
+    o (Optional) Fluxd database connectivity using Perl DBI - Perl DBI
+      db connectivity is not required to run Fluxd.  The option exists
+      to enable perl DBI however in the Admin panel.  The following
+      modules are required to run Fluxd in Perl DBI mode:
+
+       * all database-types:
+         DBI              (perl -MCPAN -e "install Bundle::DBI")
+
+       * MySQL:
+         DBD::mysql       (perl -MCPAN -e "install DBD::mysql")
+
+       * SQLite:
+         DBD::SQLite      (perl -MCPAN -e "install DBD::SQLite")
+
+       * PostgreSQL:
+         DBD::Pg          (perl -MCPAN -e "install DBD::Pg")
+
+
+    o (Optional) Fluxinet functionality requirements - Fluxinet allows
+      remote connections directly to the Fluxd daemon over TCP.  It
+      is not enabled by default:
+
+       * IO::Select       (perl -MCPAN -e "install IO::Select")
+
+       * IO::Socket::INET (perl -MCPAN -e "install IO::Socket::INET")
+
+
+/**********************************************************************
+4. Installation
+**********************************************************************/
+
+    Torrentflux-b4rt can be installed quickly from a web browser using
+    the included setup script. For full details of how to install
+    torrentflux-b4rt, please see the INSTALL file.
+
+    It is also possible to Upgrade an existing installation of 2.1-b4rt
+    or official TF (2.1 - 2.3) to torrentflux-b4rt. Details about
+    upgrades can be found in the INSTALL file.
+
+
+/**********************************************************************
+5. Support
+**********************************************************************/
+
+    Please see the SUPPORT file for information on getting help with
+    your Torrentflux-b4rt installation.

+ 23 - 0
README.md

@@ -0,0 +1,23 @@
+
+# Torrentflux-b4rt (PHP 7)
+This whole repository is basically a clone of the last SVN revision found
+for torrentflux-b4rt taken from http://sourceforge.net/projects/tf-b4rt.berlios/.
+
+All credits go to the original devs
+
+## What do you mean with PHP 7
+I have used torrentflux-b4rt for a long time, but when my server distro moved to PHP 7
+it stopped working. So what you see here is a version that works again in PHP 7.
+
+## Changes
+* Updated adodb
+* Added mysqli as supported db
+* Fixes for functions that were removed in PHP 7
+
+## Warning
+Everything I changed was in the html folder, which is actually what I had on my server.
+
+I did *NOT* do a fresh setup with this package! Most likely it works, but I really did not try.
+
+If you only want to upgrade your existing installation, just copy the contents of the html
+folder, except your db config in /html/inc/config/config.db.php.

+ 105 - 0
SUPPORT

@@ -0,0 +1,105 @@
+=======================================================================
+$Id: SUPPORT 2861 2007-04-11 22:24:26Z munk $
+=======================================================================
+
+/**********************************************************************
+Torrentflux-b4rt SUPPORT file
+**********************************************************************/
+
+	For support with your torrentflux-b4rt installation please see the
+	torrentflux-b4rt forum:
+
+		http://tf-b4rt.berlios.de/forum/
+
+	Please take the time to search the forum first for a solution to your
+	problem.  If you still can't find the solution after searching, please
+	leave a clear and concise subject/message:
+
+	1.	Ensure the subject of your message takes the format:
+
+			[torrentflux-b4rt-VersionXXX] Description
+
+		where:
+
+			VersionXXX is the version of torrentflux-b4rt you are
+			running
+			- ie 1.0-alpha7.
+
+			and
+
+			Description gives a concise description of your problem.
+			A message with a subject of 'HELP!!!' is NOT a good subject
+			and is likely to be ignored.
+
+		For example, a good subject for a message would be something
+		like:
+
+		[torrentflux-b4rt-alpha7] Transfers fail to start when template
+		caching enabled
+
+
+	2.	Ensure the content of your message describes:
+
+		a.	Your working environment if relevant
+
+			- ie OS, PHP/DB/Python/etc
+
+			(if in doubt, always include this info!)
+
+		b.	A clear and concise description of what the problem is, what
+			you did to see the problem and any additional info that you
+			think might be relevant to helping us help you fix the
+			problem.
+
+
+			IMPORTANT:
+			==========
+
+			Most useful of all are the torrentflux-b4rt logfiles for
+			troubleshooting.  Always check these logfiles first and
+			ALWAYS post the relevant info from them when posting on the
+			forum!:
+
+			- transfer logfiles - click on the logfile icon in the
+			  transfer list to view the transfer logfiles
+
+			- fluxd logfiles - the fluxd logs are located in the
+			  Administration section for fluxd, one for fluxd general
+			  messages and one for fluxd errors.  Check both of these
+			  when troubleshooting fluxd problems.
+
+
+		c.	What steps can be taken to reproduce the problem - this is
+			useful to help others test your problem out.
+
+		d.	A suggested fix for the problem if you think this is a bug -
+			these kind of posts are always welcome!!!
+
+			Please be very careful, however, before labelling something a
+			'bug' unless you are absolutely positive it is!
+
+
+	3.	This is probably one of the most obvious important factors when
+		asking a question on a technical forum but is surprisingly
+		overlooked very often:
+
+		MAKE SURE YOU ACTUALLY ASK A QUESTION!
+
+		It's surprising how often people rock up on a forum with the
+		intention of getting help, but they don't actually ask a
+		question and only provide a load of information that leaves
+		potential helpers thinking 'And.... the question is...?'!
+
+		Also make sure you ask a question that doesn't just require a
+		yes/no answer.  Before you actually submit the post, use the
+		'Preview' feature to read over your message and check that any
+		questions you ask won't be simple 'yes/no' answers (unless of
+		course that's all you want!).
+
+
+	Finally, PLEASE PLEASE PLEASE take the time to read the following
+	document if you plan on asking a question on the forum. This is
+	recommended reading for all levels of users who wish to get help on
+	a technical forum:
+
+		http://catb.org/~esr/faqs/smart-questions.html

+ 3 - 0
TODO

@@ -0,0 +1,3 @@
+================================================================================
+$Id: TODO 3363 2008-03-21 20:52:56Z b4rt $
+================================================================================

+ 58 - 0
addons/mrtg/flux-mrtg-update.sh

@@ -0,0 +1,58 @@
+#!/bin/sh
+################################################################################
+# $Id: flux-mrtg-update.sh 2054 2006-12-30 16:57:55Z b4rt $
+# $Revision: 2054 $
+# $Date: 2006-12-30 10:57:55 -0600 (Sat, 30 Dec 2006) $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+
+# defaults
+FLUXPATH="/usr/local/torrentflux"
+CONFFILE="/etc/mrtg/mrtg.flux.cfg"
+BIN_MRTG="/usr/bin/mrtg"
+DEFAULT_CONFFILE="/etc/mrtg/flux-mrtg.conf"
+
+# load conf-file
+if [ "$1X" != "X" ] ; then
+  if [ -e "$1" ] ; then
+    . $1
+  fi
+else
+  if [ -e "$DEFAULT_CONFFILE" ] ; then
+    . $DEFAULT_CONFFILE
+  fi
+fi
+
+# check for mrtg-bin
+if [ ! -x "$BIN_MRTG" ] ; then
+  BIN_MRTG=`whereis mrtg | awk '{print $2}'`
+  if [ ! -x "$BIN_MRTG" ] ; then
+    echo "error: cant find mrtg"
+    exit
+  fi
+fi
+
+# check for mrtg-directory, create if missing.
+if [ ! -d "$FLUXPATH/.mrtg" ] ; then
+  mkdir -p $FLUXPATH/.mrtg
+fi
+
+# invoke mrtg for flux
+$BIN_MRTG $CONFFILE | tee -a $FLUXPATH/.mrtg/mrtg.log
+

+ 17 - 0
addons/mrtg/flux-mrtg.conf

@@ -0,0 +1,17 @@
+# $Id: flux-mrtg.conf 2876 2007-04-12 11:30:34Z munk $
+
+# set all lines after "CHANGEME" (get a list with: "grep -A 1 CHANGEME this")
+
+# flux-path (aka "Path" in flux-settings)
+# CHANGEME
+FLUXPATH="/usr/local/torrentflux"
+
+# path to mrtg-conf-file.
+# wherever this is, ensure directory is writable for the user running this
+# script (-> user running mrtg) (mrtg creates a lockfile at its conf-file(s))
+# CHANGEME
+CONFFILE="/etc/mrtg/mrtg.flux.cfg"
+
+# where is mrtg
+# CHANGEME
+BIN_MRTG="/usr/bin/mrtg"

+ 456 - 0
addons/mrtg/fluxpoller.pl

@@ -0,0 +1,456 @@
+#!/usr/bin/perl
+################################################################################
+# $Id: fluxpoller.pl 2651 2007-03-19 17:26:32Z b4rt $
+# $Revision: 2651 $
+# $Date: 2007-03-19 12:26:32 -0500 (Mon, 19 Mar 2007) $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+use strict;
+################################################################################
+
+# load-average multiplier
+# CHANGEME
+my $AVGmultiplier = "100";
+
+# stat-file-dir (".transfers" in tf-b4rt and ".torrents" in TF 2.1 / 2.1-b4rt)
+my $STATFILEDIR=".transfers";
+
+# webserver-user
+# (only used on bsd)
+my $WEBUSER = "www";
+
+# define socket-bins. default : qw( python transmissionc wget )
+# (only used on bsd)
+my @BINS_SOCKET = qw( python transmissionc wget java );
+
+# should we try to find needed binaries ? (using "whereis" + "awk")
+# use 1 to activate, else "constants" are used (the faster + safer way)
+my $autoFindBinaries = 0;
+
+# Internal Vars
+my ($REVISION, $DIR, $PROG, $EXTENSION, $USAGE, $OSTYPE);
+
+# bin Vars
+my ($BIN_CAT, $BIN_HEAD, $BIN_TAIL, $BIN_NETSTAT, $BIN_SOCKSTAT, $BIN_GREP, $BIN_AWK);
+
+# check env
+checkEnv();
+
+# define common binaries
+$BIN_CAT = "/bin/cat";
+$BIN_HEAD = "/usr/bin/head";
+$BIN_TAIL = "/usr/bin/tail";
+$BIN_AWK = "/usr/bin/awk";
+if ($OSTYPE == 1) { # linux
+	$BIN_GREP = "/bin/grep";
+	$BIN_NETSTAT = "/bin/netstat";
+} elsif ($OSTYPE == 2) { # bsd
+	$BIN_GREP = "/usr/bin/grep";
+	$BIN_SOCKSTAT = "/usr/bin/sockstat";
+}
+
+#-------------------------------------------------------------------------------
+# Main
+#-------------------------------------------------------------------------------
+
+# find binaries
+if ($autoFindBinaries != 0) { findBinaries() };
+
+# init some vars
+$REVISION =
+	do { my @r = (q$Revision: 2651 $ =~ /\d+/g); sprintf "%d"."%02d" x $#r, @r };
+($DIR=$0) =~ s/([^\/\\]*)$//;
+($PROG=$1) =~ s/\.([^\.]*)$//;
+$EXTENSION=$1;
+
+# main-"switch"
+SWITCH: {
+	$_ = shift @ARGV;
+	/^traffic/ && do { # --- traffic ---
+		printTraffic(shift @ARGV, shift @ARGV);
+		exit;
+	};
+	/^connections/ && do { # --- connections ---
+		printConnections(shift @ARGV);
+		exit;
+	};
+	/^loadavg/ && do { # --- LOAD AVG ---
+		printLoadAVG(shift @ARGV);
+		exit;
+	};
+	/.*(version|-v).*/ && do { # --- version ---
+		printVersion();
+		exit;
+	};
+	/.*(help|-h).*/ && do { # --- help ---
+		printUsage();
+		exit;
+	};
+	printUsage();
+	exit;
+}
+
+#===============================================================================
+# Subs
+#===============================================================================
+
+#------------------------------------------------------------------------------#
+# Sub: printTraffic                                                            #
+# Parameters: string with path of flux-dir                                     #
+#             string with wanted output-format (mrtg|cacti)                    #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
+sub printTraffic {
+	my $fluxDir = shift;
+	if (!(defined $fluxDir)) {
+		printUsage();
+		exit;
+	}
+	$fluxDir .= "/".$STATFILEDIR;
+	my $outputFormat = shift;
+	if ($outputFormat eq "mrtg") {
+		mrtgPrintTraffic($fluxDir);
+	} elsif ($outputFormat eq "cacti") {
+		cactiPrintTraffic($fluxDir);
+	} else {
+		# get traffic-vals
+		my @traffic = fluxTraffic($fluxDir);
+		# print traffic-vals
+		print $traffic[0]." ".$traffic[1]."\n";
+	}
+}
+
+#------------------------------------------------------------------------------#
+# Sub: mrtgPrintTraffic                                                        #
+# Parameters: string with path of flux-".stat-files"-dir                       #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
+sub mrtgPrintTraffic {
+	my $fluxDir = shift;
+	# get traffic-vals
+	my @traffic = fluxTraffic($fluxDir);
+	# print down-speed for mrtg
+	print $traffic[0];
+	print "\n";
+	# print up-speed for mrtg
+	print $traffic[1];
+	print "\n";
+	# print uptime for mrtg
+	mrtgPrintUptime();
+	# print target-name for mrtg
+	mrtgPrintTargetname();
+}
+
+#------------------------------------------------------------------------------#
+# Sub: cactiPrintTraffic                                                       #
+# Parameters: string with path of flux-".stat-files"-dir                       #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
+sub cactiPrintTraffic {
+	my $fluxDir = shift;
+	# get traffic-vals
+	my @traffic = fluxTraffic($fluxDir);
+	# print traffic for cacti
+	my $trafficLine = "";
+	$trafficLine .= "bandwidth_in:";
+	$trafficLine .= $traffic[0];
+	$trafficLine .= " ";
+	$trafficLine .= "bandwidth_out:";
+	$trafficLine .= $traffic[1];
+	print $trafficLine;
+}
+
+#------------------------------------------------------------------------------#
+# Sub: printConnections                                                        #
+# Parameters: string with wanted output-format (mrtg|cacti)                    #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
+sub printConnections {
+	my $outputFormat = shift;
+	if ($outputFormat eq "mrtg") {
+		mrtgPrintConnections();
+	} elsif ($outputFormat eq "cacti") {
+		cactiPrintConnections();
+	} else {
+		print fluxConnections();
+		print "\n";
+	}
+}
+
+#------------------------------------------------------------------------------#
+# Sub: mrtgPrintConnections                                                    #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
+sub mrtgPrintConnections {
+	# print down-"speed" for mrtg
+	print fluxConnections();
+	print "\n";
+	# print up-"speed" for mrtg
+	print "0";
+	print "\n";
+	# print uptime for mrtg
+	mrtgPrintUptime();
+	# print target-name for mrtg
+	mrtgPrintTargetname();
+}
+
+#------------------------------------------------------------------------------#
+# Sub: cactiPrintConnections                                                   #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
+sub cactiPrintConnections {
+	# print connections for cacti
+	print fluxConnections();
+}
+
+#------------------------------------------------------------------------------#
+# Sub: printLoadAVG                                                            #
+# Parameters: string with wanted output-format (mrtg|cacti)                    #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Sub: printLoadAVG                                                            #
# Parameters: string with wanted output-format (mrtg|cacti)                    #
# Return: null                                                                 #
#------------------------------------------------------------------------------#
sub printLoadAVG {
	my ($outputFormat) = @_;
	if ($outputFormat eq "mrtg") {
		mrtgPrintLoadAVG();
	} elsif ($outputFormat eq "cacti") {
		cactiPrintLoadAVG();
	} else {
		# LoadAVG() prints its values itself and returns the status of
		# its last print (usually 1), so wrapping it in another print
		# appended a spurious "1" to the output. Just call it.
		LoadAVG();
	}
}
+
+#------------------------------------------------------------------------------#
+# Sub: mrtgPrintLoadAVG                                                        #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
sub mrtgPrintLoadAVG {
	# MRTG four-line format: LoadAVG() emits the two value lines
	# (1-min and 5-min averages), then uptime and target name follow.
	LoadAVG();
	mrtgPrintUptime();
	mrtgPrintTargetname();
}
+
+#------------------------------------------------------------------------------#
+# Sub: cactiPrintLoadAVG                                                       #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
sub cactiPrintLoadAVG {
	# Cacti consumes the same two value lines LoadAVG() prints itself.
	LoadAVG();
}
+
+#------------------------------------------------------------------------------#
+# Sub: LoadAVG                                                                 #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Sub: LoadAVG                                                                 #
# Parameters: null                                                             #
# Return: null (prints 1-min and 5-min load averages, one per line,            #
#         each scaled by the global $AVGmultiplier)                            #
#------------------------------------------------------------------------------#
sub LoadAVG {
	my ($AVG1min, $AVG5min, $AVG15min);
	if ($OSTYPE == 1) { # linux: first three fields of /proc/loadavg
		my $loadAVG = `cat /proc/loadavg`;
		# limit 4: with limit 3 the third "field" was the whole rest of
		# the line, not the 15-minute average.
		($AVG1min, $AVG5min, $AVG15min) = split /\s/, $loadAVG, 4;
	} elsif ($OSTYPE == 2) { # bsd: parse the `uptime` summary line
		my $loadAVG = `uptime`;
		($AVG1min, $AVG5min, $AVG15min) =
			$loadAVG =~ /load averages: (\S+), (\S+), (\S+)/;
	}
	# Guard against an unknown OS or a failed parse: report zero instead
	# of emitting "use of uninitialized value" warnings.
	$AVG1min = 0 unless defined $AVG1min;
	$AVG5min = 0 unless defined $AVG5min;
	# 1-minute average
	print $AVG1min * $AVGmultiplier, "\n";
	# 5-minute average ($AVG15min is parsed but intentionally unused)
	print $AVG5min * $AVGmultiplier, "\n";
}
+
+#------------------------------------------------------------------------------#
+# Sub: mrtgPrintUptime                                                         #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Sub: mrtgPrintUptime                                                         #
# Parameters: null                                                             #
# Return: null (prints the MRTG uptime line)                                   #
#------------------------------------------------------------------------------#
sub mrtgPrintUptime {
	# Extract "up <duration>, <users>" from `uptime`. Check the match:
	# on failure $1/$2 would otherwise retain stale values from an
	# earlier, unrelated regex match.
	my $uptime = `uptime`;
	if ($uptime =~ /up (.*?), (.*?), /) {
		print "$1, $2\n";
	} else {
		print "\n";
	}
}
+
+#------------------------------------------------------------------------------#
+# Sub: mrtgPrintTargetname                                                     #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
sub mrtgPrintTargetname {
	# MRTG target name: the machine's hostname. The trailing newline
	# comes from the hostname command's own output.
	print scalar `hostname`;
}
+
+#------------------------------------------------------------------------------#
+# Sub: fluxTraffic                                                             #
+# Parameters:	string with path of flux-".stat-files"-dir                     #
+# Return: array with current down-traffic ([0]) and up-traffic ([1])           #
+#------------------------------------------------------------------------------#
#------------------------------------------------------------------------------#
# Sub: fluxTraffic                                                             #
# Parameters:	string with path of flux-".stat-files"-dir                     #
# Return: array with current down-traffic ([0]) and up-traffic ([1]) in B/s    #
#------------------------------------------------------------------------------#
sub fluxTraffic {
	my ($fluxDir) = @_;
	my $downspeed = 0.0;
	my $upspeed = 0.0;
	# Collect full paths of all non-hidden *.stat files. If the dir
	# cannot be opened, report zero traffic instead of warning.
	opendir(DIR, $fluxDir) or return (0, 0);
	my @files =
		map { "$fluxDir/$_" }        # full paths
		grep { !/^\./ && /\.stat$/ } # no dot-files, only .stat-files
		readdir(DIR);
	closedir(DIR);
	foreach my $statFile (@files) {
		next unless -f $statFile;
		# Lines 4 and 5 of a .stat file hold down- and up-speed.
		my ($down, $up) = split(/\n/, `$BIN_CAT $statFile | $BIN_HEAD -n 5 | $BIN_TAIL -n 2`, 2);
		# String comparison ("ne"); the original "!=" compared numerically.
		if (defined($down) && $down ne "") {
			$down =~ s/(.*\d)(\s.*)/$1/; # strip trailing unit text
			chomp $down;
			$downspeed += $down;
		}
		if (defined($up) && $up ne "") {
			$up =~ s/(.*\d)(\s.*)/$1/;
			chomp $up;
			$upspeed += $up;
		}
	}
	# Convert kB/s to B/s. Multiplication (not "<<10") preserves the
	# fractional part of the summed speeds.
	my @retVal;
	$retVal[0] = $downspeed * 1024;
	$retVal[1] = $upspeed * 1024;
	return @retVal;
}
+
+#------------------------------------------------------------------------------#
+# Sub: fluxConnections                                                         #
+# Parameters: null                                                             #
+# Return: int with current flux-tcp-connections (python + transmission)        #
+#------------------------------------------------------------------------------#
sub fluxConnections {
	my $cons = 0;
	if ($OSTYPE == 1) { # linux: count netstat lines for known clients
		my $count = `$BIN_NETSTAT -e -p --tcp -n 2> /dev/null | $BIN_GREP -v root | $BIN_GREP -v 127.0.0.1 | $BIN_GREP -cE '.*(python|transmissionc|wget).*'`;
		chomp $count;
		$cons = int $count;
	} elsif ($OSTYPE == 2) { # bsd: sum sockstat matches per client binary
		for my $bin_socket (@BINS_SOCKET) {
			my $count = `$BIN_SOCKSTAT | $BIN_GREP -cE $WEBUSER.+$bin_socket.+tcp`;
			chomp $count;
			$cons += $count;
		}
	}
	return $cons;
}
+
+#------------------------------------------------------------------------------#
+# Sub: findBinaries                                                            #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
sub findBinaries {
	# Resolve each required external binary via whereis; the second
	# awk field is the first path found. (\$2 stays literal for awk.)
	my $locate = sub {
		my $path = `whereis $_[0] | awk '{print \$2}'`;
		chomp $path;
		return $path;
	};
	$BIN_CAT      = $locate->('cat');
	$BIN_HEAD     = $locate->('head');
	$BIN_TAIL     = $locate->('tail');
	$BIN_NETSTAT  = $locate->('netstat');
	$BIN_SOCKSTAT = $locate->('sockstat');
	$BIN_GREP     = $locate->('grep');
	$BIN_AWK      = $locate->('awk');
}
+
+#------------------------------------------------------------------------------#
+# Sub: checkEnv                                                                #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
sub checkEnv {
	my $os = $^O;
	# Win32 is not supported at all; bail out early.
	if ($os =~ /win32/i) {
		print "\r\nWin32 not supported.\r\n";
		exit;
	}
	# Map the OS name onto the numeric $OSTYPE flag used elsewhere.
	# Anything else leaves $OSTYPE untouched.
	$OSTYPE = 1 if $os =~ /linux/i;
	$OSTYPE = 2 if $os =~ /bsd$/i;
}
+
+#------------------------------------------------------------------------------#
+# Sub: printVersion                                                            #
+# Arguments: Null                                                              #
+# Returns: Version Information                                                 #
+#------------------------------------------------------------------------------#
sub printVersion {
	# One-line version banner, e.g. "fluxpoller.pl Version <rev>".
	print "$PROG.$EXTENSION Version $REVISION\n";
}
+
+#------------------------------------------------------------------------------#
+# Sub: printUsage                                                              #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
sub printUsage {
	# Print the command-line help text. The interpolating heredoc fills
	# in the program name, extension and revision from file globals.
	print <<"USAGE";
$PROG.$EXTENSION (Revision $REVISION)

Usage: $PROG.$EXTENSION type [extra-args]

types:
<traffic>     : print current flux-traffic.
                extra-args : 1. flux-dir (aka "Path" inside flux-admin)
                             2. (optional) output-format (mrtg|cacti)

<connections> : print current flux-tcp-connections.
                extra-args : 1. (optional) output-format (mrtg|cacti)

<loadavg>     : print current load-average.
                extra-args : 1. (optional) output-format (mrtg|cacti)

Examples:

$PROG.$EXTENSION traffic /usr/local/torrentflux
$PROG.$EXTENSION traffic /usr/local/torrentflux mrtg
$PROG.$EXTENSION traffic /usr/local/torrentflux cacti

$PROG.$EXTENSION connections
$PROG.$EXTENSION connections mrtg
$PROG.$EXTENSION connections cacti

$PROG.$EXTENSION loadavg
$PROG.$EXTENSION loadavg mrtg
$PROG.$EXTENSION loadavg cacti

USAGE

}
+
+# EOF

+ 129 - 0
addons/mrtg/mrtg.flux.cfg

@@ -0,0 +1,129 @@
+# $Id: mrtg.flux.cfg 2488 2007-02-03 17:26:52Z b4rt $
+
+
+# set all lines after "CHANGEME" (get a list with : "grep -A 1 CHANGEME this")
+
+# ------------------------------------------------------------------------------
+# paths
+# ------------------------------------------------------------------------------
+
+# you _must_ set all 3 directory-paths. watch for permissions (!).
+
+# CHANGEME
+Logdir: /usr/local/torrentflux/.mrtg/
+# CHANGEME
+HtmlDir: /usr/local/torrentflux/.mrtg/
+# CHANGEME
+Imagedir: /usr/local/torrentflux/.mrtg/
+
+
+# ------------------------------------------------------------------------------
+# defaults
+# ------------------------------------------------------------------------------
+
+# generic "output-styling"
+Colours[_]: GREEN#00eb0c,BLUE#1000ff,DARK GREEN#006600,VIOLET#ff00ff
+Background[_]: #a0a0a0
+
+# _dont_ change this when using inside tfb
+PageTop[_]: _CONTENT_BEGIN_
+PageFoot[_]: _CONTENT_END_
+Extension[_]: inc
+
+# ------------------------------------------------------------------------------
+# target : traffic
+# ------------------------------------------------------------------------------
+
+# invoke fluxpoller.pl to gather data for this target
+# CHANGEME
+Target[traffic]: `/usr/local/bin/fluxpoller.pl traffic /usr/local/torrentflux mrtg`
+
+# flux line speed
+# CHANGEME
+MaxBytes[traffic]: 1000000
+# 1024 bytes are 1 kilo
+kilo[traffic]: 1024
+
+# title
+Title[traffic]: traffic
+# title in the graph
+PNGTitle[traffic]: traffic
+
+# options. you _must_ use "gauge" when using with fluxpoller.pl !
+Options[traffic]: gauge,growright,nobanner,nopercent,pngdate,transparent
+
+# peaks
+WithPeak[traffic]: ym
+
+
+# ------------------------------------------------------------------------------
+# target : connections
+# ------------------------------------------------------------------------------
+
+# invoke fluxpoller.pl to gather data for this target
+# CHANGEME
+Target[connections]: `/usr/local/bin/fluxpoller.pl connections mrtg`
+
+# flux max connections
+# CHANGEME
+MaxBytes[connections]: 500
+# 1000 cons are 1 kilo ;)
+kilo[connections]: 1000
+
+# title
+Title[connections]: connections
+# title in the graph
+PNGTitle[connections]: connections
+
+# options. you _must_ use "gauge" when using with fluxpoller.pl !
+# "noo" is required in connections-case with fluxpoller.pl
+# "integer" should be used
+Options[connections]: gauge,noo,integer,growright,nobanner,nopercent,pngdate,transparent
+
+# peaks
+WithPeak[connections]: ym
+
+# legend
+YLegend[connections]: Connections
+ShortLegend[connections]: &nbsp;
+Legend1[connections]: Connections
+Legend2[connections]: &nbsp;
+Legend3[connections]: Maximal 5 Minute Connections
+Legend4[connections]: &nbsp;
+LegendI[connections]: cons:
+LegendO[connections]: &nbsp;
+
+
+# ------------------------------------------------------------------------------
+# target : LoadAVG
+# ------------------------------------------------------------------------------
+
+# CHANGEME
+Target[loadavg]: `/usr/local/bin/fluxpoller.pl loadavg mrtg`
+
+#title
+Title[loadavg]: LoadAVG
+# title in the graph
+PNGTitle[loadavg]: loadAVG
+
+#options
+Options[loadavg]: gauge,integer,growright,nobanner,nopercent,pngdate,transparent
+
+#page title HTML
+PageTop[loadavg]: <h1>One and Five minute load averages</h1>
+
+# MaxBytes
+MaxBytes[loadavg]: 50000
+# 1 kilo def
+kilo[loadavg]: 1000
+
+# peaks
+WithPeak[loadavg]: ym
+
+# legends
+YLegend[loadavg]: LoadAVG
+ShortLegend[loadavg]: &nbsp;
+LegendI[loadavg]: &nbsp;1min load avg
+LegendO[loadavg]: &nbsp;5min load avg
+Legend1[loadavg]: 1min load avg
+Legend2[loadavg]: 5min load avg

+ 151 - 0
addons/mrtg/mrtg.flux.examples.cfg

@@ -0,0 +1,151 @@
+# $Id: mrtg.flux.examples.cfg 672 2006-09-10 12:29:05Z b4rt $
+
+
+# ------------------------------------------------------------------------------
+# target : loadavg
+# ------------------------------------------------------------------------------
+
+# gather data for this target
+# CHANGEME
+Target[loadavg]: `/usr/bin/mrtg-load -m 1000`
+
+# MaxBytes
+MaxBytes[loadavg]: 50000
+# 1 kilo def
+kilo[loadavg]: 1000
+
+# title
+Title[loadavg]: loadavg
+# title in the graph
+PNGTitle[loadavg]: loadavg
+
+# options.
+Options[loadavg]: gauge,integer,growright,nobanner,nopercent,pngdate,transparent
+
+# peaks
+WithPeak[loadavg]: ym
+
+# legend
+YLegend[loadavg]: loadavg
+ShortLegend[loadavg]: &nbsp;
+# ....
+
+
+# ------------------------------------------------------------------------------
+# target : apachestats
+# ------------------------------------------------------------------------------
+
+# gather data for this target
+# CHANGEME
+Target[apachestats]: `/usr/bin/mrtg-apache -m 100 localhost`
+
+# MaxBytes
+MaxBytes[apachestats]: 20000
+# 1 kilo def
+kilo[apachestats]: 1000
+
+# title
+Title[apachestats]: apachestats
+# title in the graph
+PNGTitle[apachestats]: apachestats
+
+# options.
+Options[apachestats]: gauge,integer,growright,nobanner,nopercent,pngdate,transparent
+
+# peaks
+WithPeak[apachestats]: ym
+
+# legend
+YLegend[apachestats]: loadavg
+ShortLegend[apachestats]: &nbsp;
+# ....
+
+
+# ------------------------------------------------------------------------------
+# target : sensors_temperature
+# ------------------------------------------------------------------------------
+
+# gather data for this target
+# CHANGEME
+Target[sensors_temperature]: `mrtg-sensors -m 800 dummy dummy w83781d-i2c-0-2d Temp`
+
+# MaxBytes
+MaxBytes[sensors_temperature]: 10000
+# 1 kilo def
+kilo[sensors_temperature]: 1000
+
+# title
+Title[sensors_temperature]: sensors_temperature
+# title in the graph
+PNGTitle[sensors_temperature]: temperature
+
+# options.
+Options[sensors_temperature]: gauge,integer,growright,nobanner,nopercent,pngdate,transparent,bits
+
+# peaks
+WithPeak[sensors_temperature]: ym
+
+# legend
+YLegend[sensors_temperature]: temperature
+ShortLegend[sensors_temperature]: &nbsp;
+# ....
+
+
+# ------------------------------------------------------------------------------
+# target : sensors_fans
+# ------------------------------------------------------------------------------
+
+# gather data for this target
+# CHANGEME
+Target[sensors_fans]: `mrtg-sensors -m 400 w83781d-i2c-0-2d FanCPU1 w83781d-i2c-0-2d FanCPU2`
+
+# MaxBytes
+MaxBytes[sensors_fans]: 100000
+# 1 kilo def
+kilo[sensors_fans]: 1000
+
+# title
+Title[sensors_fans]: sensors_fans
+# title in the graph
+PNGTitle[sensors_fans]: fans
+
+# options.
+Options[sensors_fans]: gauge,integer,growright,nobanner,nopercent,pngdate,transparent,bits
+
+# peaks
+WithPeak[sensors_fans]: ym
+
+# legend
+YLegend[sensors_fans]: fans
+ShortLegend[sensors_fans]: &nbsp;
+# ....
+
+
+# ------------------------------------------------------------------------------
+# target : disk_usage
+# ------------------------------------------------------------------------------
+
+# gather data for this target
+# CHANGEME
+Target[disk_usage]: `/usr/local/bin/foo.bar /usr/local/torrentflux`
+
+# flux torrent dir capacity
+MaxBytes[disk_usage]: 100000000000
+# 1024 bytes are 1 kilo
+kilo[disk_usage]: 1024
+
+# title
+Title[disk_usage]: disk_usage
+# title in the graph
+PNGTitle[disk_usage]: disk_usage
+
+# options. 
+Options[disk_usage]: gauge,growright,nobanner,pngdate,transparent
+
+# peaks
+WithPeak[disk_usage]: ym
+
+# legend
+YLegend[disk_usage]: Disk-Usage
+ShortLegend[disk_usage]: &nbsp;
+# ....

BIN
clients/tornado/BitTornado-0.3.15.tar.bz2


BIN
clients/tornado/BitTornado-0.3.17.tar.bz2


BIN
clients/tornado/BitTornado-0.3.18.tar.bz2


+ 46 - 0
clients/transmission/INSTALL

@@ -0,0 +1,46 @@
+=================================================================
+$Id: INSTALL 3365 2008-03-22 15:03:03Z b4rt $
+=================================================================
+
+
+Transmission 1.06 and Transmission 0.96
+---------------------------------------
+
+1. Get the Transmission Source Code Release from
+   http://www.transmissionbt.com/
+
+2. Untar the package into a directory.
+
+   tar -jxvf transmission-<version>.tar.bz2
+
+3. Untar the tfb-package into a directory.
+
+   tar -jxvf Transmission-<version>_tfCLI-<version>.tar.bz2
+
+4. Copy the content of the tfb-package extracted in 3. to the
+   Transmission-Directory extracted in 2. (overwriting files).
+
+   FreeBSD only: Apply the patch "freebsd.patch" (file was just
+                 copied to the Transmission-Directory)
+
+5. Build + Install transmissioncli, instructions about how to
+   do this can be found in the Transmission-Documentation.
+
+
+Transmission 0.72
+-----------------
+
+1. Untar the package into a directory.
+
+   tar -jxvf Transmission-0.72_tfCLI-svn3318.tar.bz2
+
+2. Build + Install transmissioncli.
+   Go to the directory just extracted and execute :
+
+   ./configure --disable-gtk
+   make
+   make install
+   
+   Note: It may be called 'make' or 'gmake' depending on
+         your system.
+

BIN
clients/transmission/Transmission-0.72_tfCLI-svn3318.tar.bz2


BIN
clients/transmission/Transmission-0.96_tfCLI-svn3355.tar.bz2


BIN
clients/transmission/Transmission-1.06_tfCLI-svn3356.tar.bz2


+ 287 - 0
doc/azureus.txt

@@ -0,0 +1,287 @@
+=======================================================================
+$Id: azureus.txt 3238 2007-10-28 15:01:59Z b4rt $
+vim: set comments=fb\:o,fb\:#,fb\:-,fb\:*,fb\:A.,fb\:Q. et tw=72 sw=4:
+=======================================================================
+
+/**********************************************************************
+Torrentflux-b4rt Fluazu Setup Guide
+**********************************************************************/
+
+Introduction
+
+    Fluazu provides support for Azureus in Torrentflux-b4rt.  The
+    azureus server runs in the background and the fluazu interface
+    communicates with the azureus server to control the torrents running
+    under Azureus.
+
+    This guide covers:
+
+    *   Requirements
+
+    *   Installation and initial configuration of a working console
+        based Azureus - no X required - plus installation of the Azureus
+        XML over HTTP plugin.
+
+        With thanks to 'Jessy' on the forums:
+        https://tf-b4rt.berlios.de/forum/index.php?topic=903.msg3493
+
+    *   Configuring Fluazu within Torrentflux-b4rt.
+
+    *   Fluazu Limitations
+
+    *   Checking Azureus Works
+
+
+Requirements
+
+    *   Python 2.2.1 or higher for fluazu-daemon
+
+        http://www.python.org
+
+    *   Azureus 2.5.0.0 or higher
+
+        http://azureus.sourceforge.net
+
+    *   The Azureus XML/HTTP Plugin
+
+        http://azureus.sourceforge.net/plugin_details.php?plugin=xml_http_if
+
+
+Installation And Initial Configuration Of Azureus Without X
+
+    --
+    Submitted by Jessy with thanks:
+    https://tf-b4rt.berlios.de/forum/index.php?topic=903.msg3493
+    --
+
+    This HOWTO assumes you already have java installed.
+
+    You should chown all the files you have created/downloaded to the
+    user Azureus will run as.
+
+    1.  Download necessary files:
+
+        Go to:
+
+        http://azureus.sourceforge.net/index_CVS.php
+
+        Download:
+
+        *   AzureusXXXX-XXX.jar
+        *   log4j.jar
+        *   commons-cli.jar
+
+        Go to:
+
+        http://azureus.sourceforge.net/plugin_details.php?plugin=xml_http_if
+
+        Download the XML over HTTP plugin.
+
+        Save all downloaded files in the same directory and rename
+        AzureusXXXX-XXX.jar into Azureus2.jar
+
+        Note:
+        -----
+        While it doesn't matter where you install Azureus, it is
+        important that a directory called /var/www/.azureus exists and
+        is writable by Azureus. It stores its configuration there (i
+        think this is a bug?).
+
+    2.  Run Azureus on the commandline and do some basic configuration
+        (optional).  On the shell commandline run:
+
+        java -jar Azureus2.jar --ui=console
+
+        When Azureus shows up type this at the console to set ports:
+
+        set TCP.Listen.Port 50000
+        set UDP.Listen.Port 50001
+        set UDP.NonData.Listen.Port 50002
+
+        You may also want to check if the port configuration is ok with
+        the following command in the azureus console:
+
+        show nat
+
+        Finally, exit out of Azureus for now:
+
+        quit
+
+    3.  Install XML over HTTP plugin:
+
+        Make a directory called xml_http_if in your plugins directory,
+        which should be where you installed Azureus.
+
+        Unpack xml_http_if_1.0.zip into that directory.
+
+    4.  Create a script that runs Azureus in background:
+
+        Create an executable (+x) azureusd.pl file containing the
+        following code:
+
+        #!/usr/bin/perl
+
+        use POSIX 'setsid';
+        open STDIN, "/dev/null";
+        open STDOUT,">/dev/null";
+        open STDERR,">/dev/null";
+        exit if fork > 0;
+        setsid;
+        exec("java -jar Azureus2.jar --ui=console");
+
+    5.  Start up Azureus using the script created above.
+
+        IMPORTANT:
+        ----------
+        Ensure the azureus server is run as the webserver user - use su
+        or sudo if necessary.
+
+    Notes:
+    ------
+    Useful links for installation / configuration of console azureus:
+
+    http://azureuswiki.com/index.php/Daemonized_Azureus
+    http://www.azureuswiki.com/index.php/ConsoleUI
+    http://tf-b4rt.berlios.de/azureus
+
+
+Configuring Fluazu within Torrentflux-b4rt.
+
+    1.  Login to torrentflux-b4rt web interface as an admin user:
+        + admin
+            + transfer
+                + fluazu-settings
+
+    2.  Enter the required Azureus server settings in the form and on
+        the fluazu settings page and save.
+
+        Example:
+        -----
+        Note this is only an example, enter the settings appropriate for
+        your own Azureus server setup:
+
+        Host: localhost
+        Port: 6884
+        Secure: false
+        Username: none (leave empty)
+        Password: none (leave empty)
+
+    3.  Start fluazu-daemon - goto the following menu item:
+
+        + admin
+            + transfer
+                + fluazu-settings
+
+        and click the 'start' button.
+
+        Fluazu will start up and connect to the Azureus server. After
+        connecting to the Azureus server, the Fluazu settings page will
+        show an additional form which makes it possible to see and set
+        some additional settings for the Azureus server - eg global
+        bandwidth and connection limits.
+
+    Fluazu is now ready to use just like any other client within
+    torrentflux-b4rt - just select 'azureus' as the transfer client when
+    starting up new transfers.
+
+
+Fluazu Limitations
+
+    Due to the design of fluazu there are some limitations on what
+    features are available when using Azureus in conjunction with
+    Torrentflux-b4rt - all features that depend on process-list or
+    netstat-per-transfer-code:
+
+    -   The transfer hosts page within Torrentflux-b4rt will not display
+        any connection information.
+
+    -   The process list displayed in the superadmin window in
+        Torrentflux-b4rt will not include any Azureus based transfers.
+
+    -   The force stop feature within the superadmin pages will also not
+        work with transfers running under Azureus.
+
+    -   The maintenance feature in Torrentflux-b4rt (auto-restart of
+        dead transfer client process) does not currently work.
+
+
+Checking Azureus Works
+
+    1.  Open a shell on the server and cd to the dir
+        "bin/clients/fluazu" in your tfb-installation.
+
+        For example if torrentflux-b4rt is installed in "/var/www" this
+        would be "/var/www/bin/clients/fluazu"
+
+    2.  Start a python shell. If python is in the path, just enter
+        "python" and hit enter.  This should open up a interactive
+        python shell:
+
+       $:/var/www/bin/clients/fluazu# python
+       Python 2.5 (r25:51908, Sep 19 2006, 23:00:51)
+       [GCC 2.95.4 20011002 (Debian prerelease)] on linux2
+       Type "help", "copyright", "credits" or "license" for more information.
+       >>>
+
+    3.  At the python shell, start the dopal.interact module using the
+        following commands:
+
+        import dopal.interact
+        dopal.interact.main()
+
+        When prompted to enter the Azureus server settings in the python
+        shell, do so:
+
+        Enter host: localhost
+        Enter port (default is 6884): 6884
+        Enter user name (leave blank if not applicable): foobar
+
+    Success!
+    -------
+    If the steps above succeeded, you will be connected to the
+    server successfully and should see:
+
+    ------------------------
+
+    DOPAL 0.60 - interact module
+
+    Connection object stored in 'connection' variable.
+    Plugin interface stored in 'interface' variable.
+
+    >>>
+
+    You can now as further test print the connection using the
+    command "print connection":
+
+    >>> print connection
+    DopalConnection for localhost [Azureus 2.5.0.4]
+
+    For further information, check the examples-page for dopal:
+
+    http://dopal.sourceforge.net/examples.html
+
+
+    Failure!
+    --------
+    If the configuration failed, you will be unable to connect to the
+    server correctly and should see something like:
+
+    ------------------------
+
+    DOPAL 0.60 - interact module
+
+    Connection object stored in 'connection' variable.
+
+    Error getting plugin interface object - could not connect to
+    Azureus, error:
+    URLError: <urlopen error (111, 'Connection refused')>
+    ('http://localhost:6884/process.cgi')
+    >>>
+
+    The message is 'Connection refused' and in this test case the
+    Azureus server at localhost was not running.
+
+    For full troubleshooting please check the requirements page for
+    Dopal:
+
+    http://dopal.sourceforge.net/requirements.html

+ 5610 - 0
doc/manual.txt

@@ -0,0 +1,5610 @@
+=======================================================================
+$Id: manual.txt 3335 2008-02-08 11:19:25Z munk $
+
+vim: set comments=fb\:o,fb\:#,fb\:-,fb\:*,fb\:A.,fb\:Q. et tw=72 sw=4:
+=======================================================================
+
+/**********************************************************************
+Torrentflux-b4rt Reference Manual
+**********************************************************************/
+
+    Contents:
+    =========
+    Introduction
+
+    User operations
+
+        Working with metadata files (.torrent, .wget, .nzb files)
+
+            Uploading metadata (.torrent, .wget, .nzb) files
+                Uploading individual metadata files from your filesystem
+                Uploading multiple torrent files from your filesystem
+                Uploading metadata files from a URL
+
+            Downloading metadata files
+
+            Searching for torrent files
+                Working in the search results page
+                Performing a search
+
+            RSS feed torrents
+
+        Controlling transfers
+            Controlling individual transfers
+                Starting individual transfers in advanced mode
+                Starting individual transfers in quick mode
+                Stopping individual transfers
+
+            Controlling multiple transfers using the multi-ops feature
+                Overview
+                Using multi-ops
+                Available multi-op operations
+
+            Controlling multiple transfers using the bulk-ops feature
+                Overview
+                Using bulk-ops
+
+            Transfer settings window
+                Overview
+                Opening the transfer settings window
+                Viewing general transfer statistics
+                Viewing a transfer's hosts
+                Viewing a transfer's scrape information
+                Viewing 'at a glance' pie charts of a transfer's statistics
+                Viewing a transfer's log file
+                Viewing a transfer's metadata details
+                Viewing the files included in a transfer
+                Changing settings for a transfer
+                Starting, stopping and restarting a transfer
+
+        Server statistics
+            Transfer status lights
+
+            General frontend 'Good looking' statistics
+
+            Viewing detailed server statistics
+                Drivespace and 'who' statistics
+                Process statistics
+                Network connection statistics
+                Transfer statistics
+
+            Opening the server monitor
+
+            Viewing an RSS feed of the transfer list
+
+            Viewing activity history
+
+        Changing the frontend look and feel
+            Reordering the transfer list
+
+            Turning AJAX updates on and off
+
+            Turning page refresh on and off
+
+            Showing/hiding seeding torrents
+
+        Profile management
+            General
+                Changing password/theme/language
+
+                Changing what to display in the frontend
+
+                Deleting torrentflux-b4rt auto login cookies
+
+                Resetting user profile to default
+
+            External cookie management
+                Overview
+
+                How to obtain cookie information
+
+                Adding cookies
+
+                Editing cookies
+
+                Deleting cookies
+
+            Transfer profile management
+                Overview
+
+                Adding transfer profiles
+
+                Editing transfer profiles
+
+                Deleting transfer profiles
+
+        Directory / file manager
+            Overview
+            Opening the directory manager
+            Deleting files and directories
+            Renaming files and directories
+            Moving files and directories out of the transfer directory
+            Making torrents
+            Downloading files and directories
+            Streaming movie files using VLC
+            Viewing NFO files
+            Extracting archive files (zip/rar files)
+            Checking the integrity of files using .sfv files
+
+        Troubleshooting
+            Flushing the template cache
+            Running a maintenance operation
+
+
+    Administration operations
+        Superadmin
+            Overview
+
+            Controlling bulk transfer operations
+
+            Working with processes
+                Viewing detailed process listings
+                Force stopping processes
+
+            Performing maintenance tasks
+                Basic transfer maintenance
+                Killing processes
+                Cleaning transfer file leftovers and client resources
+                Repairing problematic installations
+                Resetting transfer totals, transfer stats and personal
+                settings
+                Locking access to the frontend
+
+            Backups
+                Creating a backup
+                Viewing a backup of transfer data
+
+            Viewing log files
+                Overview
+
+            Miscellaneous actions
+                Viewing a list of installed files and their checksums
+                Checking software requirements
+
+            Checking for updates, news and version info for your
+            torrentflux-b4rt installation
+                Checking your version against the latest version available
+                Viewing news about releases
+                Viewing the changelog for your release
+                Listing and verifying checksums of installed files
+
+        Configuration
+            General statistics and information listed on the admin page
+
+            Server options (server)
+                Torrentflux-b4rt path configuration
+                Binary path configuration
+                Operating system specific configuration
+
+            Transfer client options (transfer)
+                Overview
+                BitTorrent specific settings
+                Fluazu specific settings and operation
+                    Overview
+                    Starting and stopping fluazu
+                    Configuring azureus settings via fluazu
+                    Viewing information about fluazu - logs, processes,
+                    version
+                    Configuring fluazu
+                Wget specific settings
+                Nzbperl specific settings
+                Common settings
+
+            Transfer control settings (control)
+                Overview
+                Configuration
+
+            Web application frontend options (webapp)
+
+                Overview
+                Configuration
+
+            Web application index page display options (index)
+                Overview
+                Configuration
+
+            Directory / File Management options (dir)
+                Overview
+                Configuration
+
+            Configuring stats.php output options (stats)
+                Overview
+                Configuration
+
+            Fluxd control and configuration (fluxd)
+                Controlling fluxd (start/stop/restart)
+                Setting the database mode to use
+                Setting the log level to use
+
+            Transfer statistics configuration (xfer)
+                Enabling the display of transfer stats
+                Updating transfer stats in real time
+                Allowing all users to view all other users stats
+                (Re)setting the transfer stats
+                Setting the first day of the week
+                Setting the month day on which a month starts
+
+            Search engine configuration (search)
+                Checking for search engine updates
+                Filtering out unwanted categories from search results
+
+            Website link list management (links)
+                Adding, editing, moving and deleting link items
+
+            RSS Feed list configuration (RSS)
+                Adding, editing and deleting RSS feeds
+
+            Activity / Audit / Event viewer (activity)
+                Filtering items listed in the activity panel
+
+
+        Appendices
+
+			Guide to automatically fetching and uploading RSS torrents to torrentflux-b4rt using fluxd
+				Overview
+				Configuring the Rssad fluxd module to download torrents
+				Configuring the Watch fluxd module to inject torrents into torrentflux-b4rt
+				Starting fluxd
+                Example Rssad filter patterns
+
+            Using fluxcli.php on the command-line
+                Running fluxcli.php from a cron job to auto fetch RSS feed items
+
+            Obtaining statistics using stats.php
+                Overview
+                Specifying the type of output to display
+                Specifying the format of the stats.php output
+                Configuring stats.php
+
+
+
+/**********************************************************************
+Introduction
+**********************************************************************/
+
+    This is the manual for the transfer control client torrentflux-b4rt.
+
+    The manual is split into the following sections:
+
+        o User operations
+            - covers day to day basic use of torrentflux-b4rt
+
+        o Administration operations
+            - covers configuration of torrentflux-b4rt and other
+              operations that require administration rights in
+              torrentflux-b4rt
+
+        o Advanced operations
+            - covers advanced concepts such as fluxd operation (fetching
+              rss torrents automatically, watching folders and uploading
+              new torrents automatically) and running the fluxcli.php
+              script from the commandline or cron jobs.
+
+    For an overview of the capabilities of torrentflux-b4rt and
+    requirements, installation guide, features and author list, please
+    see the README, INSTALL and FEATURES files located in the
+    distribution root folder.
+
+
+
+
+    User operations
+
+        Working with metadata files (.torrent, .wget, .nzb files)
+
+        This section uses the term 'metadata files' to refer
+        collectively to .torrent, .wget and .nzb files.
+
+            Uploading metadata (.torrent, .wget, .nzb) files
+
+                Uploading individual metadata files from your filesystem
+
+                    To upload a metadata file from your filesystem:
+
+                        1. Click on the 'Browse...' button next to the
+                           text field labelled 'Select a Metafile for
+                           upload (.torrent, .wget, .nzb):'.
+
+                        2. Browse to the location containing the meta
+                           file you want to upload, select the file and
+                           click 'OK'.
+                        3. Click the 'Go' button next to the 'upload
+                           metafile' text field.
+
+                        4. (Optional) To upload and start the transfer
+                           at the same time, select 'Upload+Start' from
+                           the drop-down list next to the filename text
+                           field.
+
+                Uploading multiple torrent files from your filesystem
+
+                    To upload more than one torrent file at a time,
+                    there are two options:
+
+                    Javascript multi upload method:
+
+                    1.  Enter the filename of the first torrent in the
+                        filename field.
+
+                    2.  Click the 'More...' link underneath the filename
+                        text field.
+
+                        A new filename text field will appear for you to
+                        enter a new file to upload.  You can repeat the
+                        above until you have the filenames of all files
+                        you wish to upload.  When you're done, click
+                        'Go' to upload all the files at once.
+
+                    Multiple Upload page method:
+
+                    1. Click on the 'Multiple Upload' link underneath
+                       the 'Select filename for upload' text field.
+
+                       You will be directed to a new page with a number
+                       of empty filename text fields where you can enter
+                       the names of the files you wish to upload.  When
+                       all files are selected, click 'Go' to upload.
+
+                    In both methods above, you can have the files upload
+                    and start immediately by selecting the correct
+                    option from the drop down list - 'Upload+Start'.
+
+                Uploading metadata files from a URL
+
+                    Metadata files (.torrent, .nzb and .wget files) can
+                    be uploaded directly from a remote URL (http/ftp) to
+                    torrentflux-b4rt.  This saves having to download the
+                    file first and then upload it separately.
+
+                    To upload a metadata file from a remote URL (ie
+                    http://example.com/torrentfile.torrent):
+
+                    1.  Enter the URL in the text field labelled 'URL
+                        for the Torrent File:'.
+
+                    2.  Click the 'Go' button next to the text field.
+
+                    3.  (Optional) To upload and then start the
+                        transfer, select 'Get File+Start' from the
+                        dropdown list next to the URL field.
+
+                    The same operation can be completed for .wget files
+                    and .nzb files if enabled by the administrator.
+
+
+            Downloading metadata files
+
+                Metadata files that have already been uploaded to
+                torrentflux-b4rt can be downloaded by clicking on the
+                green down arrow to the left of the transfer list item.
+
+                You will be prompted to save the metadata file to disk
+                or open it with a suitable application if your browser
+                is configured to handle the metadata file type.
+
+                Example:
+                --------
+                This feature is useful for when you want to confirm the
+                integrity of a torrent file that doesn't appear to be
+                transferring correctly.  Just download the file by
+                clicking the download button and then open the torrent
+                file in a standalone torrent client.
+
+                This way you can see whether the torrent is working as
+                it should in a different torrent client and verify
+                whether there is a problem with the client you're using
+                in torrentflux-b4rt.
+
+            Searching for torrent files
+
+                Performing a search
+
+                    To search for a torrent file:
+
+                    1.  Enter the keywords you wish to search for in the
+                        text field labelled 'Torrent Search:'.
+
+                    2.  Select the torrent search engine you wish to use
+                        from the dropdown list next to the text box.
+
+                    3.  Click the 'Search' button.
+
+                    A search will then be made for the keywords you
+                    entered on the search engine you specified and the
+                    results will be listed.
+
+                    Click on the name of the torrent in the results list
+                    to download the torrent file directly to
+                    torrentflux-b4rt.
+
+                Working in the search results page
+
+                    The search results page lists all the torrents that
+                    matched the keywords you searched for, including the
+                    following information:
+
+                        o Torrent name
+                            - password protected downloads are marked
+                              with a bold 'P'
+                            - torrents which require you to register
+                              first before downloading them are marked
+                              with a bold 'R'
+                        o Category
+                        o Size
+                        o Seeds
+                        o Peers
+
+                    Clicking on the torrent name will download the
+                    torrent to torrentflux-b4rt.
+
+                    Clicking on a category name will fetch a list of all
+                    torrents in that category from where torrents can be
+                    downloaded by clicking on them.
+
+                    Seedless transfers can be hidden from the search
+                    result list by clicking on the 'Hide Seedless' link.
+
+            RSS feed torrents
+
+                The RSS torrents page can be browsed by clicking on the
+                link 'RSS Torrents' in the middle of the window above
+                the transfer list/drivespace bar.
+
+                An RSS feed item can be downloaded by clicking on its
+                name in the RSS feed list page.
+
+                If you don't see any feed items on the RSS torrents
+                page, contact your administrator to get the RSS feeds
+                added.
+
+                Note, the RSS torrents page does not download torrents
+                automatically using RSS!  For this functionality you
+                must use the Fluxd daemon's Rssad module.  See
+                TODO:rssad_link for information on configuring this.
+
+                IMPORTANT:
+                ----------
+                If you are unable to download the RSS feed item when you
+                click on it, make sure that the item you're trying to
+                download is actually a torrent file and not just a link
+                to the page that contains the file.
+
+                The RSS feed your administrator adds MUST be a 'direct
+                download' feed.  This is a feed of torrent URLs which
+                can be downloaded directly (instead of having to visit a
+                'details' page first and then download the torrent from
+                that page).
+
+
+        Controlling transfers
+            Controlling individual transfers
+                Starting individual transfers in advanced mode
+
+                    Click the icon with a single white arrow on a green
+                    background - located next to the transfer list item
+                    on the far left of the listing - to open the
+                    advanced start window.
+
+                    The transfer settings window will open on the
+                    'Control' page.  On this page you can modify a
+                    number of settings before starting the transfer.
+
+                    Once any modifications to the settings are made,
+                    click 'Run Transfer' to start the transfer running.
+
+                    To close the settings window after starting the
+                    torrent, check the checkbox named 'Close Window'.
+
+                    To NOT run a hash check before starting a torrent,
+                    check the checkbox named 'Don't check hashes'.  This
+                    is useful for when you have a very large torrent
+                    that you know has been downloaded fully already and
+                    you don't want to spend a long time having the
+                    torrent client rechecking the integrity of the
+                    download before continuing on to seed the torrent.
+                    Note: Skip Hash Check is not supported for "fresh"
+                    transfers (i.e. those which have not yet written any
+                    data to the disk).
+
+                    For full information on changing a transfer's
+                    settings see the section on
+                    TODO_link:changing_transfer settings
+
+                    Note:
+                    -----
+                    The start advanced icon is only visible if the
+                    torrent has not already started or is not in the
+                    process of stopping.  To verify this, look at the
+                    status column in the transfer listing which will
+                    tell you whether the transfer is starting, started,
+                    connecting, seeding, leeching, stopping, stopped or
+                    new.
+
+                Starting individual transfers in quick mode
+
+                    Click the icon with a double white arrow on a green
+                    background - located next to the transfer list item
+                    on the far left of the listing - to start a transfer
+                    in 'quick' mode.
+
+                    Starting a transfer in quick mode allows you to skip
+                    the advanced settings window. When a transfer is
+                    resumed, the last used client and settings of that
+                    transfer are used to start it; if it is a new
+                    transfer, the default client and settings configured
+                    by the administrator will be used (unless the
+                    settings are changed on the settings page of the
+                    transfer prior to starting it).
+
+                    See the section on TODO_link:transfer_client_options
+
+                Stopping individual transfers
+
+                    Click the icon with a white square on a red
+                    background - located next to the transfer list item
+                    on the far left of the listing - to stop a running
+                    transfer.
+
+            Controlling multiple transfers using the multi-ops feature
+
+                Overview
+
+                    The multi-ops feature of torrentflux-b4rt allows you
+                    to perform the same operation on more than one
+                    transfer at a time.
+
+                Using multi-ops
+
+                    1.  Select the transfers from the transfer list that
+                        you want to perform the multi-op on by checking
+                        their checkboxes to the far right of the
+                        transfer list.
+
+                        Note - to toggle the selection of ALL transfers,
+                        check the checkbox at the bottom of the transfer
+                        list to the far right next to the multi-ops
+                        dropdown list.  Unchecking this checkbox will
+                        deselect all transfers.
+
+                        This can be useful if you have a large list of
+                        transfers and you want to perform a multi-op on
+                        only 90% of the transfers.
+
+                    2.  Select the type of multi-op you want to perform
+                        on the selected transfers from the drop down
+                        list located under the transfer list to the far
+                        right.
+
+                    3.  Click the 'Go' button to perform the chosen
+                        multi-op on the selected transfers.
+
+                        WARNING:
+                        --------
+                        You will NOT be prompted for confirmation to
+                        perform the multi-op so be sure that you really
+                        want to carry out the action before using the
+                        multi-op feature!
+
+                Available multi-op operations
+
+                    The operations that can be performed using multi-ops
+                    are listed below:
+
+                    o   starting and stopping more than one transfer
+
+                    o   deleting transfer metafiles
+                        - ONLY the metafiles are deleted, any transfer
+                          data downloaded will NOT be deleted with this
+                          option.
+
+                          Choose this option if you have yet to move the
+                          transfer data out of the torrentflux-b4rt data
+                          directory.
+
+                    o   deleting transfer metafiles and data
+                        - delete both the transfer metafile - ie
+                          .torrent file etc - AND the transfer data.
+
+                          Be careful when using this option, only use it
+                          when you have no more use for the data that
+                          has been downloaded.
+
+                    o   resetting transfer totals
+                        - reset any information stored about how much
+                          data has been transferred for this transfer -
+                          cumulative upload/download totals.
+
+                          Only use this option if you don't need to know
+                          about how much data has been transferred in
+                          total for a transfer.
+
+                    o   wiping transfers
+                        - deletes any transferred data and resets the
+                          totals for the transfer.
+
+                          Be careful when using this option, only use it
+                          when you want any transferred data removed.
+
+            Controlling multiple transfers using the bulk-ops feature
+
+                Overview
+
+                    A number of actions can be performed on all the
+                    transfers in the transfer list at the same time.
+                    This is quicker than performing the action on each
+                    transfer individually or even by using the multi-ops
+                    feature.
+
+                    The operations that can be performed in bulk are:
+
+                    o   Start all transfers
+                    o   Resume all transfers
+                    o   Stop all transfers
+
+                    'Start all transfers' will start every single
+                    transfer in the transfer list <b>regardless of
+                    whether they have previously been started or
+                    not</b>.
+
+                    'Resume all transfers' will only start those
+                    transfers in the transfer list that have already
+                    previously been started and are currently in the
+                    'stopped' state.
+
+                    'Stop all transfers' stops all currently running
+                    transfers in the transfer list.
+
+                    The bulk-ops start/resume are safe to use on a
+                    transfer list where some transfers are already
+                    running - only those transfers that aren't currently
+                    running will be started or resumed.  A transfer
+                    won't accidentally be started twice!
+
+                    See also:
+                    TODO_LINK:
+                    Controlling bulk transfer operations
+
+                Using bulk-ops
+
+                    To use the bulk-ops feature, click on the
+                    'Start/Stop/Resume All Transfers' icon at the bottom
+                    of the transfer list next to the multi-ops dropdown
+                    list.  The icons are a white arrow on a green
+                    background, a double white arrow on a green
+                    background and a white square on a red background
+                    respectively for start, resume and stop.
+
+            Transfer settings window
+
+                Overview
+
+                    The transfer settings window allows fine grained
+                    control over your transfers and access to detailed
+                    statistics and logs.
+
+                    The options available in the settings window will
+                    differ according to the type of transfer and the
+                    status of the transfer - if the transfer is
+                    currently running or not.
+
+                    The transfer settings window is primarily used to
+                    modify the settings of a currently running transfer
+                    'on the fly' and to view statistics and logging info
+                    for the transfer.
+
+                    The transfer settings window is also used to modify
+                    the settings of a client / transfer when starting a
+                    transfer in 'advanced mode'.  See
+                    TODO_link:'Starting individual transfers in advanced
+                    mode'
+
+                Opening the transfer settings window
+
+                    The transfer settings window can be opened by using
+                    any of the following methods:
+
+                        o   Starting a torrent in 'advanced mode'
+                            TODO_link:'Starting individual transfers in
+                            advanced
+
+                        o   Clicking on a transfer in the transfer list.
+                            The transfer can be in any status - started
+                            or stopped - to use this method.
+
+                        o   Clicking on the 'Transfer Settings' icon to
+                            the far right of the transfer item in the
+                            transfer list.
+
+                    A number of different 'views' or pages are available
+                    in the transfer settings window, these are covered
+                    below.
+
+                Viewing general transfer statistics
+
+                    The 'Stats' page of the transfer settings window
+                    allows you to view the following information about
+                    your transfer:
+
+                        o   Estimated Time - time until transfer
+                            download is complete.
+
+                        o   Percent Done - percentage of data already
+                            downloaded.
+
+                        o   User - the torrentflux-b4rt owner of this
+                            transfer
+
+                        o   Download Speed - current download speed,
+                            updates every 5 seconds by default.
+
+                        o   Upload Speed - current upload speed,
+                            updates every 5 seconds by default.
+
+                        o   Down - total downloaded during the current
+                            session.
+
+                        o   Up - total uploaded during the current
+                            session.
+
+                        o   Down-Total - total downloaded during all
+                            sessions for this transfer; the cumulative
+                            download total.
+
+                        o   Up-Total - total uploaded during all
+                            sessions for this transfer; the cumulative
+                            upload total.
+
+                        o   Seeds - current number of seeds available
+                            for this transfer, updates every 5 seconds
+                            by default.
+
+                        o   Peers - current number of peers available
+                            for this transfer, updates every 5 seconds
+                            by default.
+
+                        o   Port - current local port being used for this
+                            transfer
+
+                        o   Connections - current number of active
+                            connections for this transfer.
+
+                        o   Sharing - current share ratio for this
+                            transfer, 100% equals same amount uploaded
+                            as downloaded.
+
+                        o   Seed Until - the share ratio at which
+                            seeding should stop.  Otherwise known as
+                            'sharekill' ratio - ie 'stop sharing when
+                            this ratio is reached'
+
+                Viewing a transfer's hosts
+
+                    The 'Hosts' page of the transfer settings window
+                    allows you to view a list of the hosts currently
+                    connected with respect to this transfer.
+
+                    A host in this list may not necessarily be actively
+                    transferring data to/from us.  A host can be
+                    connected and just in the process of negotiating the
+                    transfer of data.
+
+                Viewing a transfer's scrape information
+
+                    The 'Scrape' page of the transfer settings window
+                    allows you to view the number of seeds vs leechers
+                    for the current transfer in the form of a pie chart.
+
+                Viewing 'at a glance' pie charts of a transfer's statistics
+
+                    The 'Images' page of the transfer settings window
+                    allows you to view at a glance the total uploaded vs
+                    downloaded and the number of peers vs seeders.
+
+                Viewing a transfer's log file
+
+                    The 'Log' page of the transfer settings window
+                    allows you to view the detailed logging information
+                    for the current transfer.
+
+                    The transfer log is the first place to look when
+                    troubleshooting a problematic transfer.  In it you
+                    will find all the information output from the
+                    transfer client and errors relating to a problem are
+                    often displayed there.
+
+                Viewing a transfer's metadata details
+
+                    The 'Details' page of the transfer settings window
+                    allows you to view detailed meta information about
+                    the current transfer - the transfer filename,
+                    hashing info, details of what files/directories are
+                    included in the transfer, transfer size and the
+                    announce url for torrents.
+
+                Viewing the files included in a transfer
+
+                    The 'Files' page of the transfer settings window
+                    allows you to see what files are included in a
+                    transfer once complete.
+
+                Changing settings for a transfer
+
+                    The 'Settings' page of the transfer settings window
+                    allows you to modify the settings related to the
+                    current transfer.
+
+                    If a transfer has already started, any changes made
+                    to the settings can be sent to the transfer's
+                    client 'on the fly' by checking the checkbox
+                    labelled 'Send Changes to Client' (set by default if
+                    a transfer is already running).  There is no need to
+                    stop the transfer before making changes to a
+                    transfer's settings.
+
+                    The settings available for a transfer will differ
+                    depending on the client chosen to perform the
+                    transfer.  Not all settings are available for all
+                    choices of clients.
+
+                Starting, stopping and restarting a transfer
+
+                    The 'Control' page of the transfer settings window
+                    allows you to start, stop or restart a transfer.
+                    The available options will differ depending on the
+                    current status of the transfer.
+
+                    When the transfer is currently stopped, the control
+                    page resembles the 'Settings' page in that all the
+                    settings are available for modification prior to
+                    starting the transfer.
+
+                    When the transfer is currently running, the control
+                    page is limited to the stopping or restarting of a
+                    transfer.
+
+        Server statistics
+
+            Transfer status lights
+
+                The transfer status light for a transfer is displayed to
+                the far left of the transfer list.  The colour of the
+                transfer status light determines the current status of
+                the transfer.
+
+                The colours of the transfer status light are as follows:
+
+                o   Black
+                    - transfer is not running.
+
+                o   Red
+                    - transfer is running but no seeds are currently
+                      available for the transfer.  If this status
+                      persists you should check:
+
+                        * the transfer log for any errors (see
+                          TODO_link:Viewing transfer log files)
+
+                        * the torrent's tracker is online
+
+                        * your firewall is configured to allow
+                          connections to/from the client
+
+                        * the torrent actually has any seeds available
+                          at all
+
+                      It is also worth testing that the torrent works
+                      correctly in another torrent client if the red
+                      light status continues, although be sure that the
+                      client you test in does not use Distributed Hash
+                      Tables (DHT or 'trackerless') because this may
+                      affect the results of the comparison test.
+
+                o   Yellow
+                    - transfer is running but there are less than two
+                      seeds available.  This can indicate that a torrent
+                      is not well seeded and if this status continues
+                      you may need to find a better seeded torrent or
+                      use a DHT/trackerless enabled client.
+
+                o   Green
+                    - transfer is running with at least two seeds.
+
+            General frontend 'Good looking' statistics
+
+                The 'Good looking' statistics are displayed to the top
+                right of the torrentflux-b4rt frontend and include the
+                following statistics:
+
+                o   Download Speed - total download speed for all
+                    running transfers
+
+                o   Upload Speed - total upload speed for all running
+                    transfers
+
+                o   Total Speed - total upload + download speed for all
+                    running transfers
+
+                o   Connections - total network connections for all
+                    running transfers
+
+                o   Drive Space - total hard disk drive space currently
+                    in use for the filesystem where torrentflux-b4rt
+                    downloads transfers to
+
+                o   Server Load - current server load averages of the
+                    server.  A sustained load average of over 2 in each
+                    of the three readings can be indicative of excessive
+                    server load depending on the server role.
+
+                Clicking on any of the links in the 'Good looking'
+                statistics section will open up the 'Server statistics'
+                page.
+
+            Viewing detailed server statistics
+
+                Overview
+                    The server statistics page provides detailed
+                    information on server status, including drivespace,
+                    logged-in users, process, network and transfer usage.
+
+                    The server statistics page can be opened using the
+                    following methods:
+
+                    -   Clicking on any of the links in the 'Good
+                        looking statistics' section of the frontend.
+
+                    -   Clicking on the 'Server Stats' link above the
+                        drivespace bar towards the middle of the
+                        frontend.
+
+                Drivespace and 'who' statistics
+
+                    This is the default view for the server statistics
+                    page labelled 'all' in the drop down list labelled
+                    'Type' at the top of the server stats page.
+
+                    This view provides information about:
+
+                    o   Current drivespace used on the filesystem where
+                        torrentflux-b4rt downloads transfers to.  The
+                        output is taken from the Unix command 'df'.
+
+                    o   Currently logged in users on the server - ie
+                        'real' shell users on the server that
+                        torrentflux-b4rt is installed on.  The output is
+                        taken from the Unix command 'w' or 'who'.
+
+
+                Process statistics
+
+                    This view can be set by selecting 'ps' from the
+                    'Type' dropdown list on the server stats page.
+
+                    The view displays information about any currently
+                    running processes for each of the transfer clients
+                    supported by torrentflux-b4rt.
+
+                Network connection statistics
+
+                    This view can be set by selecting 'netstat' from the
+                    'Type' dropdown list on the server stats page.
+
+                    The view displays information about any network
+                    connections currently connected for any of the
+                    supported torrentflux-b4rt transfer clients.
+
+                Transfer statistics
+
+                    This view can be set by selecting 'xfer' from the
+                    'Type' dropdown list on the server stats page.
+
+                    The view displays transfer statistics - ie amount of
+                    bandwidth used - in a table.  Statistics are given
+                    for bandwidth used for each user in total, over the
+                    last month, over the last week and for the current
+                    day.
+
+                    A detailed breakdown of 'per user' or 'total'
+                    transfer statistics can be viewed by clicking on the
+                    links in the initial transfer stats page.  On this
+                    detailed page you can view statistics for any single
+                    day within the last 30 days.
+
+            Opening the server monitor
+
+                The server monitor can be opened by clicking on the
+                'green light on a white background' image above the
+                drivespace bar towards the middle of the frontend.
+
+                The following information is displayed in the server
+                monitor:
+
+                    o   Download Speed
+                    o   Upload Speed
+                    o   Total Speed
+                    o   Connections
+                    o   Drive Space
+                    o   Server Load
+                    o   Transfers running
+                    o   Transfers queued
+
+                The server monitor is updated every 5 seconds by default
+                - update interval can be changed in the (TODO_link:
+                Setting the server monitor update interval) webapps
+                admin section.
+
+            Viewing an RSS feed of the transfer list
+
+                An RSS feed of the transfer list can be displayed by
+                clicking on the RSS icon (icon with orange background
+                and white arcs).
+
+                The RSS feed can be subscribed to in your feed reader,
+                allowing another method for monitoring the status of
+                your transfers.
+
+            Viewing activity history
+
+                The activity history can be viewed by regular users by
+                clicking on the 'History' link in the top right
+                navigation menu.
+
+                The activity history page displays information about all
+                activities performed by the user in the last 30 days.
+
+                The history page is a good place to look when
+                troubleshooting problematic transfers.
+
+        Changing the frontend look and feel
+
+            Reordering the transfer list
+
+                The transfer items in the transfer list are reordered by
+                clicking on the various transfer list column headers at
+                the top of the transfer list.
+
+                For example, to change the display of transfer list
+                items so the oldest items are displayed towards the top
+                of the list, click on the first transfer list column header
+                (above the transfer status lights) marked with a '#'.
+
+                The transfer list reordering options are as follows -
+                clicking the mentioned column header will toggle the
+                sorting:
+
+                o   Order by date transfer added to torrentflux-b4rt
+                    - click column header labelled '#'
+
+                o   Order by transfer file name alphabetically
+                    - click column header labelled 'Transfer File'
+
+                o   Order by transfer owner
+                    - click column header labelled 'User'
+
+                o   Order by transfer size
+                    - click column header labelled 'Size'
+
+                o   Order by total downloaded
+                    - click column header labelled 'T. Down'
+
+                o   Order by total uploaded
+                    - click column header labelled 'T. Up'
+
+                o   Order by transfer status
+                    - click column header labelled 'Status'
+
+                o   Order by percentage completed
+                    - click column header labelled 'Progress'
+
+                o   Order by download bandwidth
+                    - click column header labelled 'Down'
+
+                o   Order by upload bandwidth
+                    - click column header labelled 'Up'
+
+                o   Order by number of seeders
+                    - click column header labelled 'Seeders'
+
+                o   Order by number of peers
+                    - click column header labelled 'Peers'
+
+                o   Order by estimated time left until completion
+                    - click column header labelled 'Estimated Time'
+
+                o   Order by type of transfer client
+                    - click column header labelled 'C' - client types
+                      are denoted as follows:
+
+                        B: tornado
+                        T: transmission
+                        M: mainline
+                        A: azureus
+                        W: wget
+                        N: nzbperl
+
+                Note:
+                -----
+                The 'Enable sortable' feature must be enabled in
+                the 'index' settings page in the admin section to allow
+                reordering of transfers.
+
+                The columns available in your transfer list may differ
+                depending on how the administrator has configured the
+                display of transfer lists.  See the section on TODO_LINK
+                'Setting the default transfer list columns to display'.
+
+                The transfer list columns can also be configured on a
+                per user basis by modifying the user profile settings if
+                the administrator has allowed profile management.
+                See the section on TODO_LINK 'Changing what to display
+                in the frontend'.
+                Also see the administration section on TODO_LINK
+                'Allowing users to use transfer profiles' for info on
+                enabling or disabling individual user profile
+                management.
+
+            Turning AJAX updates on and off
+
+                The transfer list can be updated automatically using
+                AJAX (Asynchronous Javascript and XML).  To turn on AJAX
+                updates, click on the link 'Turn ON AJAX-Update' under
+                the transfer list.
+
+                The update text will change to:
+
+                    'Next AJAX-Update in xx seconds'
+
+                where xx is the countdown in seconds until the next
+                AJAX update (default 5 seconds).  The timer will count down
+                until it reaches zero, at which point the transfer list
+                will be refreshed.
+
+                To turn off AJAX updates, click on the link 'Next
+                AJAX-Update in xx seconds' again.
+
+                The AJAX update interval can be configured either globally
+                by the administrator ('index' section of the admin
+                pages) or individually on a per-user basis in the user's
+                profile.
+
+            Turning page refresh on and off
+
+                The main torrentflux-b4rt index page containing the
+                transfer list can be updated automatically using the
+                HTML meta refresh method.  To turn on page refresh,
+                click on the link 'Turn ON Page Refresh' under the
+                transfer list.
+
+                The refresh text will change to:
+
+                    'Next Page Refresh in xx Seconds'
+
+                where xx is the countdown in seconds until the next
+                refresh (default 60 seconds).  The timer will count down
+                until it reaches zero, at which point the page will be
+                refreshed.
+
+                To turn off automatic page refreshes, click on the link
+                'Next Page Refresh in .. Seconds' again.
+
+                The refresh interval can be configured either globally
+                by the administrator ('index' section of the admin
+                pages) or individually on a per-user basis in the user's
+                profile.
+
+                Note:
+                -----
+                Using the page refresh method, the whole page is
+                reloaded - not just the transfer list.  This option is
+                now largely deprecated in favour of the AJAX update
+                method, which only updates the transfer list instead of
+                the whole page and so uses up less bandwidth.
+
+            Showing/hiding seeding torrents
+
+                Clicking on the 'Hide Seeding Torrents' link under the
+                transfer list will hide all currently seeding torrents.
+
+                This is useful when you have a lot of transfers in the
+                transfer list that are in the 'Seeding' state that you
+                don't want to keep an eye on continually.
+
+                This feature can be configured either globally in the
+                'index' section of the admin pages or on a per user
+                basis in the user's profile.
+
+        Profile management
+
+            Overview
+
+                The user profile page is available by clicking on the
+                'Profile' link in the navigation menu at the top right
+                of the frontend.
+
+                A number of options and settings can be changed on a per
+                user basis, including general user settings (password,
+                default theme, language), what is displayed in the
+                frontend, what cookies to use for cookie enabled sites
+                and the option to create different transfer profiles for
+                different transfer situations.
+
+            General
+
+                Changing password/theme/language
+
+                    The following items can be changed directly on the
+                    'Profile' page:
+
+                    o   User name
+                    o   Password
+                    o   Default Theme
+                    o   Language
+                    o   Whether to hide offline users on the frontend
+
+                Changing what to display in the frontend
+
+                    A large number of frontend display settings can be
+                    changed on a per user basis.  These options are
+                    available under the 'Personal Settings' heading on
+                    the 'Profile' page.
+
+                    The frontend display options which can be configured
+                    are as follows:
+
+                    TODO: put links to admin help for each of the
+                    following
+
+                    o   Page Refresh
+                        -   Enable/disable page refresh
+
+                    o   Page Refresh Interval
+                        -   Number of seconds between page refreshes
+
+                    o   AJAX Update
+                        -   Enable/disable AJAX updates
+
+                    o   Page title AJAX Update
+                        -   Enable/disable update of page title in AJAX
+                            updates
+
+                    o   User list AJAX Update
+                        -   Enable/disable update of the user list in
+                            AJAX Updates
+
+                    o   Transfer List AJAX Update
+                        -   Enable/disable update of the transfer list
+                            in AJAX updates
+
+                    o   Silent AJAX Update
+                        -   Do not display "Update in Progress..."
+                            during AJAX updates
+
+                    o   AJAX Update Interval
+                        -   Number of seconds between AJAX updates
+                            (default: 5)
+                    o   Width
+                        -   Width in pixels of the index page (default:
+                            900)
+
+                    o   Display Links
+                        -   Display list of links on the index page
+
+                    o   Display Users
+                        -   Display users on the index page
+
+                    o   Enable Good Looking Statistics
+                        -   Enable/disable "Good looking statistics"
+
+                    o   Good Looking Statistics Settings
+                        -   Select Fields shown in "Good looking
+                            statistics"
+
+                            Available stats to display in good looking
+                            stats panel:
+
+                            *   Download Speed
+                            *   Upload Speed
+                            *   Total Speed
+                            *   Connections
+                            *   Drive Space
+                            *   Server Load
+
+                    o   Display Bandwidth Bars
+                        -   Display bandwidth usage bars
+
+                    o   Bandwidth Bars Style
+                        -   Select style of bandwidth bars
+
+                    o   Big Bold Drivespace Warning
+                        -   Enable/disable drivespace warning when disk
+                            space low
+
+                    o   Show Seeding Torrents
+                        -   Display seeding torrents on index page
+
+                    o   Estimated Time To Seed
+                        -   Display the estimated time until transfers
+                            reach seeding cutoff ratio
+
+                    o   Transfer List Sorting
+                        -   Enable/disable reordering of transfer list
+                            by clicking on column headings
+
+                    o   Default Transfer List Sort Order
+                        -   Select default sort order of transfers in
+                            transfer list
+
+                    o   Transfer List Display
+                        -   Select the information to display in the
+                            transfer list
+
+                            Information columns available for display in
+                            transfer list:
+
+                            *   Owner
+                            *   Size
+                            *   Total Down
+                            *   Total Up
+                            *   Status
+                            *   Progress
+                            *   Down-Speed
+                            *   Up-Speed
+                            *   Seeds
+                            *   Peers
+                            *   Estimated Time
+                            *   Client
+
+                    o   Default Page In Transfer Settings Window
+                        -   Select the default page to display in the
+                            transfer settings window
+
+                    o   Server Stats
+                        -   Enable/disable display of server stats on
+                            index page
+
+                    o   Network Connections
+                        -   Enable/disable display of total network
+                            connections
+
+                    o   Show Server Load
+                        -   Enable/disable display of server load
+                            averages (1/5/15 min averages)
+
+                    o   Drivespace Bar Style
+                        -   Select style of drivespace bar
+
+                    o   Transfer Stats Window Update Method
+                        -   Select method to use to update transfer
+                            stats window
+
+                    o   Transfer Stats Window Update Interval
+                        -   Number of seconds between transfer stats
+                            updates
+
+                    o   Transfer Hosts
+                        -   Enable/disable display of connected hosts in
+                            transfer hosts window
+
+                    o   Server Monitor Update Interval
+                        -   Number of seconds between server monitor
+                            updates
+
+                Deleting torrentflux-b4rt auto login cookies
+
+                    To delete the auto-login cookie used to authenticate
+                    the current user automatically, click on the link
+                    'Delete "Auto-Login" Cookie'.
+
+                    When you restart the browser and browse to the
+                    torrentflux-b4rt frontend, you will be prompted to
+                    reenter your username and password to login.
+
+                    To find out about admin options for configuring
+                    authentication in torrentflux-b4rt see TODO_LINK:
+                    'Setting the authentication type to use'.
+
+                Resetting user profile to default
+
+                    Clicking the link 'Reset Personal Settings' will
+                    reset all customised personal settings to the
+                    defaults.
+
+            External cookie management
+
+                Overview
+
+                    Torrentflux-b4rt can be configured to use HTTP
+                    cookies when performing any operation that involves
+                    an HTTP request with a website that requires cookies.
+
+                    This is mainly used for downloading torrent files
+                    from a private tracker that requires you to use
+                    cookies for authentication.
+
+                    The following sections describe how to use external
+                    cookies with torrentflux-b4rt.
+
+                How to obtain cookie information
+
+                    FireFox
+
+                        *   Tools => Options
+
+                        *   Cookies => View Cookies
+
+                        *   Locate the site you want to get cookie
+                            information from.
+
+                        *   Copy the cookie values for the variables
+                            that the site uses.
+
+                            Common cookie variables used in torrent sites
+                            are UID and PASS, although these will differ
+                            from site to site.  See your torrent site FAQ
+                            or Forum for more info.
+
+                    Internet Explorer
+
+                        *   Tools => Internet Options
+                        *   General => Settings => View Files
+                        *   Locate cookie file for site (eg: Cookie:user@www.host.com/)
+                        *   Open the file in a text editor
+                        *   Grab the values below UID and PASS
+
+                            The file will look something like this:
+
+                            ------
+
+                            userZone
+                            -660
+                            www.host.com/
+                            1600
+                            2148152320
+                            29840330
+                            125611120
+                            29766905
+                            *
+                            uid
+                            123456 <----------------------------
+                            www.host.com/
+                            1536
+                            3567643008
+                            32111902
+                            4197448416
+                            29766904
+                            *
+                            pass
+                            0j9i8h7g6f5e4d3c2b1a <--------------
+                            www.host.com/
+                            1536
+                            3567643008
+                            32111902
+                            4197448416
+                            29766904
+                            *
+
+                             --------
+
+                Adding cookies
+
+                    Enter the domain of the website you want to use
+                    cookies for in the text field labelled 'Host:'.
+
+                    Enter the cookie string you want to use in the text
+                    field labelled 'Data:'.
+
+                    Example:
+                    --------
+
+                    If the cookie variables you want to send are:
+
+                    o   uid with a value set to '99'
+                    o   pass with a value set to 'foobar'
+
+                    then the cookie string would look like:
+
+                    o   uid=99;pass=foobar;
+
+                    Once the Host/Data fields are completed, click 'Add'
+                    to add the cookie.
+
+                    The cookie will now be used whenever you perform an
+                    action that involves an HTTP request with the
+                    domain.
+
+                Editing cookies
+
+                    Click on the 'Edit' icon next to the cookie you want
+                    to edit.  On the resulting page, modify the cookie
+                    as needed and click 'Update' when finished.
+
+                Deleting cookies
+
+                    Click on the 'Delete <domain>' icon next to the
+                    cookie you want to delete.  This will remove the
+                    cookie from the current user's profile.
+
+
+            Transfer profile management
+
+                Overview
+
+                    The transfer profile management section is found on
+                    the user's 'Profile' page.  Click the link 'Transfer
+                    Profiles Management' to open the transfer profiles
+                    management window.
+
+                    A transfer profile is a pre-defined set of transfer
+                    options which are saved under a transfer profile
+                    name.  The transfer profile(s) can then be used at
+                    a later date when you start a new transfer, saving
+                    you the effort of having to manually go through each
+                    of the settings for that transfer tweaking them to
+                    suit the type of transfer you're performing.
+
+                    Example:
+                    --------
+                    You use a private torrent tracker that requires you
+                    keep at least a ratio of 1.05.  Rather than manually
+                    go through the settings each time you start a
+                    torrent for that private tracker, instead you can
+                    create a transfer profile with the following
+                    settings:
+
+                    o   'Percentage When Seeding should Stop:' = '105'
+                    o   'Max upload rate' = 0 - no restriction on upload
+                        speed
+
+                    Now each time you start a new transfer that uses the
+                    private tracker, just select the profile from the
+                    transfer settings window - the sharekill ratio
+                    will automatically be set to 105 and there will be
+                    no restriction on how fast you upload to leeching
+                    peers meaning you should meet your ratio of 1.05
+                    faster.
+
+                Adding transfer profiles
+
+                    To add a transfer profile, complete the form on the
+                    transfer profiles management page and click the
+                    'Add' button.
+
+                    If you wish the profile to be available to all other
+                    users, check the 'Public Profile' checkbox.
+
+                Editing transfer profiles
+
+                    To edit a transfer profile, click the 'Edit
+                    <profilename>' icon next to the profile.  This will
+                    load the profile's settings and allow you to modify
+                    them.
+
+                    When you are happy with the changes, click the
+                    'Update' button to save the profile.
+
+                Deleting transfer profiles
+
+                    To delete a transfer profile, click on the red
+                    button with a cross on it next to the profile.
+
+        Directory / file manager
+
+            Overview
+
+                The directory manager allows you to work with the files
+                and directories within the torrentflux-b4rt download
+                directory.  Regular users are limited to working with
+                the files in their own download folder - administrators
+                can access any files in any directory within the master
+                download folder.
+
+                A number of different operations can be performed on
+                files within the directory manager, including:
+
+                o   Deleting files and directories
+                o   Renaming files and directories
+                o   Moving files and directories out of the transfer
+                    directory
+                o   Making torrents
+                o   Downloading files and directories
+                o   Downloading files and directories as tar or
+                    zip archives
+                o   Streaming media files using VLC
+                o   Viewing NFO files
+                o   Extracting archive files (zip/rar files)
+                o   Checking the integrity of files using .sfv files
+
+                Permission to perform these operations can be controlled
+                globally by the administrator in the administration
+                settings pages. TODO_LINK: Directory / File Management
+                options (dir)
+
+            Opening the directory manager
+
+                The directory manager can be opened with the following
+                actions from the torrentflux-b4rt frontend:
+
+                o   Clicking the 'Directory' link in the navigation menu
+                    at the top right of the screen
+
+                o   Clicking the 'Directory List' link in the middle of
+                    the screen
+
+                o   Clicking on the folder icon to the left of a
+                    transfer in the transfer list
+
+                Once the directory manager is opened, directories can be
+                browsed by clicking on folders.  To go 'up' a directory,
+                click on the link '[Back Parent Directory]'.
+
+                The available actions that can be performed on a file
+                are listed to the right of the directory manager.  These
+                actions will differ depending on the type of file.
+
+                All files within a directory can be selected at the same
+                time by checking the checkbox at the bottom right of
+                the directory manager.  Checking the checkbox again will
+                deselect all files.
+
+            Deleting files and directories
+
+                Individual files can be deleted by clicking on the icon
+                with a white cross on a red background next to the file.
+
+                Multiple files can be deleted at the same time by first
+                selecting all the files you wish to delete and then
+                clicking on the cross icon at the bottom right of the
+                directory manager next to the 'select all' checkbox.
+
+                You will be prompted for confirmation to delete the
+                file(s) after clicking the cross icon.
+
+            Renaming files and directories
+
+                Files and directories can be renamed by clicking on the
+                'Rename File/Folder' icon to the right of the file.  A
+                window will open asking you to specify the name you wish
+                to rename the file/folder to.  Complete this form and
+                click 'Ok' to rename the file/folder.
+
+                Note:
+                -----
+                Using this option, files and folders can only be renamed
+                within the bounds of the master download directory.  To
+                move files and folders *out* of the download directory,
+                the 'Move File/Folder' action must be used. TODO_LINK:
+                Moving files and directories out of the transfer
+                directory
+
+            Moving files and directories out of the transfer directory
+
+                Files and directories can be moved out of the master
+                transfer directory by clicking on the 'Move File/Folder'
+                icon.  A window will open asking you to specify the
+                destination directory in a drop down list or text box.
+
+                Note:
+                -----
+                The destination folders for move actions must be
+                configured by the administrator.  Any destination folders
+                configured by the admin will be listed in the 'Move'
+                window dropdown box.
+
+                For more info on setting a list of directories to move
+                files/folders into, see: TODO_LINK: Setting a list of
+                allowed folders to move transferred files into
+
+            Making torrents
+
+                A torrent file can be created for a file or directory by
+                clicking on the 'Make Torrent' icon.  A window will be
+                displayed prompting for the necessary details for the
+                torrent:
+
+                o   Client - the client you want to use to create the
+                    torrent file.
+
+                o   Torrent name - the filename you wish to give the
+                    torrent file
+
+                o   Announcement URL - the URL of the tracker which will
+                    be hosting the torrent file.  Check your tracker's
+                    website for info on what to use for this option.
+
+                o   Announce List - a list of alternative trackers to
+                    use for this torrent.  This option allows you to
+                    provide redundancy in the event that one tracker is
+                    unavailable.
+
+                o   Piece size - the size of chunks you wish your
+                    torrent to be distributed in.
+
+                o   Comments - any comments about the content of the
+                    torrent.
+
+                o   Private Torrent - whether or not this torrent is
+                    private.  If private, the DHT / trackerless option
+                    will be disabled.
+
+                o   DHT Support - whether to support Distributed Hash
+                    Tables for so-called 'trackerless' support.  Using
+                    this option will allow clients with DHT support to
+                    seek out peers using DHT instead of depending only
+                    on static tracker servers.
+
+                Torrent file creation will commence when the 'Create'
+                button is clicked.  If the 'Notify me of completion'
+                checkbox is checked, the create torrent window will
+                display details of the torrent created once complete.
+
+                When the torrent file is created, it will be displayed in
+                the torrentflux-b4rt frontend ready for starting as with
+                any other torrent.
+
+            Downloading files and directories
+
+                Files can be downloaded in a web browser by clicking on
+                the 'Download' icon to the right of the file in the
+                directory manager.
+
+                Complete directories can be downloaded as a tar archive
+                by clicking on the 'Download as tar' icon to the right
+                of the directory in the directory manager.  This action
+                will first create a tar archive of the complete
+                directory listing and then send this to the web browser
+                for download.
+
+                Note:
+                -----
+                Downloading directories as tar archives can take a
+                considerable amount of time if the directory structure
+                is very large.
+
+            Streaming media files using VLC
+
+                Audio and Movie files can be streamed in real time from
+                the server by clicking on the 'Stream Multimedia (vlc)'
+                icon to the right of the file.
+
+                This action will open a window with a form for you to
+                specify the details for the streaming action.
+
+                TODO: description of streaming with vlc.
+
+            Viewing NFO files
+
+                NFO files are standard information files that contain
+                pertinent information for a distribution or download,
+                so-called because the filename ends in .nfo.
+                These nfo files can be viewed by clicking on the 'View
+                nfo file' icon to the right of the file listing in the
+                directory manager.
+
+                The following file extensions are considered to be 'nfo'
+                files by torrentflux-b4rt: .nfo, .txt, .log
+
+            Extracting archive files (zip/rar files)
+
+                Torrentflux-b4rt supports the extraction of zip and
+                rar archive files.  To extract an archive file, click on
+                the 'Unzip/Unrar' icon to the right of the file listing.
+
+                A window will open prompting you for a password for the
+                archive - enter the password here if applicable.  If
+                there is no password as far as you know, leave the
+                password field empty.
+
+                Click 'OK' to extract the archive.
+
+                Note:
+                -----
+                The extraction process may take a long time to complete
+                if the archive is very large.  You may close the
+                'Uncompress file' window once it has started, the
+                uncompression process should continue in the background
+                on the webserver.
+
+            Checking the integrity of files using .sfv files
+
+                SFV or 'simple file verification' files are often used
+                by file distributors to check on the integrity
+                of transferred files.  The sfv signatures on a
+                transferred archive can be tested by clicking on the
+                'Check sfv' icon to the left of the directory containing
+                the sfv file.
+
+        Troubleshooting
+
+            Flushing the template cache
+
+                Occasionally problems may occur with the templating
+                system which make pages display incorrectly.  A first
+                attempt at fixing this kind of problem is to flush the
+                template cache.
+
+                If you are unable to click on the icon, try browsing to
+                the following URL:
+
+                http://example.com/superadmin.php?m=35
+
+                replacing 'example.com' with the URL of your
+                torrentflux-b4rt installation.
+
+                Alternatively, delete the files directly; they are
+                located inside your main-path-directory:
+
+                /path/to/maindir/.templateCache
+
+            Running a maintenance operation
+
+                Occasionally problems occur whereby the integrity of the
+                files that are used to keep track of transfers can
+                become corrupted.  This problem can lead to a situation
+                where torrentflux-b4rt reports that a transfer is
+                running when it isn't, or other similar problems.
+
+                A first attempt at solving this kind of problem is to
+                perform a maintenance run from the front end which will
+                check the status of the transfer's control files and fix
+                any problems if possible.
+
+                To make a maintenance run, click on the 'Maintenance'
+                icon located at the bottom of the transfer list near to
+                the bulk operations links.  The icon is a red and white
+                life ring symbol.
+
+                For more thorough maintenance tasks see the section on
+                superadmin maintenance tasks
+
+                TODO_LINK: Performing maintenance tasks
+
+    Administration operations
+
+        Superadmin
+
+            Overview
+
+                The superadmin pages are accessible only by the
+                superadmin account holder - the administration account
+                that is created the first time that torrentflux-b4rt is
+                installed.
+
+                The superadmin pages are accessed by clicking on the
+                'Superadmin' link on the administration pages - a new
+                window will open with the superadmin pages in it.
+
+                The superadmin pages allow you detailed control over the
+                following tasks and areas:
+
+                o   Transfers - bulk start/stop/resume transfers
+
+                o   Processes - view detailed process info; force stop
+                    transfer processes
+
+                o   Maintenance - clean up and maintenance
+
+                o   Backup - create backups; download saved backups
+
+                o   Log - view logs
+
+                o   Misc - test for PHP and Perl requirements; view list
+                    of installed files
+
+                o   About - version checking, updates, news,
+                    changelog; check installed file integrity
+
+                Be careful when using the superadmin features as many of
+                them apply to all torrentflux-b4rt users and not just
+                individual users.
+
+            Controlling bulk transfer operations
+
+                The superadmin transfer bulk-ops page allows you to
+                operate on multiple transfers at one time.  The actions
+                are identical to the bulk-ops feature available in the
+                frontend.
+
+                The transfer bulk-ops available in the superadmin pages
+                are:
+
+                o   Stop All Transfers - this option will stop all
+                    currently running transfers
+
+                o   Start All Transfers - this option will start all
+                    transfers that are currently not running
+
+                o   Resume All Transfers - this option will start *only*
+                    those transfers that have already been previously
+                    started
+
+                See also:
+
+                TODO_LINK: User operations - Controlling multiple
+                transfers using the bulk-ops feature
+
+            Working with processes
+
+                Viewing detailed process listings
+
+                    To view a detailed list of currently running
+                    processes, click on the 'All' link on the
+                    'Processes' tab in the superadmin pages.
+
+                    A detailed list of all currently running processes
+                    associated with torrentflux-b4rt will be displayed.
+
+                    The output is divided into sections, one for each
+                    client associated with torrentflux-b4rt.
+
+                Force stopping processes
+
+                    Problematic transfers can be forced to stop by using
+                    the 'Transfers' link on the 'Processes' tab in the
+                    superadmin pages.
+
+                    The 'Transfers' page displays a list of all
+                    currently running transfers, listing the user the
+                    transfer belongs to and the transfer file name.
+
+                    The transfers in this list can be force stopped by
+                    clicking on the red icon in the right hand column next
+                    to the transfer.
+
+                    This force stop method sends a SIGKILL signal to the
+                    process associated with the transfer if it is
+                    refusing to shut down gracefully in time.
+
+            Performing maintenance tasks
+
+                Basic transfer maintenance
+
+                    The 'Main' page of the 'Maintenance' tab in the
+                    superadmin pages allows you to run maintenance tasks
+                    on your torrentflux-b4rt installation.
+
+                    The available options are:
+
+                    o   Standard Maintenance Run
+
+                        This option is the same as available on the
+                        index page of the frontend and is automatically
+                        called on every login.
+
+                        The standard maintenance run performs the
+                        following actions:
+
+                        - Checks for stale fluxd PID and socket files
+                          and removes them if found.
+
+                        - Check for any transfers that are marked as
+                          running but actually aren't running and update
+                          their status to show them as not running.
+
+                        - Update the database so the information stored
+                          there is in line with the transfer files in
+                          the filesystem.
+
+                    o   Extended Maintenance Run
+
+                        This option is exactly the same as the standard
+                        maintenance run, except that any transfers that
+                        are marked as running but aren't actually
+                        running are restarted.
+
+                Killing processes
+
+                    The 'Kill' link on the 'Maintenance' tab of the
+                    superadmin pages allows you to kill all processes
+                    for a given type - PHP, Python, Perl,
+                    Transmission, Wget, VLC.
+
+                    This option will send a KILL signal to all processes
+                    matching the type you choose.
+
+                    NOTE:
+                    -----
+                    Be VERY careful when using this option and only use
+                    it as a last resort or if you really know what you
+                    are doing.
+
+                    The kill signal sent will kill ALL processes of the
+                    type you specify - for example if you choose to kill
+                    all PHP processes, every PHP process running on the
+                    server as the webserver user will be killed, not
+                    just those related to torrentflux-b4rt!
+
+                    If you choose to use this option to kill processes,
+                    it is advisable to check what processes are running
+                    first by issuing the following command as root:
+
+                    ps aux | grep php
+
+                    for example in the case of 'php'.  This will give
+                    you an idea of what PHP processes are currently
+                    running and you can be sure then that you are only
+                    going to kill the processes you need to.
+
+                Cleaning transfer file leftovers and client resources
+
+                    The 'Clean' linked page on the 'Maintenance' tab of
+                    the superadmin pages allows you to clean leftover
+                    transfer pid files, client cache files and
+                    torrentflux-b4rt's template cache files.
+
+                    The actions available are:
+
+                    o   PID File Clean
+
+                        Deletes stale PID files from transfers.
+                        Any transfers that are currently not running
+                        should not have a PID file associated with it.
+                        These stale PID files are deleted using this
+                        option.
+
+                    o   BitTornado Clean
+
+                        Deletes the BitTornado cache.
+                        BitTornado uses a cache whilst transferring
+                        data.
+                        This option deletes the BitTornado cache.
+
+                    o   Transmission Clean
+
+                        Deletes the Transmission cache.
+                        Transmission uses a cache whilst transferring
+                        data.
+                        This option deletes the Transmission cache.
+
+                    o   BitTorrent Mainline Clean
+
+                        Deletes the BitTorrent Mainline cache.
+                        BitTorrent Mainline uses a cache whilst
+                        transferring data.  This option deletes the
+                        BitTorrent Mainline cache.
+
+                    o   Template Cache Clean
+
+                        Deletes the Torrentflux-b4rt template cache.
+                        Torrentflux-b4rt can be configured to use a
+                        template cache to speed up display of pages.
+                        This option deletes the template cache.
+
+                        See also:
+                        TODO_LINK webapp admin config - Enabling
+                        template caching
+
+                Repairing problematic installations
+
+                    The 'Repair' linked page on the 'Maintenance' tab of
+                    the superadmin pages allows you to perform a
+                    thorough repair of the torrentflux-b4rt
+                    installation.
+
+                    This option is the same as the basic 'maintenance'
+                    run - TODO_LINK: superadmin maint etc except that
+                    this option resets the stat files of any transfers
+                    and deletes all transfer pids as if they had just
+                    been newly injected.  No transfers are restarted
+                    using this repair option.
+
+                    Generally this option should only be taken as a last
+                    resort if transfers refuse to start at all.
+
+                Resetting transfer totals, transfer stats and personal
+                settings
+
+                    The 'Reset' linked page on the 'Maintenance' tab of
+                    the superadmin pages allows you to reset the
+                    transfer totals, transfer statistics and personal
+                    settings.
+
+                    The options available are:
+
+                    o   Reset Transfer Totals
+
+                        Resets the transfer totals - total
+                        uploaded/downloaded - for each transfer in the
+                        transfer list.
+                        Do not use this option if you are trying to keep
+                        track of your ratio on trackers using the
+                        transfer totals in the frontend!
+
+                    o   Reset Xfer Stats
+
+                        Resets the transfer statistics.
+                        All transfer statistics on record are reset
+                        using this option - how much has been downloaded
+                        per user over the last day/month/year etc.
+
+                    o   Reset Personal Settings
+
+                        Resets the personal settings of all users.
+                        All users' personal settings are reset to the
+                        default values.  Be careful using this option as
+                        all users' profiles are affected.
+
+                Locking access to the frontend
+
+                    The 'Lock' linked page on the 'Maintenance' tab of
+                    the superadmin pages allows you to lock all access
+                    to the torrentflux-b4rt frontend for non-admin
+                    users.
+
+                    When attempting to browse the frontend, users will
+                    only see a message informing them the frontend is
+                    locked.
+
+            Backups
+
+                Creating a backup
+                    The 'Create Backup' linked page on the 'Backup' tab
+                    of the superadmin pages allows you to create a
+                    backup of all files associated with
+                    Torrentflux-b4rt.
+
+                    There are two options for where to store the backed
+                    up data - decide which is most appropriate for you:
+
+                    o   Backup on Server - this option will store the
+                        backup archive on the server inside a folder
+                        named '.backup' under the main path configured
+                        in the 'server' tab on the admin pages.
+
+                        Archives stored on the server can later be
+                        viewed in torrentflux-b4rt using the 'Backups
+                        currently on server' tab on the 'Backup' page of
+                        the superadmin section.
+
+                    o   Backup on Client - this option will send the
+                        backup archive to you in your web browser so you
+                        can save the backup locally.
+
+                    Once you have decided where to save the backup to,
+                    you can then choose what kind of compression to use
+                    with the archive.
+
+                    There are three compression options to choose from:
+
+                    o   gzip - this is the default compression setting.
+                        The archives will be compressed using gzip,
+                        creating smaller archives.  This option is
+                        faster than bzip2 but slower than 'none'.
+
+                    o   bzip2 - the archives will be compressed using
+                        bzip2, creating the smallest possible archives.
+                        This option is the slowest compression option.
+
+                    o   none - uses no compression when archiving.  The
+                        archives will be archived using tar only, the
+                        resulting archive size will be the same as the
+                        space taken up by the files being archived.
+                        This is the fastest compression option.
+
+                    Select the compression option you want from the
+                    dropdown list and finally click on the 'Backup'
+                    button for the location you chose to save to.
+
+                    The data that is backed up is as follows:
+
+                    o   Document root directory structure - all files
+                        underneath the webserver document root folder
+                        where you installed Torrentflux-b4rt.
+
+                        These files consist of the torrentflux-b4rt web
+                        application itself.
+
+                    o   The Transfers folder directory structure - all files
+                        in the .transfers folder located in the path
+                        configured in the admin pages 'server' tab.
+
+                        The .transfers folder contains all control files
+                        for the transfers in torrentflux-b4rt - pid,
+                        stat and log files.
+
+                    o   The fluxd folder directory structure - all files
+                        in the .fluxd folder located in the path
+                        configured in the admin pages 'server' tab.
+
+                        The .fluxd folder contains control files for the
+                        fluxd daemon - pid, socket and log files.
+
+                    o   The MRTG folder directory structure - all files
+                        in the .mrtg folder located in the path
+                        configured in the admin pages 'server' tab.
+
+                        The .mrtg folder contains control files for the
+                        MRTG traffic graphing addon component.
+
+                    o   The Torrentflux-b4rt database - the database
+                        used to store information used in the day to day
+                        running of torrentflux-b4rt.
+
+                    An archive is made for each of the options listed
+                    above and the resulting archives are then archived
+                    again into a single archive.  In the case of the
+                    filesystem backups, a backup is only made if the
+                    relevant directory actually exists - so for example
+                    if no '.mrtg' folder exists, no mrtg backup will be
+                    made.
+
+                Viewing a backup of transfer data
+
+                    Any backups that are created on the server can be
+                    viewed by clicking on the 'Backups currently on
+                    server' link on the 'Backup' tab of the superadmin
+                    pages.
+
+                    Any backups created by torrentflux-b4rt on the
+                    server are listed on the page, including information
+                    about what version the backup is from, the date the
+                    backup was created, what compression was used and
+                    the size of the archive.
+
+                    An archive can be downloaded or deleted by clicking
+                    on the respective icon to the right of the backup
+                    item in the list.
+
+            Viewing log files
+
+                Overview
+
+                    The 'Log' tab on the superadmin pages allows you to
+                    view the log files for various components of
+                    torrentflux-b4rt:
+
+                    o   fluxd - STDOUT logfiles for the torrentflux-b4rt
+                        fluxd daemon.
+
+                        The fluxd logfile contains day to day logging
+                        information which should be the first place to
+                        look when attempting to troubleshoot fluxd
+                        problems, together with the fluxd error log.
+
+                    o   fluxd-error - STDERR logfiles for the
+                        torrentflux-b4rt fluxd daemon.
+
+                        The fluxd error logfile contains any errors
+                        encountered in the day to day running of fluxd.
+                        Essential viewing when troubleshooting fluxd.
+
+                    o   fluazu - logfiles for the fluazu interface to
+                        Azureus.
+
+                    o   BitTorrent Mainline - centralised logfile for
+                        the BitTorrent Mainline client.
+
+                    o   Transfers - logfiles for transfers in the
+                        current transfer list.
+
+                        Clicking the 'transfers' logfile link will take
+                        you to a page containing a list of links to the
+                        logfiles of all current transfers in the
+                        transfer list.
+
+            Miscellaneous actions
+
+                Viewing a list of installed files and their checksums
+
+                    The 'Lists' linked page on the 'Misc' tab of the
+                    superadmin pages allows you to view a list of all
+                    the torrentflux-b4rt files installed and their
+                    checksums.
+
+                    The file list feature displays a list of each file
+                    under the current document root for the installation
+                    and the version of each file if found.
+
+                    The checksum feature displays a list of each file
+                    under the current document root for the installation
+                    and the checksum for each of those files.  The
+                    checksum for a file is unique and when the file
+                    contents change in any way, the checksum will also
+                    change.  Checksums can therefore be used to
+                    determine if the contents of a file have been
+                    changed at some point by comparing the current
+                    checksum to the checksum of the original file.
+
+                Listing and verifying checksums of installed files
+
+                    The 'Checksum Validation' linked page on the 'Misc'
+                    tab of the superadmin pages allows you to check the
+                    integrity of the files installed in your
+                    installation of torrentflux-b4rt against a list
+                    stored on the torrentflux-b4rt website.  This allows
+                    you to verify whether any of your files have changed
+                    since you installed them, alerting you to any
+                    differences.
+
+                    The script will analyse the files you have installed
+                    and provide a detailed report about all
+                    unchanged/changed/missing/new files.
+
+                Checking software requirements
+
+                    The 'Check Requirements' linked page on the 'Misc'
+                    tab of the superadmin pages allows you to run tests
+                    to check if your server's software meets the
+                    requirements for running Torrentflux-b4rt
+                    successfully.
+
+                    The tests are as follows:
+
+                    o   Check PHP Web Requirements
+
+                        Check your PHP web installation meets the
+                        requirements for web based activities in
+                        torrentflux-b4rt.
+
+                        This test will check that the currently
+                        installed PHP web component has all the correct
+                        extensions built in to run torrentflux-b4rt
+                        correctly.
+
+                    o   Check PHP CLI Binary Requirements
+
+                        Check your PHP commandline binary installation
+                        meets the requirements for commandline based
+                        activities in torrentflux-b4rt.
+
+                    o   Check Perl Requirements
+
+                        Check your Perl installation meets the
+                        requirements for perl based activities in
+                        torrentflux-b4rt.
+
+                        This option will check all perl modules required
+                        for the correct operation of torrentflux-b4rt
+                        are installed.
+
+            Checking for updates, news and version info for your
+            torrentflux-b4rt installation
+
+                Checking your version against the latest version available
+
+                    The 'Version' linked page on the 'About' tab of
+                    the superadmin pages will display the current
+                    version of your torrentflux-b4rt installation along
+                    with information about the current latest version
+                    available from the torrentflux-b4rt downloads pages.
+
+                    This option allows you to see easily whether a new
+                    version of torrentflux-b4rt is available for
+                    download.
+
+                    Note:
+                    -----
+                    When using the SVN version of torrentflux-b4rt,
+                    details of the latest version are not displayed
+                    since it is presumed you are using the bleeding edge
+                    version of the software.
+
+                Viewing news about releases
+
+                    The 'News' linked page on the 'About' tab of the
+                    superadmin pages displays a list of links to the
+                    news page for each release of torrentflux-b4rt.
+
+                    The list of news pages is obtained from the
+                    torrentflux-b4rt website and so is always up to
+                    date.
+
+                Viewing the changelog for your release
+
+                    The 'Changelog' linked page on the 'About' tab of the
+                    superadmin pages displays a list of links to the
+                    changelogs for each release of torrentflux-b4rt.
+
+                    This list of changelogs is obtained from the
+                    torrentflux-b4rt website.
+
+        Configuration
+
+            General statistics and information listed on the admin page
+
+                The default admin page displays statistics about your
+                torrentflux-b4rt installation, information about your
+                database and other miscellaneous details about your OS
+                and PHP installation.
+
+                The information displayed is as follows:
+
+                o   Torrentflux-b4rt Stats:
+
+                    *   Transfers - current number of transfers in the
+                        transfer lists for all users
+
+                    *   Users - number of users registered to use
+                        torrentflux-b4rt
+
+                    *   Hits - number of accesses of torrentflux-b4rt
+                        pages
+
+                    *   Logs - number of log entries in the database
+
+                    *   Links - number of links in the link manager
+                        database
+
+                    *   RSS - number of RSS torrent feeds available in
+                        the frontend
+
+                    *   Messages - number of private messages stored in
+                        the database
+
+                    *   Cookies - number of cookies stored in the
+                        database for all users
+
+                    *   Transfer Profiles - number of transfer profiles
+                        stored for all users
+
+                    *   Search-Engines - number of search engines
+                        available for use
+
+                    *   Themes - number of frontend themes available for
+                        use
+
+                    *   Languages - number of languages supported
+
+                    *   Docroot Disk Usage - current disk space usage
+                        for the torrentflux-b4rt installation on the
+                        webserver
+
+                    *   Version - current active version of this
+                        torrentflux-b4rt installation
+
+                    The following superadmin pages can also be opened from
+                    the tf-b4rt stats column if logged in as a
+                    superadmin:
+
+                    *   Transfer Bulk Ops
+                    *   Processes
+                    *   Maintenance
+                    *   Backup
+                    *   Log
+                    *   Misc
+                    *   About
+
+                    See the manual section on TODO_LINK: Superadmin
+                    Operations for more info on these items.
+
+
+                o   Database Details
+
+                    *   Type - the type of database currently being used
+
+                    *   Host - the fully qualified domain name of the
+                        database host
+
+                    *   Name - the name of the database being used with
+                        this installation
+
+                    *   Username - the username used to access the database
+
+                    *   Persistent Connection - whether or not
+                        persistent connections are used to connect to
+                        the database
+
+                o   Other Stats
+
+                    *   OS - the type of operating system this
+                        installation is installed on
+
+                    *   PHP-Version - the version of PHP used by the web
+                        component on this webserver
+
+                    *   sessions - whether or not session support is built
+                        into PHP (required)
+
+                    *   pcre - whether or not Perl Compatible Regular
+                        Expression support is built into PHP (required)
+
+                    *   sockets - whether or not socket support is built
+                        into PHP (required for fluxd)
+
+                    *   safe_mode - whether PHP is running in safe mode
+                        or not (must be off)
+
+                    *   allow_url_fopen - whether PHP allows the fopen
+                        function to operate with remote URLs (must be
+                        on)
+
+                    *   register_globals - whether variables are
+                        registered globally in PHP (must be off)
+
+                    *   imagetypes - the image types supported by GD; GD
+                        support must be built into PHP and at least one
+                        of GIF, JPEG or PNG image types must be
+                        supported for image-display (pies, captcha).
+
+                    The following software requirement checks can also
+                    be run from the 'Other Stats' column if logged in as
+                    a superadmin:
+
+                    *   Check PHP-Web
+                    *   Check PHP-CLI
+                    *   Check Perl
+
+                    See the manual section on TODO_LINK: 'Superadmin
+                    Operations - Checking software requirements' for
+                    more info on these items.
+
+            Server options (server)
+
+                Torrentflux-b4rt path configuration
+
+                    The following paths must be configured for
+                    torrentflux-b4rt to operate correctly:
+
+                    o   Transfer Parent Path
+                        The main torrentflux-b4rt parent path (<path>).
+
+                        This directory is used to store all files used
+                        in the day to day operation of torrentflux-b4rt.
+
+                        The folder must be writable by the webserver
+                        user and ideally owned by the webserver user.
+
+                        Files and folders stored/used by the parent path
+                        include:
+
+                        *   Storage for downloaded transfers:
+
+                            Content downloaded by torrentflux-b4rt will
+                            be downloaded into <path>/<username> if the
+                            use of home directories is enabled.
+
+                            If home directories are not enabled,
+                            downloaded content will instead be saved
+                            into the path specified by the 'Incoming'
+                            path.  See TODO_LINK: Admin/Server
+                            section on 'Home Directories'
+
+                        *   Transfer control files
+
+                            Files used internally by torrentflux-b4rt to
+                            control the download of individual transfers
+                            are stored in the <path>/.transfers folder.
+                            These files include:
+
+                            o   .stat files - a stat file is created
+                                when a transfer is injected into
+                                torrentflux-b4rt.
+
+                                The stat file is updated when a transfer
+                                is running with information such as how
+                                much data has been transferred, what
+                                percentage of the transfer has
+                                completed, how many seeds/peers the
+                                transfer has and other items of
+                                information.
+
+                                The stat file is then read by the
+                                torrentflux-b4rt internals to display
+                                the status of the transfer in the
+                                frontend.
+
+                            o   .pid files - a pid file (process id
+                                file) is created when a transfer is
+                                started.  The file contains the process
+                                id number of the transfer's process.
+
+                                The pid file is used as an indication of
+                                whether or not a transfer is running and
+                                to forcefully stop the transfer in the
+                                event that it becomes uncontrollable for
+                                some reason.
+
+                            o   .log files - a log file is created for
+                                each transfer when the transfer is
+                                started.  The logfile contains
+                                information specific to that transfer as
+                                it runs over time.
+
+                            o   .cmd files - a cmd file is created when
+                                torrentflux-b4rt has to send a command
+                                to a running transfer.  The transfer's
+                                client polls the .transfers folder
+                                regularly for new cmd files and if it
+                                finds one, it will read the command from
+                                the cmd file and act upon it.
+
+                                The lifetime of a cmd file should be
+                                very short since the file is deleted as
+                                soon as the command is executed.
+
+                        *   Client cache files
+
+                            Cache files used by BitTorrent Mainline,
+                            Transmission and BitTornado are stored in
+                            the .bittorrent, .BitTornado and
+                            .transmission folders under <path>.
+
+                        *   Fluxd control files
+
+                            Files used for the control of fluxd are
+                            contained in the <path>/.fluxd directory.
+                            Files include:
+
+                            o   fluxd-error.log - any error output from
+                                fluxd
+
+                            o   fluxd.log - standard output from fluxd
+
+                            o   fluxd.pid - process id of currently
+                                running fluxd service
+
+                            o   fluxd.sock - socket file used to
+                                communicate with fluxd
+
+                            Additional directories may be created under
+                            the .fluxd directory for use by the fluxd
+                            modules, particularly the Qmgr and Rssad
+                            modules.
+
+                        *   Fluazu control files
+
+                            Files used for the control of the fluazu
+                            component of torrentflux-b4rt.
+
+                        *   Torrentflux-b4rt template cache files
+
+                            If template caching is used to display the
+                            frontend HTML, template cache files are
+                            stored in the <path>/.templateCache folder.
+
+                        *   RSS cache files
+
+                            If RSS feeds are added via the RSS admin
+                            page, a cache of the feeds is stored in
+                            <path>/.rsscache.  This speeds up display of
+                            RSS feed contents in the frontend and takes
+                            the strain off the remote server where the
+                            RSS feed is fetched from.
+
+                    o   Document Root Path
+
+                        The Document Root path of the installation.
+
+                        The folder must be readable by the webserver user.
+
+                        This is the directory where you installed
+                        torrentflux-b4rt on the webserver and it should
+                        be detected/reported automatically by
+                        torrentflux-b4rt.
+
+                Binary path configuration
+
+                    Torrentflux-b4rt integrates with many third party
+                    applications to make performing operations on
+                    transferred files seamless from the frontend.
+
+                    For this integration to work, the associated
+                    applications must of course be installed correctly
+                    first.  For the best results you should use the
+                    distribution management system for your particular
+                    operating system to install the applications - apt,
+                    rpm, etc on linux, the ports systems on BSD.
+
+                    Once the applications required are installed, the
+                    paths to the binaries must be set correctly on the
+                    server admin page for the integration of that
+                    application to work correctly.
+
+                    There are various methods to find the location of
+                    the binary on the server:
+
+                    o   Use 'which':
+
+                        The 'which' command can be executed in a shell
+                        to display the full path to an application if it
+                        exists:
+
+                        shell> which unrar
+                        /usr/local/bin/unrar
+
+                    o   Using 'locate':
+
+                        The 'locate' utility allows you to search
+                        quickly for files on your server containing a
+                        string you specify:
+
+                        shell> locate unrar
+                        /usr/local/bin/unrar
+                        /usr/local/share/doc/unrar
+                        /usr/local/share/doc/unrar/license.txt
+                        -snip-
+
+                        Note: you must have a locate database built to
+                        be able to use the locate command.  See the
+                        manpage for locate on your system - 'man
+                        locate'.
+
+                    o   Using your package management system:
+
+                        Most package management systems include a
+                        utility that can describe what files are
+                        installed by a package.
+
+                        For example on FreeBSD, the 'pkg_info' utility
+                        can be used with the -L switch to describe what
+                        files are installed by a package:
+
+                        shell> pkg_info -L unrar-3.70.b7,4
+                        Information for unrar-3.70.b7,4:
+
+                        Files:
+                        /usr/local/bin/unrar
+                        /usr/local/share/doc/unrar/license.txt
+                        /usr/local/share/doc/unrar/readme.txt
+
+                    The following binary paths are configured on the
+                    server admin page:
+
+                    o   transmissioncli
+
+                        Transmission is a torrent client which can be
+                        used as an alternative to bittorrent or
+                        bittornado. Release-Tarballs include a cli-
+                        version of transmissioncli written to be used
+                        with torrentflux-b4rt. This version has only
+                        some additions specific to tfb-operation and
+                        is using an unmodified "libtransmission".
+                        (exact version of libtransmission can be found
+                        in the file transmission.revision or in help-
+                        page of transmissioncli)
+
+                        The modified transmission source can be
+                        found in the 'clients/transmission' folder -
+                        once extracted, build and install it to your
+                        desired location.
+
+                        It must be built and installed on
+                        the system on which you are running it before it
+                        can be used.
+
+                    o   wget
+
+                        Wget transfers can be controlled using
+                        torrentflux-b4rt.
+
+                    o   uudeview
+
+                        uudeview is required for the operation of the
+                        nzbperl component of torrentflux-b4rt.
+
+                    o   php
+
+                        This is the commandline (cli) PHP binary, not to be
+                        confused with the PHP web installation!  You
+                        must have a working cli php binary for many
+                        features of torrentflux-b4rt to work correctly.
+
+                        Note that the php cgi binary WILL NOT work as a
+                        replacement to the php cli binary.
+
+                        On most OS's, the package management system will
+                        allow you to install a php cli binary as well as
+                        a web based php component (mod_php/php cgi
+                        binary).  See your package management package
+                        database for info.
+
+                    o   python
+
+                        Python is used by the BitTorrent and BitTornado
+                        torrent clients and is required for the most basic
+                        operation of torrentflux-b4rt as a torrent
+                        controller.
+
+                        There are various additional python related
+                        dependencies required for BitTorrent to work
+                        fully, see the README file in the
+                        torrentflux-b4rt distribution root directory for
+                        more info.
+
+                    o   perl
+
+                        Perl is used by various features in
+                        torrentflux-b4rt.  There are various perl
+                        related dependencies required for these features
+                        to work fully, see the README file in the
+                        torrentflux-b4rt distribution root directory for
+                        more info.
+
+                    o   grep
+
+                        Basic userland binary that is included in most
+                        modern Unix type OS's.
+
+                    o   awk
+
+                        Basic userland binary that is included in most
+                        modern Unix type OS's.
+
+                    o   du
+
+                        Basic userland binary that is included in most
+                        modern Unix type OS's.
+
+                    o   unzip
+
+                        Unzip is used in the directory manager frontend
+                        in torrentflux-b4rt for extracting zip archives
+                        on the server.  Unzip can be found in most
+                        package management systems on Unix type systems.
+
+                    o   cksfv
+
+                        cksfv is used in the directory manager frontend
+                        in torrentflux-b4rt for checking the integrity
+                        of transferred content against an included .sfv
+                        file.  Cksfv can be found in most package
+                        management systems on Unix type systems.
+
+                    o   vlc
+
+                        VLC can be used to stream avi content directly
+                        from the torrentflux-b4rt server where the
+                        content is located.  VLC can be found in most
+                        package management systems on Unix type systems.
+
+                    o   unrar
+
+                        Unrar is used in the directory manager frontend
+                        in torrentflux-b4rt for extracting rar archives
+                        on the server.  Unrar can be found in most
+                        package management systems on Unix type systems.
+
+                Operating system specific configuration
+
+                    There are a small number of applications that are
+                    specific to different operating systems -
+                    applications that only exist on one type of OS.
+
+                    The OS specifics, by OS, are as follows:
+
+                    o   Linux:
+
+                        *   loadavg
+
+                            Used to determine server load average for
+                            display in torrentflux-b4rt frontend stats.
+
+                        *   netstat
+
+                            Used to determine network connection stats for
+                            display in torrentflux-b4rt frontend.
+
+                            netstat is a native app on Linux and the
+                            path should be detected automatically.
+
+                    o   BSD:
+
+                        *   sockstat
+
+                            Used to determine network connection stats for
+                            display in torrentflux-b4rt frontend.
+
+                            sockstat is a native app on BSD and the path
+                            should be detected automatically.
+
+                            The reason for the requirement of sockstat
+                            on BSD instead of netstat is that the
+                            functionality of BSD's netstat does not work
+                            identically to that of Linux's netstat.
+                            Sockstat instead returns the information
+                            required by torrentflux-b4rt correctly.
+
+            Transfer client options (transfer)
+
+                Overview
+
+                    The transfer admin page allows you to configure
+                    options for various transfer clients in
+                    torrentflux-b4rt.  Since there are various clients
+                    supported in torrentflux-b4rt, the transfer admin
+                    section is split into subsections for each 'type' of
+                    client - bittorrent, azureus (a special case of
+                    bittorrent in torrentflux-b4rt), wget, nzbperl and
+                    other settings common to all transfers.
+
+                    In the admin section, a legend or key is used to
+                    refer to each type of client.  The legend is as
+                    follows:
+
+                        B = BitTornado
+                        T = Transmission
+                        M = Bittorrent Mainline
+                        A = Azureus
+
+                    The client type that a setting applies to on the
+                    transfer admin page is indicated in brackets, so for
+                    example if a certain setting applies only to
+                    BitTorrent Mainline and BitTornado, the setting will
+                    have the following after it:
+
+                    (B + M)
+
+                    where B represents BitTornado and M represents
+                    Mainline.
+
+                    Keep this in mind when configuring settings on the
+                    transfer admin page.
+
+                BitTorrent specific settings
+
+                    The BitTorrent specific settings are as follows:
+
+                    o   Default BitTorrent Client
+                        Set the client to use by default to start
+                        torrent transfers.
+
+                        This option determines which bittorrent client
+                        should be used by default when torrents are
+                        started.
+
+                    o   Torrent Metainfo Client
+                        Set the client to use to decode torrent meta data.
+
+                        This option determines which metainfo client is
+                        used to display information encoded within a
+                        .torrent file.  This information is displayed
+                        primarily in 'details' tab of the transfer
+                        settings window for a torrent specific transfer.
+
+                    o   Extra Commandline Options (B+T+M)
+
+                        This option allows you to specify any additional
+                        commandline options for the BitTornado/Mainline
+                        or Transmission clients that aren't covered
+                        elsewhere in the torrentflux-b4rt transfer admin
+                        settings page.
+
+                    o   Max Upload Rate (B+T+M+A)
+                        Set the default value for the max upload rate
+                        per transfer (0 for no limit).
+
+                        This option allows you to set a global default
+                        for the maximum upload rate in kB/sec.
+
+                        Note:
+                        -----
+                        This limit applies only to individual transfers,
+                        so if you set a maximum upload rate here of 10
+                        and you have 5 torrents running in
+                        torrentflux-b4rt, potentially your total maximum
+                        upload rate will be 50 kB/sec.
+
+                    o   Max Download Rate (B+T+M+A)
+                        Set the default value for the max download rate
+                        per transfer (0 for no limit).
+
+                        This option allows you to set a global default
+                        for the maximum download rate in kB/sec.
+
+                        Note:
+                        -----
+                        As with the upload rate - this limit applies
+                        only to individual transfers, so if you set a
+                        maximum download rate of 10 and you have 5
+                        torrents running in torrentflux-b4rt,
+                        potentially your total maximum download rate
+                        will be 50 kB/sec.
+
+                    o   Max Upload Connections (B+M)
+                        Set the default value for the max number of
+                        upload connections per transfer (0 for no
+                        limit).
+
+                        This option determines how many concurrent
+                        upload connections a single torrent process
+                        will initiate.
+
+                        This option can be useful if you have a large
+                        number of torrents running and need to restrict
+                        the number of open sockets you use.
+
+                    o   Max Connections (B+M)
+                        Set the maximum number of concurrent connections
+                        allowed.
+
+                        This option determines the total concurrent
+                        connections for both upload and download.
+
+                        Again, this is useful if you need to restrict
+                        the total number of network connections to/from
+                        your machine due to resource limitations.
+
+                    o   Port Range (B+T+M)
+                        Set the default values for the port range
+                        (min - max).
+
+                        Note:
+                        -----
+                        One port is required per torrent; ensure
+                        the port range is large enough for the maximum
+                        number of torrents you will be running.
+
+                        The port range you set here should be allowed
+                        by any firewall you use, either on the server
+                        itself or at the network perimeter (ie on your
+                        modem router).
+
+                        As mentioned, be sure to make the port range
+                        large enough for your needs.  One port is
+                        required for each transfer that you run, so if
+                        you run 10 torrents be sure to set the port
+                        range minimum and maximum at least 10 ports
+                        apart.
+
+                        When choosing Azureus as the transfer client,
+                        the port setting has no effect.  Azureus uses a
+                        single port configured on the Azureus server in
+                        use.
+
+                    o   Rerequest Interval (B+M)
+                        Set the default value for the rerequest interval
+                        to the tracker (default 1800 seconds).
+
+                        This setting allows you to control how
+                        frequently the torrent client will contact the
+                        torrent tracker with updates about how much data
+                        has been transferred and other information
+                        related to the transfer.
+
+                        Be careful when changing this value as it puts
+                        extra load onto the remote server running the
+                        tracker.
+
+                    o   Default Torrent Completion Activity (B+T+M+A)
+                        Select whether or not a torrent should keep
+                        seeding when download is complete (please seed
+                        your torrents).
+
+                        This setting allows you to control what happens
+                        when a torrent has finished downloading all the
+                        data you requested.  Setting it to 'die when
+                        done' will have the torrent stop after it's
+                        completed; 'keep seeding' will carry on seeding
+                        until you reach the percentage ratio you specify
+                        for the torrent to stop (see below).
+
+                        Please bear in mind it is generally considered
+                        polite to continue seeding until you at least
+                        meet a 100% / 1:1 ratio for a torrent - ie when
+                        you have uploaded as much as you have
+                        downloaded.
+
+                    o   Default Percentage When Seeding Should Stop
+                        (B+T+M+A)
+                        Set the default share percentage at which
+                        torrents will die if 'Keep Seeding' is selected
+                        as completion activity above ('0' will seed
+                        forever).
+
+                        This setting is only applicable when 'Keep
+                        Seeding' is specified as the default action to
+                        take when a torrent is completed.
+
+                        The option determines at what percentage sharing
+                        ratio a transfer should stop.  For example if
+                        you set this to 200 (percent), then the torrent
+                        will continue seeding after it completes until
+                        you have uploaded twice as much as you
+                        downloaded.
+
+                        This option is very useful for private trackers
+                        who require that you maintain a certain minimum
+                        sharing ratio.  If for example the default
+                        minimum share ratio to maintain 'power user' level
+                        on a particular tracker is 1.05 you can ensure that
+                        this ratio is kept up by using the 'default
+                        percentage when seeding should stop' option and set
+                        it to 105 (percent).
+
+                    o   Enable Cumulative Sharekill (B+T+M+A)
+                        Recalculate the sharekill value passed to
+                        clients on restart based on the total amount
+                        uploaded - ie including previous sessions
+                        transfer totals.
+
+                        This option allows you to keep track of your
+                        sharing ratio over a number of transfer sessions
+                        for a given transfer.
+
+                        For example, you start a torrent and it reaches
+                        a 50% sharing ratio and then you stop it.
+                        Without 'enable cumulative sharekill', if you
+                        restart the torrent the ratio will drop back
+                        down to 0% again and you will have lost track of
+                        how much you have seeded the torrent over the
+                        two sessions.
+
+                        Instead, if cumulative sharekill is enabled,
+                        when restarted the torrent will display 50% - ie
+                        the amounts transferred in the last session are
+                        remembered.
+
+                    o   Enable File Priority (B)
+                        Allow users to select which files from a torrent
+                        to download.
+
+                        This option allows your users to be able to pick
+                        out individual files from a torrent and have
+                        only those files download.
+
+                        This can be very useful for example when you
+                        have a torrent for a complete season of a show
+                        but you only want to grab the first 2 episodes.
+                        By using file prioritization you can do just
+                        that, grab only the individual files you want.
+
+                    o   Superseeder (B)
+                        Set superseeder setting on torrent start by
+                        default.
+
+                        This option determines if the superseeder
+                        setting is on by default for all torrents that
+                        are started.
+
+                        Superseeding is an optimisation concept that
+                        allows pieces of a torrent to be uploaded
+                        quickly and efficiently using as little
+                        bandwidth as possible *when initially seeding a
+                        torrent*.
+
+                        For general seeding - ie when you are not the
+                        initial seeder - superseeding definitely should
+                        not be used.
+
+                        See:
+
+                        http://www.bittornado.com/docs/superseed.txt
+
+                        for more info.
+
+                    o   Skip HashCheck (B+M)
+                        Skip hash checking by default when starting
+                        torrents.
+
+                        This option allows you to skip hash checking
+                        when torrents are started up.
+
+                        Generally a client will run a hash check on
+                        downloaded data to verify the integrity of
+                        it whenever the torrent is restarted.  However
+                        by using this option you can disable this
+                        checking.
+
+                        This option can be useful when you are seeding
+                        many large torrents whose content you are
+                        certain has not changed and you need to restart
+                        them all for some reason (after a server reboot
+                        for example) but don't want to go through the
+                        time consuming process of having hash checking
+                        performed on each and every torrent.  Disabling
+                        hash checking will speed up the restart process
+                        here and get the torrents all seeding quickly.
+
+                        *However*, generally you should NOT disable hash
+                        checking.
+
+                Fluazu specific settings and operation
+
+                    Overview
+
+                        The fluazu settings page is accessed by clicking
+                        on the 'Edit Fluazu Settings' link on the
+                        'transfer' admin page.  On the edit fluazu
+                        settings page you can start and stop fluazu,
+                        view log, process and version info and configure
+                        fluazu settings.  Settings can also be changed
+                        for the fluazu daemon on the fly whilst it is
+                        running.
+
+                    Starting and stopping fluazu
+
+                        To start the fluazu daemon, click the 'Start
+                        Fluazu' button.  The daemon will be started in
+                        the background on the server and the status of
+                        the 'Edit Fluazu Settings' page will change to
+                        'Fluazu Started'.  A number of configuration
+                        options for Azureus will also be displayed to
+                        allow you to change these via fluazu.
+
+                        To stop the fluazu daemon, click the 'Stop
+                        Fluazu' button.  The daemon will be stopped in
+                        the background on the server and the status of
+                        the page will change to 'Fluazu Off'.
+
+                    Configuring azureus settings via fluazu
+
+                        After fluazu has been started, a number of
+                        azureus options can be changed dynamically from
+                        the 'Edit Fluazu Settings' page.  Making changes
+                        to these options will send a command to the
+                        backend azureus server to make the change.
+
+                        The Azureus settings that can be changed via the
+                        'Edit Fluazu Setting' page are as follows:
+
+                        o   Max Active Torrents
+
+                            Maximum torrents to run at the same time.
+
+                        o   Max Active Torrents When Only Seeding
+
+                            Maximum torrents to run at the same time
+                            when there are only seeding torrents running
+                            - ie when not leeching anything.
+
+                        o   Max Connections Global
+
+                            Maximum number of connections for all
+                            torrents running.
+
+                        o   Max Connections Per Torrent
+
+                            Maximum number of connections for each
+                            individual torrent.
+
+                        o   Max Download Speed KBs
+
+                            Maximum download speed for all torrents.
+
+                        o   Max Downloads
+
+                            Maximum number of downloads to run
+                            concurrently.
+
+                        o   Max Upload Speed KBs
+
+                            Maximum upload speed for all torrents.
+
+                        o   Max Upload Speed When Only Seeding KBs
+
+                            Maximum upload speed to allow when only
+                            seeding - ie when not leeching
+
+                        o   Max Uploads
+
+                            Maximum number of uploads to run
+                            concurrently.
+
+                        o   Max Uploads Seeding
+
+                            Maximum number of uploads to allow when
+                            seeding.
+
+                    Viewing information about fluazu - logs, processes,
+                    version
+
+                        To view the fluazu log entries, click on the
+                        'logs' link.  A new page will open with the
+                        current fluazu logfile in it.
+
+                        To view the process listing information for the
+                        fluazu daemon, click on the 'ps' link.  A new
+                        page will open containing the process listing
+                        info if available.
+
+                        To view the version information for the
+                        currently installed fluazu daemon, click the
+                        'version' link.  A new page will open containing
+                        the version details.
+
+                    Configuring fluazu
+
+                        The following options are configured on the
+                        'Edit Fluazu Settings' page:
+
+                        o   Host
+
+                            Host of Azureus-server (currently only
+                            localhost is supported).
+
+                        o   Port
+
+                            Port of Azureus-server (XML/HTTP, default:
+                            6884).
+
+                        o   Secure
+
+                            Use secure connection to azureus.
+
+                        o   Username
+
+                            Username to use when connecting to
+                            Azureus-server.
+
+                        o   Password
+
+                            Password to use when connecting to
+                            Azureus-server.
+
+                Wget specific settings
+
+                    The wget specific settings are as follows:
+
+                    o   Enable Wget
+                        Set who can use wget.
+
+                        By default torrentflux-b4rt is set to allow all
+                        users to use the wget functionality.  If you
+                        wish to restrict access to wget, you can do so
+                        here.
+
+                    o   Limit Download Rate
+                        Set the default value for the max download rate
+                        per transfer (0 for no limit).
+
+                    o   Limit Number of Retries
+                        Set the max number of retries to attempt (0 for
+                        no limit).
+
+                        When accessing a URL resource using wget and the
+                        URL is unavailable for some reason, this option
+                        specifies how many times wget should continue to
+                        retry getting the resource.
+
+                    o   Enable Passive FTP
+                        Enable/disable "passive" transfer mode for FTP.
+
+                        In passive ftp transfer mode, the transfer
+                        client determines the port it uses to
+                        communicate with the remote server.  In active
+                        mode, the server determines the ports used to
+                        communicate.  Changing the transfer mode can
+                        help with communication problems because of
+                        restrictive firewalls - the subject is beyond
+                        the scope of this document.
+
+                Nzbperl specific settings
+
+                    The requirements for using nzbperl can be checked by
+                    clicking on the 'nzbperl Requirements Check' link.
+                    The requirements checker script for nzbperl will run
+                    in a new window and list all the elements required
+                    to run nzbperl and whether your current server
+                    installation meets those requirements.
+
+                    The nzbperl specific settings are as follows:
+
+                    o   Enable Nzbperl
+                        Set who can use nzbperl.
+
+                        By default access to the nzbperl functionality
+                        is disabled. If you wish to change access rights
+                        to nzbperl, you can do so here.
+
+                    o   Use Subdirectories
+                        Download nzbperl content into separate
+                        subdirectories.
+
+                        By default nzbperl will download all content
+                        into the master nzb directory.  If you wish to
+                        have content downloaded into individual
+                        subdirectories for each transfer, you can do
+                        that here.
+
+                    o   Bad File Action
+                        Set the action to perform on finding a bad entry
+                        in the nzb.
+
+                        By default nzbperl will drop any bad files
+                        downloaded and attempt to re-transfer them.
+                        This option can be changed here.
+
+                    o   Download Rate
+                        Set the default value for the max download rate
+                        per transfer (0 for no limit).
+
+                    o   Server
+                        Set the NNTP server nzbperl should connect to.
+
+                    o   Username
+                        Set the username to use when connecting to your
+                        NNTP server.
+
+                    o   Password
+                        Set the password to use when connecting to your
+                        NNTP server.
+
+                    o   Connections
+                        The number of simultaneous connections to the
+                        NNTP server nzbperl will create.
+
+                        Note:
+                        You can only run conn/server_max nzb downloads at once.
+
+                    o   Threads
+                        Use threading.
+
+                        Note:
+                        Enable only if your Perl installation supports threads.
+
+                    o   Extra Commandline Options
+
+                        Any extra commandline options you wish to pass
+                        to the nzbperl process can be set here.  Refer
+                        to the nzbperl documentation for more
+                        information on this TODO_LINK:nzbperl website
+
+                Common settings
+
+                    The common settings available for configuration are:
+
+                    o   Enable 0000 Umask (B+T+M+W+N)
+                        Enable/disable setting umask to 0000 when
+                        starting a client.
+
+                        Note:
+                        Resulting permissions of created files will be
+                        777 - readable, writable and executable by
+                        everyone.
+
+                        Umasking refers to the default file permissions
+                        that should be given to a newly created file.
+                        This option will set the umask to 0000 which
+                        will give all files created by all transfer
+                        processes a default permission so the files are
+                        readable/writable by everyone.
+
+                        See the manpage for 'sh' for more info on umask.
+
+                    o   Use Nice (B+T+M+W+N)
+                        Set the nice level to use for transfer
+                        processes.
+
+                        Note:
+                        The highest nice level - 19 - gives processes
+                        the lowest possible priority over other
+                        processes on the server.
+
+                        The nice utility is a feature of Unix type OSs
+                        that allows you to adjust the priority of a
+                        process.  The higher a process's 'nice' value is
+                        the less priority it has with respect to access
+                        to OS system resources - memory paging, CPU
+                        access, etc.  By setting a high nice value you
+                        are being 'nice' to the other processes on a
+                        server.
+
+                        This option is useful in the case you have a
+                        multi-purpose server and you don't want
+                        torrentflux-b4rt created processes to take all
+                        the resources up - setting a high nice value
+                        here will effectively give other processes on
+                        the webserver higher priority.
+
+            Transfer control settings (control)
+
+                Overview
+
+                    The 'control settings' tab ('control') on the admin
+                    pages allows you to specify settings related to the
+                    control of transfers in torrentflux-b4rt.
+
+                Configuration
+
+                    The settings available on the 'control' tab are as
+                    follows:
+
+                    o   Customize Transfer Settings
+                        Define who can customize transfer settings.
+
+                        This option allows you to determine who can
+                        modify the transfer settings when starting a
+                        transfer - ie max upload/download rate, max
+                        connections, completion activity, port
+                        configuration, etc.
+
+                        Access to the transfer settings customization
+                        functionality can be configured as follows:
+
+                        *   Only Admins - only admins can modify
+                            transfer settings.
+
+                        *   All Users - all users can modify transfer
+                            settings.
+
+                        *   Disabled - customization of transfer
+                            settings is disabled.
+
+                    o   Transfer Profiles
+                        Define who can use transfer profiles.
+
+                        Transfer profiles allow users to configure
+                        different profiles for different transfer
+                        requirements.  TODO_LINK: see user operation
+                        section - Transfer profile management
+
+                        Access to the transfer profile functionality can
+                        be configured as follows:
+
+                        *   All Users - all users can create and use the
+                            transfer profile functionality.
+
+                        *   Only Predefined - only predefined transfer
+                            profiles can be used.  Create transfer
+                            profiles as admin first.
+
+                        *   Only Admins - only admins can create and use
+                            transfer profiles.
+
+                        *   Disabled - transfer profile functionality is
+                            disabled.
+
+                    o   Enable Client Chooser
+                        When enabled, allow users to choose which
+                        transfer client to use.
+
+                    o   Enable Save Path Chooser
+                        When enabled, displays a directory tree to allow
+                        users to download transfers to any existing
+                        folder under their home directory.
+
+                        With this option enabled, each recursive
+                        subdirectory within the user's configured home
+                        directory will be displayed in a directory tree
+                        list, up to 'max depth' levels deep.
+
+                        The user can then choose one of those
+                        directories as the location to save newly
+                        transferred content to.
+
+                    o   Save Path Chooser Max Depth
+                        Set the maximum depth of subfolders to display
+                        in the save path chooser directory tree (set to
+                        '0' to display all subfolders).
+
+                        If 'enable save path chooser' is enabled, the
+                        list of directories displayed in the save path
+                        chooser will drill down by default as far as
+                        necessary to display all recursive folders.
+
+                        If you have a large number of complex folder
+                        structures within the user's home directories,
+                        you can change the depth to which the 'save path
+                        chooser' drills down by changing this option.
+
+            Web application frontend options (webapp)
+
+                Overview
+
+                    The 'WebApp Settings' (webapp) tab on the admin
+                    pages allows you to configure settings related to
+                    the torrentflux-b4rt frontend.
+
+                Configuration
+
+                    The configuration options for the webapps tab are as
+                    follows:
+
+                    o   Select Authentication Type
+                        Select which authentication method
+                        torrentflux-b4rt uses for user login.
+
+                        This option allows you to decide how users will
+                        authenticate to login to the torrentflux-b4rt
+                        frontend.  There are five different options to
+                        choose from:
+
+                        *   Form Auth
+                            Standard form based authentication.
+
+                            This is the default authentication method.
+                            Users will have to complete and submit a login
+                            form with their username and password at the
+                            start of each new browser session to be able
+                            to login to the frontend.
+
+                        *   Form Auth + Cookie
+                            Standard form based authentication using an
+                            'autologin' cookie (expires after 30 days).
+
+                            This method is similar to the default 'Form
+                            Auth', except that the user will have the
+                            option to check a checkbox to allow them to
+                            stay logged in across browser sessions -
+                            'Remember Me' functionality.
+
+                            This method uses a client side cookie to store
+                            the autologin information.  Information is
+                            stored using a hashing algorithm so that
+                            details are not directly readable in plain
+                            text.
+
+                        *   Form Auth + Image
+                            Standard form based authentication using image
+                            based captcha validation (user must enter the
+                            characters displayed in a dynamically created
+                            image to validate).
+
+                            This option enables the use of Captchas as an
+                            added form of protection against automated
+                            brute force attacks.  Using captchas, the user
+                            is required to type in the characters
+                            displayed in a dynamically generated image
+                            before being allowed to login.  This is
+                            required the first time the user browses to
+                            the torrentflux-b4rt frontend in any given
+                            browser session.
+
+                        *   Basic Auth
+                            Use HTTP Basic Authentication scheme
+                            (browser will handle user/password input).
+
+                            This option uses the standard HTTP basic
+                            authentication method to log users in.
+
+                            The torrentflux-b4rt login page sends a
+                            'HTTP/1.0 401 Unauthorized' HTTP header to
+                            the browser/user agent which should prompt
+                            the browser to display a username/password
+                            dialogue box for the user to enter their
+                            credentials in.  When this dialogue box is
+                            submitted, the credentials are then
+                            compared to the credentials stored in the
+                            torrentflux-b4rt database.  If the
+                            credentials match, the user is logged in.
+
+                        *   Basic Auth + Passthrough
+                            Pass the credentials of a user authenticated
+                            externally via basic auth to the webapp.
+
+                            This method can be used when authentication
+                            is to be performed against another user
+                            database other than the default
+                            torrentflux-b4rt db - for example using the
+                            apache webserver's built in authentication
+                            modules or some third party authentication
+                            module.
+
+                            Using this method, the webserver is
+                            responsible for performing the
+                            authentication initially.  If the
+                            credentials are accepted by the webserver,
+                            they are then passed on to the
+                            torrentflux-b4rt login mechanism where the
+                            credentials are again checked, this time
+                            against the torrentflux-b4rt database.
+                            If this check is successful, the user is
+                            logged in.
+
+                            Note:
+                            -----
+                            To logout from torrentflux-b4rt whilst using
+                            the 'Basic Auth + Passthrough' method, the
+                            user must end the current browser session -
+                            ie terminate all instances of the current
+                            web browser session.
+
+                            This has the effect of logging the user out
+                            - when the user starts a new browser session
+                            and browses to the torrentflux-b4rt
+                            frontend, they will be required to login
+                            again.
+
+                            TODO_LINK: Link to some basic auth guide,
+                            apache site or apache week?
+
+
+                    o   Basic Authentication Realm
+                        Specify the realm name to use if Basic
+                        Authentication method is used.
+
+                        This option sets which realm to use when using
+                        basic authentication methods. A realm defines a
+                        particular area of a website that the basic
+                        authentication scheme protects or applies to.
+
+                        The realm name configured here will be displayed
+                        in the basic auth dialogue box displayed when a
+                        user logs in.
+
+                    o   Default Page Title
+                        Specify the default page title.
+
+                        This is the default page title displayed in the
+                        HTML title tags for the torrentflux-b4rt index
+                        page.  Web browsers will typically display the
+                        text contained in the HTML title tags for a web
+                        page in the title bar of the browser window.
+
+                    o   Enable Personal Settings
+                        Allow users to change their personal settings.
+
+                        This option allows users to modify their
+                        personal settings via the 'Profile' page.
+
+                        TODO_LINK: user ops, Changing what to display in
+                        the frontend
+
+
+                    o   Display Torrentflux-B4rt Site Link
+                        When enabled, will display a small link to the
+                        Torrentflux-b4rt website at the bottom of all
+                        pages.
+
+                    o   Drivespace Bar Style
+                        Select the styling of the drivespace bar in the
+                        frontend.
+
+                        There are two different styles of drivespace
+                        bar to choose from - tf and xfer.
+
+                        The 'tf' style bar displays a drivespace bar on
+                        the index page where the length of the bar
+                        indicates how much disk space has been used on
+                        the hard disk mount point where the .transfers
+                        folder is located.  The amount of free space in
+                        MB/GB and as a percentage of the total is also
+                        displayed in text next to the drivespace bar.
+
+                        The 'xfer' style bar displays a drivespace bar
+                        where the length of the bar indicates how much
+                        disk space is left available (instead of how
+                        much is actually used in the case of the 'tf'
+                        style bar).
+
+                    o   Transfer Statistics Update Method
+                        Choose the method used to update the transfer
+                        statistics window.
+
+                        The 'Stats' page for a transfer - displayed by
+                        clicking on the transfer in the transfer list
+                        and then clicking the 'Stats' tab - is
+                        automatically updated to display stats in near
+                        real time.
+
+                        The method used to update the stats can be set
+                        using this option.  There are two options - AJAX
+                        or HTML Meta Refresh.
+
+                        AJAX is the default update method.
+
+                        Tip:
+                        ----
+                        AJAX updates use less bandwidth since only the
+                        relevant data in the stats page is transferred.
+
+                        Alternatively, the HTML Meta Refresh update
+                        method will transfer the complete stats webpage
+                        from the webserver using a little more bandwidth
+                        than AJAX.
+
+                    o   Transfer Statistics Update Interval
+                        Specify the interval in seconds between updates
+                        in the transfer statistics window.
+
+                    o   Resolve Hostnames In Transfer Window Hosts Tab
+                        Whether to resolve transfer peer IP addresses
+                        to hostnames in the Hosts tab of the transfer
+                        window - selecting Hostnames will add load to
+                        the server.
+
+                        By default, torrentflux-b4rt does not resolve IP
+                        addresses (ie 1.2.3.4) into fully qualified
+                        domain names (ie example.com).  This option
+                        enables the resolution of IP addresses to domain
+                        names.
+
+                        Note:
+                        -----
+                        This option increases the load on the server,
+                        both in terms of CPU processing and network
+                        resources and should be used with caution
+                        especially on servers with a large number of
+                        transfers running at the same time.
+
+                    o   Server Monitor Update Interval
+                        Specify the interval in seconds between updates
+                        in the Server Monitor window.
+
+                        The server monitor displays statistics relating
+                        to the webserver running torrentflux-b4rt.  This
+                        option defines how often the server monitor
+                        window is updated.
+
+                        For more information on the server monitor, see:
+
+                        TODO_LINK: user ops, Opening the server monitor
+
+                    o   Hide Referer When Following Links
+                        When enabled, referer information will not be
+                        forwarded to websites visited by clicking on
+                        links within torrentflux-b4rt.
+
+                        This option is enabled by default and stops the
+                        browser sending the URL of the page from which
+                        the user came from - the 'referer' URL.
+
+                    o   Default Theme
+                        Select the default theme.
+
+                        Note: This theme will also be used for the login
+                        screen when a form based authentication method
+                        is used.
+
+                        Note:
+                        -----
+                        This option will set the default theme for all
+                        new users.  Existing users' theme settings will
+                        not be modified.
+
+                    o   Default Language
+                        Select the default language.
+
+                        Note:
+                        -----
+                        This option will set the default language for
+                        all new users.  Existing users' language
+                        settings will not be modified.
+
+                    o   Days To Keep Logged Audit Actions
+                        Number of days that audit actions will be held
+                        in the database.
+
+                        By default, audit actions are stored in the
+                        database for 30 days.  After this time the
+                        entries are pruned from the database.
+
+                        For more information on viewing Audit Actions,
+                        see:
+
+                        TODO_LINK: user ops, Viewing activity history
+                        admin ops, Activity / Audit / Event viewer
+                        (activity)
+
+                    o   Minutes To Keep User Online Status
+                        Number of minutes after ending a browser session
+                        that user status changes to offline.
+
+                        Torrentflux-b4rt displays a list of all users
+                        currently 'online' at any one time in the
+                        'users' panel in the frontend.  This option sets
+                        how long in minutes after a user ends a browser
+                        session to mark the user as being 'offline'.
+
+                        The default is 3 minutes.
+
+                    o   Minutes To Cache RSS Feeds
+                        Number of minutes to cache RSS XML feeds on the
+                        server.
+
+                        Torrentflux-b4rt caches RSS feeds for users to
+                        browse via the 'RSS Torrents' link on the index
+                        page to save on network traffic and processing.
+
+                        This option allows you to configure how long to
+                        wait before requesting new copies of the RSS
+                        feeds to 'refresh' the cache.
+
+                        The default is to cache RSS feeds for 20
+                        minutes.
+
+                        Warning:
+                        --------
+                        Setting the cache to a very low value so the RSS
+                        feeds are refreshed more often can lead to a ban
+                        on your IP address if the server from which the
+                        RSS feed is requested sees you as 'hammering'.
+
+                        You should consult the tracker website's rules
+                        before setting the cache timeout to a very low
+                        value.
+
+                    o   Enable Template Cache
+                        Enable caching of HTML template files.
+
+                        Torrentflux-b4rt can cache the HTML template
+                        files displayed for users by setting this
+                        option.  This can significantly improve the
+                        speed at which pages are displayed in the
+                        browser.
+
+                        Template caching is disabled by default.
+
+                        Note:
+                        -----
+                        This option should be used with caution - if
+                        'stale' pages or other glitches are observed
+                        when using template caching, the cache can be
+                        flushed:
+
+                        TODO_LINK:user ops, Flushing the template cache
+
+                    o   Debug Level
+                        Set the debug level - higher values increase the
+                        debugging output in the logs.
+
+                        The verbosity of logging messages emitted by
+                        torrentflux-b4rt can be increased by setting
+                        this value higher - 0 is lowest, 2 is highest.
+                        This option is especially useful when
+                        troubleshooting the operation of
+                        torrentflux-b4rt and should be set to the
+                        highest value before requesting support.
+
+                        The default debugging level is '0'.
+
+                    o   Show SQL Debug Statements
+                        SQL Errors will always be displayed but when
+                        this feature is enabled the SQL Statement that
+                        caused the error will be displayed as well.
+
+            Web application index page display options (index)
+
+                Overview
+
+                    The 'Index Settings' (index) tab on the admin
+                    pages allows you to configure settings related to
+                    the display of the torrentflux-b4rt index page in
+                    the frontend.
+
+                Configuration
+
+                    The configuration options for the index tab are as
+                    follows:
+
+                    o   Page Refresh
+                        Enable/disable page updates using HTML meta
+                        refresh.
+
+                        The index page will be refreshed using an HTML
+                        meta refresh tag when this option is enabled.
+
+                        Tip:
+                        ----
+                        AJAX updates use less bandwidth since only the
+                        relevant data in the index page is transferred.
+
+                        Alternatively, the HTML Meta Refresh update
+                        method will transfer the complete index page
+                        from the webserver using a little more bandwidth
+                        than AJAX.
+
+                    o   Page Refresh Interval
+                        Interval in seconds between page refreshes.
+
+                    o   AJAX Update
+                        Enable/disable page updates using AJAX.
+
+                    o   Page Title AJAX Update
+                        Enable/disable update of page titles using AJAX.
+
+                        Tip:
+                        ----
+                        This option is useful because torrentflux-b4rt
+                        displays various statistics in the page title,
+                        which in turn are displayed in the task bar on
+                        Windows.  This makes it easy to see at a glance
+                        how transfers and the server are performing.
+
+                    o   Users AJAX Update
+                        Enable/disable update of user list using AJAX.
+
+                        The user list displays which torrentflux-b4rt
+                        users are currently online.  This option updates
+                        the user list dynamically using AJAX.
+
+                    o   Transfer List AJAX Update
+                        Enable/disable update of transfer list using
+                        AJAX.
+
+                        The transfer list can be updated using AJAX
+                        with this option.
+
+                    o   Silent AJAX Update
+                        Do not display any processing request messages
+                        when AJAX updates are in progress. Countdown to
+                        update timer will also not be displayed.
+
+                        Usually torrentflux-b4rt will display a
+                        'Processing...' message when an update is in
+                        effect to indicate to the user that they should
+                        not click on anything since the system is busy.
+                        This option will disable the processing
+                        messages.
+
+                    o   AJAX Update Interval
+                        Interval in seconds between AJAX updates.
+
+                    o   Width
+                        Width of the index page in pixels.
+
+                        Note:
+                        -----
+                        If a large number of transfer list columns are
+                        selected for display, the width of the transfer
+                        list may exceed the default page width selected
+                        here.  As a result, the index page width may be
+                        larger than the width selected here if you have
+                        a large number of transfer list columns on
+                        display.
+
+                    o   Multi Uploads
+                        Enable/disable display of links for uploading of
+                        multiple files at once.
+
+                        This option will display links underneath the
+                        transfer file upload form to allow users to
+                        upload multiple files at a time.
+
+                    o   Multi Upload Page Rows
+                        Number of rows of file upload fields to display
+                        on the multi uploads page.
+
+                    o   Torrent Search
+                        Enable/disable torrent searches from the home
+                        page via third party torrent sites.
+
+                    o   Default Torrent Search Engine
+                        Default search engine to use for torrent
+                        searches.
+
+                    o   Display Links List
+                        Enable/disable display of admin configured list
+                        of links on the index page.
+
+                        The link list is configured on the 'links' admin
+                        page - see here for more info:
+
+                        TODO_LINK:
+                        Admin ops, Website link list management (links)
+
+                    o   Display Users List
+                        Enable/disable display of torrentflux-b4rt users
+                        and their online status.
+
+                        When enabled, a list of all torrentflux-b4rt
+                        users is displayed showing their current online
+                        status.
+
+                        The period of time after which users are
+                        considered 'offline' is configured on the
+                        'webapp' admin page, see here for more info:
+
+                        TODO_LINK:
+                        admin ops, webapp section, Minutes To Keep User
+                        Online Status
+
+                    o   Good Looking Statistics
+                        Enable/disable display of good looking
+                        statistics - up/download speeds, connections,
+                        drivespace and server load statistics.
+
+                        This option enables the display of various
+                        statistics on the index page.
+
+                    o   Good Looking Statistics Display Settings
+                        Select the items to display in the "Good Looking
+                        Statistics" panel.
+
+                        *	Download Speed - total download speed being
+                            used by torrentflux-b4rt.
+
+                        *	Upload Speed - total upload speed being
+                            used by torrentflux-b4rt.
+
+                        *	Total Speed - total speed (up and down)
+                            being used by torrentflux-b4rt.
+
+                        *	Connections - total number of transfers
+                            running.
+
+                        *	Drive Space - total drive space remaining on
+                            the mount point where the transfers folder
+                            is located.
+
+                        *	Server Load - the load averages for the
+                            server.  Uses results from 'loadavg' on
+                            linux or 'uptime' on BSD.
+
+                    o   Bandwidth Bars
+                        Enable/disable display of current bandwidth
+                        usage bars.
+
+                        When enabled, bars will be displayed to indicate
+                        the current upload and download bandwidth being
+                        utilised.  The longer the bar, the more
+                        bandwidth in use.
+
+                    o   Bandwidth Bars Style
+                        Select the style of bandwidth bar to use.
+
+                        There are two bar styles to choose from - tf and
+                        xfer.  The tf style has a constant color,
+                        whereas the xfer style turns redder the higher
+                        the bandwidth utilization is.
+
+                    o   Upstream Bandwidth
+                        Maximum available upstream bandwidth in kilobits
+                        per second (kbit/s) through your ISP - 1 Megabit
+                        per second (1Mbit/s) is roughly equal to
+                        1000kbit/s.
+
+                        This figure is used to determine the percentage
+                        utilization of bandwidth in the bandwidth bars.
+                        You should enter the maximum available upstream
+                        bandwidth for the network connection of the
+                        server - usually this will be the max bandwidth
+                        you get from your ISP.
+
+                        Tip:
+                        ----
+                        A rough guide to bandwidth figures:
+
+                        1000 = roughly 1Mbit/s
+                        512 = 512kbit/s
+
+                    o   Downstream Bandwidth
+                        Maximum available downstream bandwidth in
+                        kilobits per second (kbit/s) through your ISP -
+                        1 Megabit per second (1Mbit/s) is roughly equal
+                        to 1000kbit/s.
+
+                        This figure is used to determine the percentage
+                        utilization of bandwidth in the bandwidth bars.
+                        You should enter the maximum available downstream
+                        bandwidth for the network connection of the
+                        server - usually this will be the max bandwidth
+                        you get from your ISP.
+
+                    o   Big Bold Drivespace Warning
+                        Enable/disable display of "Big Bold Drivespace"
+                        warning when disk space gets below 2% of total.
+
+                    o   Only Admin Can See Other User's Transfers
+                        Enable/disable visibility of other users'
+                        transfers in regular users' transfer lists -
+                        when enabled, only admins will be able to see
+                        other users' transfers in their transfer list.
+
+                    o   Transfer File Download
+                        Enable/disable links in the transfer list to
+                        allow users to download transfer files from the
+                        server.
+
+                        This option will add a link to the left of each
+                        transfer in the transfer list so that the
+                        transfer file (ie .torrent file) can be
+                        downloaded.  When the link is clicked, the
+                        transfer file will be sent to the user as an
+                        attachment - ie the browser will prompt the user
+                        to save the file locally (or open it in an
+                        associated application if MIME types are
+                        configured correctly).
+
+                    o   Display Seeding Torrents
+                        Enable/disable display of torrents in the
+                        seeding state - if enabled, when a torrent has
+                        finished leeching and moves on to seed, the
+                        torrent will be hidden from the transfer list.
+                        Hidden torrents can be viewed in the transfer
+                        list by clicking the 'Show Seeding Torrents'
+                        link under the transfer list.
+
+                    o   Display Estimated Time To Seed
+                        Enable/disable display of estimated time until a
+                        transfer is completed in the transfer list.
+
+                    o   Transfer List Can Be Sorted
+                        Enable/disable sorting of the transfer list -
+                        when enabled, users will be able to change the
+                        order in which the transfer list is displayed
+                        based on date added, file name, size, totals
+                        up/down, transfer status, etc.
+
+                        Tip:
+                        ----
+                        To change the sort order of the transfer list,
+                        click on the column headers at the top of the
+                        transfer list.  Each click on the column header
+                        will toggle sorting in ascending/descending
+                        order for that column type.
+
+                        For example, clicking on the 'Transfer File'
+                        column header will sort the transfer list by
+                        transfer file name alphabetically ascending
+                        (A-Z); clicking again on the 'Transfer File'
+                        column header will sort the transfer list by
+                        transfer file name alphabetically descending
+                        (Z-A).
+
+                    o   Default Sort Order
+                        Select the default property by which the items
+                        in the transfer list should be sorted.
+
+                        This is the column by which all transfers in the
+                        transfer list will be sorted - for example,
+                        to have the transfer list automatically sorted
+                        by transfer file name alphabetically, select
+                        'Name - Ascending'.
+
+                    o   Transfer List Display Settings
+                        Select the types of columns to display for each
+                        transfer in the transfer list.
+
+                        *	Owner
+                        *	Size
+                        *	Total Down
+                        *	Total Up
+                        *	Status
+                        *	Progress
+                        *	Down-Speed
+                        *	Up-Speed
+                        *	Seeds
+                        *	Peers
+                        *	Estimated Time
+                        *	Client
+
+                        This option allows you to see a whole host of
+                        information directly in the transfer list
+                        without having to click on a transfer to pop up
+                        the transfer window.
+
+                        Note:
+                        -----
+                        The more transfer columns you display in the
+                        transfer list, the wider the index page will
+                        grow.  Take this into account especially when
+                        using a screen with lower resolutions.
+
+                    o   Default Transfer Window
+                        Select the default page to display in the
+                        transfer window - the window which is displayed
+                        when a transfer in the transfer list is clicked
+                        on.
+
+                        This option allows you to choose which tab of
+                        the popup transfer window will be displayed
+                        when you click on a transfer in the transfer
+                        list.
+
+                    o   Multi Operations
+                        Enable/disable display of the dropdown list of
+                        operations to perform on multiple transfer list
+                        items - when enabled, a list of operations
+                        (start/stop/delete/wipe/reset) will be available
+                        in a drop down list for users to perform on more
+                        than one transfer at a time.
+
+                        This option allows you to specify whether you
+                        want users to be able to use the multi ops
+                        feature.  Multi ops allow users to select a
+                        number of transfers from the transfer list and
+                        then apply an action to all of those selected
+                        transfers only - start, stop, delete, etc.
+
+                    o   Bulk Operations
+                        Enable/disable display of bulk operation links -
+                        when enabled, links will be displayed on the
+                        index page to allow users to control
+                        (start/stop/resume) all transfers in the
+                        transfer list at once.
+
+                        This option allows you to specify whether the
+                        bulk ops links will be displayed on the index
+                        page.
+
+                        Bulk ops allow users to perform an action on
+                        every single transfer in the transfer list at
+                        once - for example to restart all torrents in
+                        one go.
+
+                    o   Display Server Statistics
+                        Enable/disable display of various server
+                        statistics (transfer/queue/server stats) at the
+                        bottom of the index page.
+
+                    o   Display Network Connections
+                        Enable/disable display of total network
+                        connections on the index page.
+
+                    o   Display Server Load
+                        Enable/disable display of 1/5/15 minute average
+                        server load on the index page.
+
+            Directory / File Management options (dir)
+
+                Overview
+
+                    The 'Dir Settings' (dir) tab on the admin pages
+                    allows you to configure settings related to the
+                    torrentflux-b4rt file and directory manager -
+                    referred to as 'dir manager' from now on.
+
+                    See also:
+
+                    TODO_LINK: user ops, dir manager section
+
+                Configuration
+
+                    The configuration options for the dir tab are as
+                    follows:
+
+                    o   Public Read
+                        Enable/disable public read permission for all
+                        files/folders in torrentflux-b4rt - when
+                        enabled, all files and folders within
+                        torrentflux-b4rt will be readable by all
+                        torrentflux-b4rt users.
+
+                        This option makes all files transferred using
+                        torrentflux-b4rt visible to all torrentflux-b4rt
+                        users - admin and normal users alike.
+
+                        Note:
+                        -----
+                        This does *not* make the files automatically
+                        visible to all Unix users on the webserver where
+                        torrentflux-b4rt is installed.
+
+                        Tip:
+                        ----
+                        To make downloaded files accessible to Unix
+                        users other than the webserver user - for
+                        example so a 'regular' Unix user can download
+                        the transferred files via FTP:
+
+                        1.  When the transfer is complete, use the
+                            'chmod' torrentflux-b4rt functionality to
+                            change the mode of the transferred files to
+                            '777' - ie readable by everyone.
+
+                        2.  In the LINK_TODO:[admin.dir.move dir tab of
+                            the admin pages], add the target directory
+                            owned by the Unix user who will be
+                            downloading files via FTP as a valid move
+                            path.  This will allow files to be moved
+                            from torrentflux-b4rt into the target
+                            directory.
+
+                        3.  Move the files out of the torrentflux-b4rt
+                            transfer directory into the target directory
+                            owned by the Unix user who will be
+                            downloading the files via FTP.  Ensure the
+                            webserver user has permissions to write
+                            files to the target directory.
+
+                            The files should now be available for the
+                            Unix user to download via FTP.
+
+                            Note:
+                            -----
+                            Any problems encountered whilst using this
+                            method will almost always come down to Unix
+                            file permissions - make sure the webserver
+                            can write to the Unix user's target
+                            directory and in turn make sure the file
+                            permissions of the moved files are
+                            sufficient for the Unix user to access them
+                            once they're moved.
+
+                    o   Public Write
+                        Enable/disable public write permission for all
+                        files/folders in torrentflux-b4rt - when
+                        enabled, all files and folders within
+                        torrentflux-b4rt will be writable by all
+                        torrentflux-b4rt users.
+
+                        By default, files and folders in
+                        torrentflux-b4rt are only writable by the
+                        torrentflux-b4rt user that created them.  This
+                        option overrides this so that any
+                        torrentflux-b4rt user can write to any other
+                        torrentflux-b4rt user's files/folders.
+
+                        Note:
+                        -----
+                        This feature only applies to torrentflux-b4rt
+                        users - Unix users on the system will not be
+                        automatically given write permissions to
+                        torrentflux-b4rt files by enabling the 'Public
+                        Write' option in torrentflux-b4rt.
+
+                        Warning:
+                        --------
+                        Enabling the 'Public Write' option is a security
+                        risk to other torrentflux-b4rt users - only
+                        enable it on a server where all torrentflux-b4rt
+                        users are 'trusted'.  Of course the definition of
+                        'trusted' will vary from one system to another.
+
+                    o   Chmod
+                        Enable/disable the use of the chmod
+                        functionality for regular torrentflux-b4rt users
+                        - when enabled, users will be able to change the
+                        permissions on files and folders in
+                        torrentflux-b4rt to read/write/execute for
+                        everyone (777).
+
+                        This option will enable users to click a link in
+                        the dir manager to set the file permissions on
+                        the associated file to '777' -
+                        read/write/executable for *all Unix users*.
+
+                        Warning:
+                        --------
+                        Enabling the 'Chmod' option is a system wide
+                        security risk - any Unix user on the server
+                        hosting torrentflux-b4rt will have read, write
+                        and execute permissions on files and directories
+                        chmod'ed.  Sensitive files should not be
+                        chmod'ed 777 and if you do not trust your users
+                        to use the chmod functionality correctly you
+                        should not enable it.
+
+                    o   Full Directory Listings
+                        Enable/disable complete directory listings -
+                        when enabled, full directory listings will be
+                        used including size and last modified time
+                        similar to Unix 'ls -al' output.
+
+                    o   Make Torrent
+                        Enable/disable make torrent functionality for
+                        users - when enabled, users will be allowed to
+                        create torrent files from the contents in the
+                        directory manager.
+
+                        When enabled, a 'make torrent' icon will be
+                        available next to files in the dir manager so
+                        users will be able to create torrents from files
+                        and directories in the dir manager.
+
+                    o   Default Make Torrent Client
+                        Choose the default client to use to create
+                        torrent files with.
+
+                        The options available are: BitTornado (tornado)
+                        or BitTorrent Mainline (mainline).
+
+                    o   File Downloads
+                        Enable/disable file downloading from the
+                        directory manager - when enabled, users will be
+                        able to download files directly from the
+                        torrentflux-b4rt file manager.
+
+                        When enabled, a download icon will be displayed
+                        next to files and directories in the dir
+                        manager to allow users to download the files as
+                        a zip or tar archive - depending on the setting
+                        for 'Download Archive Type'.
+
+                    o   Download Archive Type
+                        Choose the type of archive to use when
+                        performing file downloads - files downloaded
+                        will be archived using this archive type prior
+                        to sending.
+
+                        The available archive types are: zip or tar
+
+                        Note:
+                        -----
+                        Ensure your operating system supports the
+                        archive type you select here.
+
+                        Compression is *not* used on downloaded archives.
+
+                    o   Text/NFO Viewer
+                        Enable/disable access to the built in NFO viewer
+                        - when enabled, users will be able to view
+                        nfo/txt/log file types directly within
+                        torrentflux-b4rt.
+
+                        When this option is enabled, files ending in
+                        .nfo, .txt or .log will have a 'View NFO' icon
+                        next to them in the dir manager to allow users
+                        to view the (presumably text) contents directly
+                        in torrentflux-b4rt.
+
+                    o   Unrar
+                        Enable/disable access to unrar functionality -
+                        when enabled, users will be able to use unrar to
+                        unpack rar archives directly within
+                        torrentflux-b4rt.
+
+                        When enabled, an 'unrar' icon will be displayed
+                        next to rar archives in the dir manager to allow
+                        users to decompress the archives directly from
+                        torrentflux-b4rt.
+
+                        Warning:
+                        --------
+                        Using the unrar functionality can add excessive
+                        load on the server hosting torrentflux-b4rt.
+                        The feature should *not* be enabled on servers
+                        where resource usage is a problem or can become
+                        a problem quickly.
+
+                    o   SFV File Checking
+                        Enable/disable access to sfv file checking
+                        functionality - when enabled, if an sfv file is
+                        present users will be able to verify the
+                        integrity of the files covered by the sfv file
+                        directly within torrentflux-b4rt.
+
+                        When enabled, a 'Check SFV' icon will be
+                        displayed next to any SFV files in the dir
+                        manager.  Users will be able to click the link
+                        to verify the integrity of the files with
+                        which the sfv file is associated.
+
+                        Info:
+                        -----
+                        Simple File Verification (sfv) is a system
+                        whereby a cyclic redundancy check is performed
+                        on each file within a set of files and/or
+                        directories.  The checksum for each file/dir is
+                        then added to the 'sfv' file.
+
+                        The sfv file is then transferred along with the
+                        files it is associated with so that when
+                        transfer is complete, the integrity of the files
+                        can be verified by running an sfv checker
+                        against them.  In this way any corruption to
+                        the files that has occurred during transfer can
+                        be detected and any bad files retransmitted.
+
+                    o   VLC Streaming
+                        Enable/disable access to VLC file streaming
+                        functionality - when enabled, users will be able
+                        to stream media directly from the webserver
+                        using VLC.
+
+                    o   VLC Port
+                        Specify the port to use for VLC streaming.
+
+                    o   File And Folder Renaming
+                        Enable/disable access for users to rename files
+                        and folders.
+
+                        When enabled, a 'Rename' icon is displayed next
+                        to all files and directories in the dir manager
+                        to allow users to rename files.
+
+                        Note:
+                        -----
+                        This option allows users to rename files *only
+                        within the torrentflux-b4rt dir manager*.  To
+                        move files away from the torrentflux-b4rt dir
+                        manager directory structure, enable the 'File And
+                        Folder Moving' option below.
+
+                    o   File And Folder Moving
+                        Enable/disable access for users to move files
+                        and folders out of the torrentflux-b4rt
+                        directory hierarchy - when enabled, users will
+                        be allowed to move files and folders out of the
+                        torrentflux-b4rt directory hierarchy into a
+                        choice of folders elsewhere on the server.
+
+                        When enabled, a 'Move' icon is displayed next
+                        to all files and directories in the dir manager
+                        to allow users to move files out of the
+                        torrentflux-b4rt directory hierarchy elsewhere
+                        on the host server.
+
+                        This option is ideal for allowing access to
+                        files and folders for normal Unix users on the
+                        server hosting torrentflux-b4rt who don't have
+                        permission to access the torrentflux-b4rt
+                        directory hierarchy.
+
+                        Once transfers are complete, files can be moved
+                        away from the torrentflux-b4rt directory where
+                        they can then be accessed via FTP, Samba or any
+                        other suitable remote access protocol.
+
+                    o   Valid File And Folder Moving Paths
+                        Specify the paths to which users are allowed to
+                        move files and folders outside of the
+                        torrentflux-b4rt directory hierarchy.
+
+                        List all of the folders *outside of the
+                        torrentflux-b4rt directory structure* that you
+                        want torrentflux-b4rt users to be allowed to
+                        move files and folders to.
+
+                        This option is redundant without enabling the
+                        'File And Folder Moving' functionality above.
+
+                        Note:
+                        -----
+                        Paths must be absolute - ie
+                        '/path/to/somefolder' instead of just
+                        'somefolder'.
+
+                        Any newly created directories will not be
+                        deleted after removing a transfer from the
+                        transfer list.
+
+                        Ensure that the target directories have
+                        sufficient permissions for the webserver user to
+                        create files there.
+
+                    o   Restricted Files And Folders
+                        Specify a list of patterns; files or folders
+                        whose names match a pattern will not be
+                        accessible to users.
+
+                        This option is useful if you have any files that
+                        you don't want users to be able to see within
+                        the dir manager.
+
+                        Note:
+                        -----
+                        Access to dot files/folders - files/folders
+                        starting with a dot - is always restricted.
+
+            Configuring stats.php output options (stats)
+
+                Overview
+
+					The stats.php script displays statistics for various
+					aspects of torrentflux-b4rt in a number of different
+					output formats.  For more information on using
+					stats.php, see [TODO_LINK:'appendix, Obtaining
+					statistics using stats.php'].
+
+					The operation of the stats.php script can be
+					configured to your requirements - these
+					configuration options are detailed in this section.
+
+                Configuration
+
+					The configuration options for the 'stats' tab on the
+					admin pages - for configuration of the stats.php
+					script - are as follows:
+
+					o	Publicly Visible Stats
+						Enable/disable publicly visible statistics - if
+						enabled, superadmin level stats (server load,
+						all transfer details, etc) are visible to the
+						public without needing to authenticate.
+
+						This option allows the stats.php script to be
+						requested in a web browser and the output sent
+						without requiring any user authentication -
+						username/password credentials are not required
+						to access stats output.
+
+						Warning:
+						--------
+						With this option enabled, full server statistics
+						will be displayed to anyone who requests them.
+						As a result this should be considered a security
+						risk - only enable this option if you understand
+						and accept that this will reveal server loads,
+						details of transfers, usernames and their online
+						status and so on.  See the [TODO_LINK:'stats.php
+						usage'] for details of all information
+						revealed.
+
+					o	Show Usage When Called Without Arguments
+						Enable/disable the option to display usage/help
+						when the stats.php script is called without any
+						arguments in the URL.
+
+						This option displays the standard usage/help
+						output when called without any arguments.
+
+						When this option is disabled, the default action
+						to perform when stats.php is called without
+						arguments will be to display *all* statistics -
+						ie as if the stats.php script was called with
+						the argument 't=all'.
+
+					o	Use Compression
+						Enable/disable the use of compression by default
+						when sending statistics - using compression uses
+						less bandwidth but takes up more CPU time on the
+						server.
+
+						By default compression is not used to send stats
+						out from stats.php, although compression can be
+						used by manually specifying the argument 'c=1'
+						in the request URL to stats.php.
+
+						This option specifies that stats.php will use
+						compression by default - setting 'c=1' is not
+						required in this case.
+
+						Note:
+						-----
+						The compression level used is specified in the
+						'Compression Level' option below.
+
+						Using compression will add some additional
+						load to the hosting server in terms of CPU
+						resource usage.  In most cases this additional
+						load will be negligible - the benefits of
+						reducing bandwidth consumption by using
+						compression should be weighed up against the
+						cost of additional server load introduced.
+
+					o	Compression Level
+						Specify the level of compression to use when
+						transferring data.
+
+						Level 0 = no compression (data is sent out 'as
+						is'), level 9 = maximum compression (data is
+						compressed as small as possible before sending
+						but takes up some minor extra CPU time to
+						compress).
+
+						This option determines the level of compression
+						to use *when the stats.php script is called with
+						the argument 'c=1' or compression is enabled on
+						the stats admin page*.
+
+						Compression levels run from 0 (lowest) to 9
+						(highest) - ie 0 will use no compression and 9
+						will use the maximum compression possible.
+
+						The compression is applied using PHP's inbuilt
+						'[TODO_LINK:@http://php.net/gzdeflate
+						gzdeflate]' function.
+
+					o	Delimiting Character To Use In TXT Mode
+						Specify the character to use for
+						delimiting/separating fields when the format
+						type is 'txt' - this character will be used to
+						split one item in the output from the next.
+
+						By default the ';' character is used to separate
+						fields in the output, ie:
+
+						0.00;0.00;0.00;1;12.37 GB; 1.25, 1.03, 0.56;1;0;0;0;87
+
+						where ';' marks the end of one field and the
+						start of another.
+
+						The field separator can be changed using this
+						option.
+
+					o	Send Header Fields In TXT Mode
+						Enable/disable sending of header fields at the
+						top of text mode output - when enabled, each of
+						the statistics column headers will be sent out
+						at the top of the output.
+
+						By default when using txt output format mode
+						(f=txt), the output will be sent raw without any
+						column headers to indicate what each field of
+						output relates to, ie:
+
+						0.00;0.00;0.00;1;12.37 GB; 1.25, 1.03, 0.56;1;0;0;0;87
+
+						With this option enabled, stats.php will also
+						send out column headers by default whenever txt
+						format is used, ie:
+
+						Speed Down;Speed Up;Speed Total;Connections;Free Space;Load;Running;Queued;Speed Down (Percent);Speed Up (Percent);Drive Space (Percent)
+						0.00;0.00;0.00;1;12.37 GB; 1.25, 1.03, 0.56;1;0;0;0;87
+
+						This makes it easier to see what values
+						represent what type of statistical information.
+
+						Note:
+						-----
+						Only used in txt mode.
+
+					o	Default Statistic Type To Send
+						Specify the default type of statistic to send
+						out.
+
+						The default statistic type sent out is 'all'.
+						This option allows you to change this to send
+						out a different type of statistic by default.
+
+						The options to choose from are: all, xfer,
+						users, transfers.
+
+						For more info on what is included in the output
+						for each type of statistic, see
+						[TODO_LINK:'the appendix section on using
+						stats.php'].
+
+					o	Default Output Format
+						Specify the default output format to use when
+						sending statistics - rss/txt/custom xml.
+
+						By default the format used when sending
+						stats.php info out is xml.  This option allows
+						you to change the default format to use.
+
+						The options to choose from are: rss, txt, xml.
+
+						For more info on each type of output format, see
+						[TODO_LINK:'the appendix section on using
+						stats.php'].
+
+					o	Send As Attachment
+						Enable/disable sending of statistics as an HTTP
+						attachment - browser client will prompt to save
+						the attachment as a file.
+
+						By default, stats.php will display output
+						directly in the user agent as long as the agent
+						is configured to do so correctly.  This option
+						allows you to specify that the stats.php output
+						will be sent to the user agent as an HTTP
+						attachment - in most cases the user agent will
+						then prompt the user to download the file and
+						save it locally before viewing it.
+
+            Fluxd control and configuration (fluxd)
+                Controlling fluxd (start/stop/restart)
+                Setting the database mode to use
+                Setting the log level to use
+
+            Transfer statistics configuration (xfer)
+                Enabling the display of transfer stats
+                Updating transfer stats in real time
+                Allowing all users to view all other users stats
+                (Re)setting the transfer stats
+                Setting the first day of the week
+                Setting the month day on which a month starts
+
+            Search engine configuration (search)
+                Checking for search engine updates
+                Filtering out unwanted categories from search results
+
+            Website link list management (links)
+                Adding, editing, moving and deleting link items
+
+            RSS Feed list configuration (RSS)
+                Adding, editing and deleting RSS feeds
+
+            Activity / Audit / Event viewer (activity)
+                Filtering items listed in the activity panel
+
+        Appendices
+
+			Guide to automatically fetching and uploading RSS torrents to torrentflux-b4rt using fluxd
+
+				Overview
+
+					This guide describes the steps required to configure
+					the fluxd daemon to automatically check RSS torrent
+					feeds for torrents matching one or more specified
+                    patterns.  When a matching torrent is found it is
+                    automatically downloaded to a directory.
+
+					In turn, the fluxd daemon is configured to watch
+					this auto download directory for new torrents.  When
+					a new torrent is found in the watch directory, the
+					torrent is automatically injected into
+					torrentflux-b4rt and optionally started
+					automatically.
+
+					The guide will describe the steps required to:
+
+                    TODO_LINK: add links to each of the sections below:
+                    *	Configure the fluxd Rssad module to check RSS
+                        feeds for new torrents matching a filter and
+                        download them to a watch directory.
+
+                    *   Configure the fluxd Watch module to check the
+                        watch directory for new torrents and
+                        automatically inject them into torrentflux-b4rt.
+
+                    *   Configure the fluxd core and finally start
+                        fluxd.
+
+                    Important:
+                    ----------
+                    The RSS feeds *MUST* be 'direct download' feeds -
+                    each item in the RSS feed must contain a direct link
+                    to the torrent file itself and not just a link to a
+                    description page for the torrent.
+
+                    Error messages about 'URL does not appear to contain
+                    a valid torrent file' or similar are often because
+                    the RSS feed is not a direct download feed.
+
+				Configuring the Rssad fluxd module to download torrents
+
+                    Rssad configuration is split into three parts -
+                    filter creation, job creation and finally setting
+                    up the Rssad module to load with fluxd.  These steps
+                    are covered below:
+
+                    *   Filter creation
+                        A filter is a list of patterns that Rssad uses
+                        in a job to find and download new torrents
+                        within RSS feeds.
+
+                        As an example, you may have a filter called 'OS'
+                        which contains a number of patterns to match
+                        your favourite operating systems within RSS
+                        feed(s).  When any of the patterns in the filter
+                        are matched, Rssad will download the torrents
+                        automatically.
+
+                        One filter can be applied to any number of RSS
+                        feeds, making it easy to search for the same
+                        patterns on a number of different trackers.
+
+                        Tip:
+                        ----
+                        Rssad filters are applied using the PHP function
+                        preg_match() and are treated almost the same as
+                        Perl Compatible Regular Expressions (PCRE), so
+                        advanced regular expression metacharacter search
+                        patterns can be used.
+
+                        Note:
+                        -----
+                        Filter patterns are case insensitive, so the
+                        pattern 'foobar' will match 'foobar', 'FOOBAR'
+                        and 'FoOBaR'.
+
+                        TODO_LINK:See the example filter section below
+                        for practical examples.
+
+                        To create a new filter:
+
+                        *   On the 'fluxd' tab of the admin pages, click
+                            the link labelled 'Configure Rssad Filters
+                            And Jobs' in the Rssad section.
+
+                        *   In the 'Name:' textbox on the 'Configure
+                            Rssad Filters and Jobs' page, enter the
+                            name you want to refer to the filter by.
+
+                            The filter name is used to identify one type
+                            of filter from another and should be
+                            easily identifiable for you.  For example,
+                            you might have a filter called 'Films' which
+                            contains all the patterns of upcoming films
+                            you want to keep a watch out for.
+
+                        *   Click 'Add Filter' - you will be taken to
+                            a page where you can add the patterns you
+                            want to search for.
+
+                        *   On the 'Add Filter' screen, enter the first
+                            pattern you want to search for in the 'Add
+                            Filter-Entry' textbox and click 'add' to add
+                            the pattern to the 'Filter-Entries' list.
+
+                            Repeat this step for each pattern you want
+                            this filter to search for.
+
+                        *   When all the patterns have been added to the
+                            filter entries list, click the 'Add Filter'
+                            button to save the filter.
+
+                        The filter will now be added to the list of
+                        filters available for use in Rssad jobs.
+
+                    *   Job creation
+                        With the filter added, you can now move on to
+                        use it in an Rssad job.
+
+                        An Rssad job is composed of three components:
+
+                        *   A filter - comprising one or more
+                            patterns to search for (see above).
+
+                        *   An RSS feed URL to apply the filter to.
+
+                        *   A directory in which to download any torrent
+                            files to if a match is made for the pattern
+                            against the RSS feed.
+
+                        To create a new Rssad job:
+
+                        *   On the 'fluxd' tab of the admin pages, click
+                            the link labelled 'Configure Rssad Filters
+                            And Jobs' in the Rssad section.
+
+                        *   Click on the 'Add Job' button on the
+                            'Configure Rssad Filters And Jobs' page to
+                            open up the Rssad job configuration page.
+
+                        *   On the 'Add Job' screen, enter or select the
+                            following:
+
+                            *   In the 'Savedir' textbox, enter in the
+                                directory you want any found torrents to
+                                be saved to.
+
+                                The directory path should be absolute -
+                                ie '/path/to/savefolder' and not just
+                                'savefolder'.
+
+                            *   Check the 'Check/Create dir' checkbox if
+                                you want the 'Savedir' directory to be
+                                created automatically if it doesn't
+                                already exist.
+
+                            *   In the 'URL' textbox, enter in the URL
+                                of the direct download torrent RSS feed
+                                you want to search in.
+
+                                Important:
+                                ----------
+                                Remember, the RSS feed must be a direct
+                                download feed - ie the links in the feed
+                                are links to the torrent files
+                                themselves and not just to a description
+                                of the torrents.
+
+                                Verify this is the case before
+                                continuing to save headaches later!
+
+                            *   Finally, select the filter you want to
+                                apply to the RSS feed from the 'Filter'
+                                dropdown select field.  This filter will
+                                be used to search for the patterns you
+                                defined in the feed URL.
+
+                        *   Finally, click the 'Add Job' button to add
+                            the job to the list of jobs that Rssad will
+                            run.
+
+                    *   Configuring the Rssad module to load up in Fluxd
+                        Now the Rssad job has been added to Fluxd, all
+                        that remains is to configure Fluxd to load the
+                        Rssad module when it starts up and tell it how
+                        often to run the job.
+
+                        *   On the 'fluxd' admin page in the Rssad
+                            section, set 'Rssad Enabled:' to 'True'.
+
+                            This will make Fluxd load up Rssad when it
+                            starts.
+
+                        *   Change the 'Rssad Polling Interval:' to the
+                            time in seconds you want to wait between
+                            Rssad job runs.
+
+                            Important:
+                            ----------
+                            Be careful when setting this value as some
+                            trackers may ban you if you request their
+                            RSS feeds too frequently.  If in doubt, ask
+                            the administrators of the tracker how often
+                            you can check their feeds for new items.
+
+                        Fluxd is now configured to run the Rssad job.
+
+                        Note:
+                        -----
+                        If fluxd is already running, you may need to
+                        stop and restart it for the changes to take
+                        effect.
+
+                    With the steps above, Fluxd is now ready to search
+                    your feed URL for the filter patterns you specified.
+                    If it finds any matching torrents in the feed, they
+                    will be downloaded and saved to the directory you
+                    set up in the job specification.
+
+                    If you are happy to just manually check the save
+                    directory for new torrents and inject them into
+                    torrentflux-b4rt manually, you can skip the next
+                    section on configuring the Watch module.
+
+                    However, if you want to use the Watch module to keep
+                    an eye on the save directory and automatically
+                    inject any new torrents it finds there - carry on
+                    reading!
+
+				Configuring the Watch fluxd module to inject torrents into torrentflux-b4rt
+
+                    The Watch fluxd module can check a specified
+                    directory at regular intervals and if any new
+                    transfer files are found in that directory it will
+                    automatically inject the transfer into
+                    torrentflux-b4rt and optionally automatically start
+                    it.
+
+                    This is ideal when used in combination with the
+                    Rssad module; Rssad can automatically download any
+                    torrents found in an RSS feed into a directory that
+                    is being watched by the Watch module and in turn the
+                    Watch module can auto inject and optionally start
+                    the torrent for you.
+
+                    Watch configuration is split into two parts - watch
+                    job creation and configuring the Watch module to
+                    load up with fluxd:
+
+                    *   Configure Watch Jobs
+                        To configure a watch job:
+
+                        *   On the 'fluxd' tab of the admin pages, click
+                            the link labelled 'Configure Watch Jobs' in
+                            the 'Watch' module section.
+
+                        *   On the 'Configure Watch Jobs' screen, click
+                            the 'Add Job' button.
+
+                        *   On the 'Add Job' screen, enter or select the
+                            following items:
+
+                            *   Watchdir - this is the directory that
+                                you want the Watch module to keep an eye
+                                on for new transfer files.
+
+                                The directory path should be absolute -
+                                ie '/path/to/savefolder' and not just
+                                'savefolder'.
+
+                            *   Check the 'Check/Create dir' checkbox if
+                                you want the 'Watchdir' directory to be
+                                created automatically if it doesn't
+                                already exist.
+
+                            *   User - select the user you want to be
+                                owner of any transfer files injected by
+                                this job.
+
+                            *   Transfer Profile - select the transfer
+                                profile that will apply to any transfers
+                                injected by this job.
+
+                                TODO_LINK: See the section on transfer
+                                profiles in the user ops section for
+                                more info.
+
+                        *   Finally, click on the 'Add Job' button to
+                            save the job.
+
+
+                    *   Configuring the Watch module to load with Fluxd
+                        Watch is now configured to watch the directory
+                        you specified for new transfer files and
+                        automatically inject them as the user you
+                        specified and with the transfer profile you set.
+
+                        All that remains is to have the Watch module
+                        load up with fluxd below:
+
+                        *   On the 'fluxd' admin page under the 'Watch'
+                            section, set 'Watch Enabled:' to 'True'.
+
+                            This will make Fluxd load up Watch when it
+                            starts.
+
+                        *   Change the 'Watch Polling Interval:' to the
+                            time in seconds you want to wait between
+                            Watch job runs - ie if you set this to '120'
+                            then Fluxd/Watch will check every 2 minutes
+                            for new transfer files and if it finds any,
+                            auto inject them into torrentflux-b4rt.
+
+                        Fluxd is now configured to run the Watch job.
+
+                        Note:
+                        -----
+                        If fluxd is already running, you may need to
+                        stop and restart it for the changes to take
+                        effect.
+
+                    With the steps above, Fluxd is now ready to watch
+                    the directory you specified for new transfer files
+                    and auto inject them.
+
+                    Watch is ideally used in conjunction with Rssad to
+                    search for torrents, download them and drop them
+                    into a directory that the Watch module is watching -
+                    from where Watch can then auto inject the torrent.
+
+                    However Watch can be used on its own to monitor
+                    directories for new transfer files - for example you
+                    might always download your torrent files into a
+                    certain directory and then have Watch auto inject
+                    them.
+
+				Starting fluxd
+
+                    Finally after the Fluxd modules have been configured
+                    and loaded above, you can now actually start up the
+                    Fluxd daemon.  The daemon will autoload all the
+                    modules you configured and the jobs will run at
+                    their scheduled intervals.
+
+                    To start Fluxd:
+
+                    *   On the 'fluxd' admin page, modify any 'core'
+                        options as required:
+
+                        *   Database Mode - by default this is set to
+                            PHP + fluxcli.php but 'Perl + DBI/DBD' can
+                            be used alternatively.
+
+                            Note:
+                            -----
+                            Running in perl mode is MUCH faster than
+                            PHP.  However, you may need to install
+                            additional perl modules to run in Perl mode
+                            - check the 'check requirements' tab on the
+                            superadmin pages to see if your installation
+                            meets the requirements (TODO_LINK).
+
+                        *   Loglevel - this only needs changing if you
+                            experience problems and need to debug or
+                            troubleshoot fluxd operation.  The default
+                            loglevel is '0' (which doesn't mean 'no
+                            output', just 'normal' level of output!).
+
+                            Higher levels of loglevel increase the
+                            verbosity of fluxd output.
+
+                    *   Finally, click on the 'Start fluxd' button to
+                        start the daemon in the background.
+
+                    The status light should change to green with the
+                    message 'fluxd Running' along with the process id of
+                    the fluxd process.
+
+                Example Rssad filter patterns
+
+                    This section contains a few examples of Rssad
+                    filters - regular expressions - and examples of what
+                    each filter will match.  Notes are provided for each
+                    example.
+
+                    *   Pattern: ^foobar.*
+
+                        Matches: foobar foOBAr foobarblimp foobar...
+
+                    *   Pattern: foobar.*s07.*
+
+                        Matches: foobarblimpS07E01 foobarblimpS07E99
+                        foobarblimpS07Complete
+
+                        Note:
+                        Useful for matching anything from season
+                        07 of the 'foobar' show.
+
+                    *   Pattern: foobar.*s0[1-3].*
+
+                        Matches: foobarS01E01 foobarS02E01 foobarS03E01
+
+                        Note: Useful for matching anything from seasons
+                        01 through 03 of the 'foobar' show.
+
+                    *   Pattern: foobar.*s01e0[1-9].*
+
+                        Matches: foobarS01E01 foobarS01E02 foobarS01E09
+
+                        Note:
+                        Useful for matching anything from season 01,
+                        episodes 01 through 09.
+
+                    *   Pattern: ^(?!.*toast)foobar.*
+
+                        Does *NOT* match: foobartoast toastfoobar
+                        foobarblimptoast
+
+                        Note:
+                        Useful for matching something that does not
+                        contain a certain string.  The pattern above
+                        only matches strings that start with 'foobar'
+                        and do not contain 'toast' anywhere in them.
+
+            Using fluxcli.php on the command-line
+                Running fluxcli.php from a cron job to auto fetch RSS feed items
+
+            Obtaining statistics using stats.php
+
+                Overview
+
+                    Torrentflux-b4rt can output a variety of detailed
+                    statistical information using TODO_LINK:[ link to
+                    stats.php script, but only for the distribution html
+                    manual NOT for the tfb main website version of
+                    manual - is this possible with quickbook? 'the
+                    stats.php script'].
+
+                    The stats can be displayed in various formats
+                    suitable for different usage purposes - plain text
+                    for viewing/usage on the commandline or in a web
+                    browser, RSS for reading in an RSS feed reader for
+                    regular updates on the torrentflux-b4rt installation
+                    or even customized XML formats for
+                    specialised/custom use.
+
+                    To view detailed usage instructions, call the
+                    stats.php script with the single argument 'usage':
+
+                    http://example.com/stats.php?usage
+
+                Specifying the type of output to display
+
+                    The type of output displayed by the stats.php script
+                    is determined by the 't' argument used in the query
+                    string of the request URL.
+
+                    For example, to display statistics relating to the
+                    server you would browse to the following page:
+
+                    http://example.com/torrentflux-b4rt/stats.php?t=server
+
+                    The valid options for the 't' argument are:
+
+                    o   all
+                        Displays all types of statistics listed below -
+                        server, xfer, users, transfers.
+
+                        http://example.com/stats.php?t=all
+
+                    o   server
+                        Displays current statistics about the
+                        torrentflux-b4rt server, including:
+
+                        *   speedDown
+                            The total download bandwidth being utilised.
+
+                        *   speedUp
+                            The total upload bandwidth being utilised.
+
+                        *   speedTotal
+                            The total bandwidth being utilised
+                            (upload and download combined).
+
+                        *   cons
+                            The total number of network connections.
+
+                        *   freeSpace
+                            The available disk space left in MB/GB.
+
+                        *   loadavg
+                            The server load average.
+
+                        *   running
+                            The number of transfers currently in the
+                            'running' state.
+
+                        *   queued
+                            The number of transfers currently in the
+                            'queued' state.
+
+                        *   speedDownPercent
+                            Download speed as a percentage of maximum
+                            available download bandwidth.
+
+                        *   speedUpPercent
+                            Upload speed as a percentage of maximum
+                            available upload bandwidth.
+
+                        *   driveSpacePercent
+                            Available drive space remaining as a
+                            percentage of total drive space.
+
+                        http://example.com/stats.php?t=server
+
+                    o   xfer
+                        Displays data transfer statistics, including:
+
+                        *   xferGlobalTotal
+                            The global transfer total (all users).
+
+                        *   xferGlobalMonth
+                            The current month's global transfer total.
+
+                        *   xferGlobalWeek
+                            The current week's global transfer total.
+
+                        *   xferGlobalDay
+                            The current day's global transfer total.
+
+                        *   xferUserTotal
+                            The current user's transfer total.
+
+                        *   xferUserMonth
+                            The current user's transfer total for the
+                            current month.
+
+                        *   xferUserWeek
+                            The current user's transfer total for the
+                            current week.
+
+                        *   xferUserDay
+                            The current user's transfer total for the
+                            current day.
+
+                        http://example.com/stats.php?t=xfer
+
+                    o   users
+                        Displays the current online status for each
+                        torrentflux-b4rt user.
+
+                        http://example.com/stats.php?t=users
+
+                    o   transfers
+                        Displays statistics for all transfers currently
+                        being handled by torrentflux-b4rt.
+
+                        The statistics displayed for each transfer include:
+
+                        *   running
+                            Whether the transfer is running or not.
+
+                        *   speedDown
+                            The current download speed of the transfer.
+
+                        *   speedUp
+                            The current upload speed of the transfer.
+
+                        *   percentDone
+                            The percentage of the transfer currently
+                            completed.
+
+                        *   sharing
+                            The share ratio for the current transfer as
+                            a percentage.
+
+                        *   eta
+                            The estimated time until the transfer is
+                            complete.
+
+                        *   seeds
+                            The number of seeds currently connected.
+
+                        *   peers
+                            The number of peers currently connected.
+
+                        *   downCurrent
+                            The current download bandwidth being
+                            utilised for the transfer.
+
+                        *   upCurrent
+                            The current upload bandwidth being utilised
+                            for the transfer.
+
+                        The following statistics are also displayed for
+                        the 'transfers' argument:
+
+                        *   downTotal
+                            The current total download bandwidth being
+                            utilised.
+
+                        *   upTotal
+                            The current total upload bandwidth being
+                            utilised.
+
+                        *   cons
+                            The current total number of network
+                            connections.
+
+                        http://example.com/stats.php?t=transfers
+
+                    o   transfer
+                        Displays statistics for a single transfer
+                        specified using the 'i' argument.
+
+                        http://example.com/stats.php?t=transfer&i=foobar.torrent
+
+                Specifying the format of the stats.php output
+
+                    The type of format to use when displaying output
+                    from the stats.php script is determined by the 'f'
+                    argument used in the query string of the request
+                    URL.
+
+                    For example, to display statistics relating to the
+                    server in RSS format, you would browse to the
+                    following page:
+
+                    http://example.com/torrentflux-b4rt/stats.php?t=server&f=rss
+
+                    The valid options for the 'f' argument are:
+
+                    o   xml
+                        Displays output in XML format.
+
+                    o   rss
+                        Displays output in RSS 0.91 format.
+
+                        Suitable for receiving regular updates in an RSS
+                        feed reader.
+
+                    o   txt
+                        Displays output in plain text format.
+
+                        Suitable for parsing with custom command line
+                        scripts.
+
+				Sending column headers in text mode stats.php output
+
+					The 'h' argument to the stats.php script
+					specifies that column headers should be sent
+					out with the output (text format only).
+
+					For example, the plain text output generated with
+					the request URL:
+
+                    http://example.com/torrentflux-b4rt/stats.php?t=server&f=txt
+
+					might look like this:
+
+					0.00;0.00;0.00;1;12.37 GB; 1.25, 1.03, 0.56;1;0;0;0;87
+
+					which is just the raw server statistics.
+
+					Instead if we add on the argument 'h=1' to indicate
+					we want column headers displayed as well - so the
+					request URL looks like this:
+
+                    http://example.com/torrentflux-b4rt/stats.php?t=server&f=txt&h=1
+
+					then the output displayed will look like this:
+
+					Speed Down;Speed Up;Speed Total;Connections;Free Space;Load;Running;Queued;Speed Down (Percent);Speed Up (Percent);Drive Space (Percent)
+					0.00;0.00;0.00;1;12.37 GB; 1.25, 1.03, 0.56;1;0;0;0;87
+
+					This makes it easier to tell what each field/value
+					of the output represents.
+
+					Tip:
+					----
+
+					The field separator - ';' in the example above - can
+					be changed on [TODO_LINK:admin.stats 'the Admin Ops,
+					Configuring stats.php page'].
+
+					The stats.php script can also be configured to send
+					out header columns by default - again on
+					[TODO_LINK:admin.stats 'the Admin Ops, Configuring
+					stats.php page'].
+
+				Sending stats.php output as HTTP file attachments
+
+					The 'a' argument to the stats.php request URL allows
+					you to specify that you want the output to be sent
+					as an HTTP attachment.  This has the effect in most
+					web browsers of prompting the user for a location to
+					download the file to or open the file up in an
+					associated application if MIME types are configured
+					correctly.
+
+					To have output sent as an attachment, use 'a=1' as
+					an argument to the stats.php request URL - for
+					example:
+
+                    http://example.com/torrentflux-b4rt/stats.php?t=server&a=1
+
+					This request URL would send out the server
+					statistics as an HTTP attachment, prompting the user
+					to save the file to disk or open it in an external
+					application.
+
+				Sending compressed stats.php output
+
+					The 'c' argument to the stats.php request URL allows
+					you to specify that the output from stats.php should
+					be compressed when sending.  Most modern web
+					browsers automatically 'deflate' the compressed
+					content on receipt so it's human readable.
+
+					Compression saves bandwidth at the cost of adding
+					some small additional CPU resource cost on the
+					server.
+
+					To have output compressed when sending, use 'c=1' as
+					an argument to the stats.php request URL - for
+					example:
+
+                    http://example.com/torrentflux-b4rt/stats.php?t=server&c=1
+
+					This request URL would compress the server
+					statistics output before sending it out.
+
+					Tip:
+					----
+					The level of compression to use can be configured on
+					the [TODO_LINK:admin.stats 'the Admin Ops,
+					Configuring stats.php page'].
+
+                Configuring stats.php
+
+                    A number of options can be configured to control the
+                    way the stats.php script operates.  Refer to the
+                    [TODO_LINK:admin.stats 'Admin Ops, configuring
+                    stats.php section of the manual.]

+ 5 - 0
html/.buildpath

@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<buildpath>
+	<buildpathentry kind="src" path=""/>
+	<buildpathentry kind="con" path="org.eclipse.php.core.LANGUAGE"/>
+</buildpath>

+ 124 - 0
html/admin.php

@@ -0,0 +1,124 @@
+<?php
+
+/* $Id: admin.php 3057 2007-05-27 13:06:39Z b4rt $ */
+
+/*******************************************************************************
+
+ LICENSE
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License (GPL)
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ To read the license please visit http://www.gnu.org/copyleft/gpl.html
+
+*******************************************************************************/
+
+// main.internal -- core bootstrap; presumably populates $cfg (incl. isAdmin) -- defined outside this file
+require_once("inc/main.internal.php");
+
+// common functions (AuditAction, tfb_getRequestVar, error, ...)
+require_once('inc/functions/functions.common.php');
+
+// admin functions (the admin_* handlers dispatched below)
+require_once('inc/functions/functions.admin.php');
+
+// access-check
+if ((!isset($cfg['isAdmin'])) || (!$cfg['isAdmin'])) {
+	 // the user probably hit this page direct -- NOTE(review): header() does not halt execution; without an exit() here the op below still runs for non-admins -- confirm handlers re-check privileges
+	AuditAction($cfg["constants"]["access_denied"], "ILLEGAL ACCESS: No Admin");
+	@header("location: index.php?iid=index");
+}
+
+// op-arg : requested admin operation, defaults to "default" (renders admin index)
+$op = (isset($_REQUEST['op'])) ? tfb_getRequestVar('op') : "default";
+
+// check arg : op must be purely alphabetic -- also keeps the require_once in the default case path-safe
+if (!preg_match('/^[a-zA-Z]+$/D', $op)) {
+	AuditAction($cfg["constants"]["error"], "INVALID ADMIN-OP : ".$op);
+	@error("Invalid Admin-Op", "admin.php", "Admin", array($op));
+}
+
+// op-switch -- NOTE(review): no break statements, so every case falls through to the next; this is only safe if each admin_* handler exits/redirects internally -- confirm
+switch ($op) {
+
+	case "updateServerSettings":
+		admin_updateServerSettings();
+
+	case "updateTransferSettings":
+		admin_updateTransferSettings();
+
+	case "updateWebappSettings":
+		admin_updateWebappSettings();
+
+	case "updateIndexSettings":
+		admin_updateIndexSettings();
+
+	case "updateControlSettings":
+		admin_updateControlSettings();
+
+	case "updateDirSettings":
+		admin_updateDirSettings();
+
+	case "updateStatsSettings":
+		admin_updateStatsSettings();
+
+	case "updateXferSettings":
+		admin_updateXferSettings();
+
+	case "updateFluxdSettings":
+		admin_updateFluxdSettings();
+
+	case "controlFluxd":
+		admin_controlFluxd();
+
+	case "controlFluAzu":
+		admin_controlFluAzu();
+
+	case "updateFluAzuSettings":
+		admin_updateFluAzuSettings();
+
+	case "updateAzureusSettings":
+		admin_updateAzureusSettings();
+
+	case "updateSearchSettings":
+		admin_updateSearchSettings();
+
+	case "addLink":
+		admin_addLink();
+
+	case "editLink":
+		admin_editLink();
+
+	case "moveLink":
+		admin_moveLink();
+
+	case "deleteLink":
+		admin_deleteLink();
+
+	case "addRSS":
+		admin_addRSS();
+
+	case "deleteRSS":
+		admin_deleteRSS();
+
+	case "deleteUser":
+		admin_deleteUser();
+
+	case "setUserState":
+		admin_setUserState();
+
+	default:
+		// set iid-var
+		$_REQUEST["iid"] = "admin";
+		// include page -- $op is validated as [a-zA-Z]+ above, so this path cannot traverse directories
+		@require_once("inc/iid/admin/".$op.".php");
+}
+
+?>

+ 2 - 0
html/bin/.htaccess

@@ -0,0 +1,2 @@
+Order deny,allow
+Deny from all

+ 243 - 0
html/bin/check/check-cli.php

@@ -0,0 +1,243 @@
+#!/usr/bin/env php
+<?php
+
+/* $Id: check-cli.php 3081 2007-06-08 19:08:42Z warion $ */
+
+/*******************************************************************************
+
+ LICENSE
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License (GPL)
+ as published by the Free Software Foundation; either version 2
+ of the License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ To read the license please visit http://www.gnu.org/copyleft/gpl.html
+
+*******************************************************************************/
+
+// prevent invocation from web : CLI has $argv[0] and no request vars
+if (empty($argv[0])) die();
+if (isset($_REQUEST['argv'])) die();
+
+/******************************************************************************/
+
+// defines : app name, SVN revision (parsed out of the $Revision keyword), page title
+define('_NAME', 'torrentflux-b4rt 1.0');
+preg_match('|.* (\d+) .*|', '$Revision: 3081 $', $revisionMatches);
+define('_REVISION', $revisionMatches[1]);
+define('_TITLE', _NAME.' - check-cli - Revision '._REVISION);
+
+// fields : counters for hard failures (errors) and degraded features (warnings), plus supported-db count
+$errors = 0;
+$warnings = 0;
+$dbsupported = 0;
+$errorsMessages = array();
+$warningsMessages = array();
+
+// -----------------------------------------------------------------------------
+// Main
+// -----------------------------------------------------------------------------
+
+// title
+echo _TITLE."\n";
+
+// PHP-Version
+echo '1. PHP-Version'."\n";
+
+// version -- NOTE(review): PHP_VERSION is a string ('5.2.3'); '< 4.3' relies on PHP's numeric cast of the leading major.minor -- version_compare() would be the robust comparison
+$phpVersion = 'PHP-Version : '.PHP_VERSION.' ';
+if (PHP_VERSION < 4.3) {
+	$phpVersion .= 'Failed';
+	$errors++;
+	array_push($errorsMessages, "PHP-Version : 4.3 or higher required.");
+} else {
+	$phpVersion .= 'Passed';
+}
+echo $phpVersion."\n";
+// cli : this checker must run under the command-line SAPI, not a web SAPI
+$phpcli = 'PHP-SAPI : ';
+$phpsapi = php_sapi_name();
+if ($phpsapi != 'cli') {
+	$phpcli .= $phpsapi . ' Failed';
+	$errors++;
+	array_push($errorsMessages, "PHP-SAPI : CLI version of PHP required.");
+} else {
+	$phpcli .= $phpsapi . ' Passed';
+}
+echo $phpcli."\n";
+
+// PHP-Extensions
+echo '2. PHP-Extensions'."\n";
+
+$loadedExtensions = get_loaded_extensions();
+// pcre : hard requirement (error)
+$pcre = 'pcre ';
+if (in_array("pcre", $loadedExtensions)) {
+	$pcre .= 'Passed';
+} else {
+	$pcre .= 'Failed';
+	$errors++;
+	array_push($errorsMessages, "PHP-Extensions : pcre required.");
+}
+echo $pcre."\n";
+// sockets : soft requirement (warning) -- only fluxd communication needs it
+$sockets = 'sockets ';
+if (in_array("sockets", $loadedExtensions)) {
+	$sockets .= 'Passed';
+} else {
+	$sockets .= 'Failed';
+	$warnings++;
+	array_push($warningsMessages, "PHP-Extensions : sockets required for communication with fluxd. fluxd cannot work without sockets.");
+}
+echo $sockets."\n";
+//
+
+// PHP-Configuration
+echo '3. PHP-Configuration'."\n";
+
+// safe_mode : must be off (hard requirement)
+$safe_mode = 'safe_mode ';
+if ((ini_get("safe_mode")) == 0) {
+	$safe_mode .= 'Passed';
+} else {
+	$safe_mode .= 'Failed';
+	$errors++;
+	array_push($errorsMessages, "PHP-Configuration : safe_mode must be turned off.");
+}
+echo $safe_mode."\n";
+// allow_url_fopen : should be on (warning only)
+$allow_url_fopen = 'allow_url_fopen ';
+if ((ini_get("allow_url_fopen")) == 1) {
+	$allow_url_fopen .= 'Passed';
+} else {
+	$allow_url_fopen .= 'Failed';
+	array_push($warningsMessages, "PHP-Configuration : allow_url_fopen must be turned on. some features wont work if it is turned off.");
+	$warnings++;
+}
+echo $allow_url_fopen."\n";
+// register_globals : must be off (hard requirement)
+$register_globals = 'register_globals ';
+if ((ini_get("register_globals")) == 0) {
+	$register_globals .= 'Passed';
+} else {
+	$register_globals .= 'Failed';
+	$errors++;
+	array_push($errorsMessages, "PHP-Configuration : register_globals must be turned off.");
+}
+echo $register_globals."\n";
+//
+
+// PHP-Database-Support
+echo '4. PHP-Database-Support'."\n";
+
+// define valid db-types : map of db name => probe function; support is detected via function_exists()
+$databaseTypes = array();
+$databaseTypes['mysql'] = 'mysql_connect';
+$databaseTypes['mysqli'] = 'mysqli_connect';
+$databaseTypes['sqlite'] = 'sqlite_open';
+$databaseTypes['postgres'] = 'pg_connect';
+// test db-types
+foreach ($databaseTypes as $databaseTypeName => $databaseTypeFunction) {
+	$dbtest = $databaseTypeName.' ';
+	if (function_exists($databaseTypeFunction)) {
+		$dbtest .= 'Passed';
+		$dbsupported++;
+	} else {
+		$dbtest .= 'Failed';
+	}
+	echo $dbtest."\n";
+}
+// db-state : at least one supported database extension is mandatory
+if ($dbsupported == 0) {
+	$errors++;
+	array_push($errorsMessages, "PHP-Database-Support : no supported database-type found.");
+}
+
+// OS-Specific
+// get os : _OS is 1 = linux, 2 = bsd, 0 = unsupported/unknown
+$osString = php_uname('s');
+if (isset($osString)) {
+    if (!(stristr($osString, 'linux') === false)) /* linux */
+    	define('_OS', 1);
+    else if (!(stristr($osString, 'bsd') === false)) /* bsd */
+    	define('_OS', 2);
+    else
+    	define('_OS', 0);
+} else {
+	define('_OS', 0);
+}
+echo '5. OS-Specific ('.$osString.' '.php_uname('r').')'."\n";
+switch (_OS) {
+	case 1: // linux
+		echo 'No Special Requirements on Linux-OS. Passed'."\n";
+		break;
+	case 2: // bsd : netstat features need the posix extension (warning only)
+		// posix
+		$posix = 'posix ';
+		if ((function_exists('posix_geteuid')) && (function_exists('posix_getpwuid'))) {
+			$posix .= 'Passed';
+		} else {
+			$posix .= 'Failed';
+			$warnings++;
+			array_push($warningsMessages, "OS-Specific : PHP-extension posix missing. some netstat-features wont work without.");
+		}
+		echo $posix."\n";
+		break;
+	case 0: // unknown
+	default:
+		echo "OS not supported.\n";
+		$errors++;
+		array_push($errorsMessages, "OS-Specific : ".$osString." not supported.");
+		break;
+}
+
+// summary
+echo '----- Summary -----'."\n";
+
+// state : Ok (no issues), Warning (warnings only), Failed (any error)
+$state = "State : ";
+if (($warnings + $errors) == 0) {
+	// good
+	$state .= 'Ok'."\n";
+	echo $state;
+	echo _NAME." should run on this system.\n";
+} else {
+	if (($errors == 0) && ($warnings > 0)) {
+		// may run with flaws
+		$state .= 'Warning'."\n";
+		echo $state;
+		echo _NAME." may run on this system, but there may be problems.\n";
+	} else {
+		// not ok
+		$state .= 'Failed'."\n";
+		echo $state;
+		echo _NAME." cannot run on this system.\n";
+	}
+}
+
+// errors
+if (count($errorsMessages) > 0) {
+	echo ('Errors :'."\n");
+	foreach ($errorsMessages as $errorsMessage) {
+		echo " - ".$errorsMessage."\n";
+	}
+}
+
+// warnings
+if (count($warningsMessages) > 0) {
+	echo ('Warnings :'."\n");
+	foreach ($warningsMessages as $warningsMessage) {
+		echo " - ".$warningsMessage."\n";
+	}
+}
+
+//  exit
+exit();
+
+?>

+ 357 - 0
html/bin/check/check.pl

@@ -0,0 +1,357 @@
+#!/usr/bin/perl
+################################################################################
+# $Id: check.pl 2823 2007-04-07 08:37:43Z warion $
+# $Date: 2007-04-07 03:37:43 -0500 (Sat, 07 Apr 2007) $
+# $Revision: 2823 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+#                                                                              #
+# check.pl is a simple script to check Perl-Module-Requirements.               #
+#                                                                              #
+################################################################################
+use strict;
+################################################################################
+
+# Internal Vars
+my ($VERSION, $DIR, $PROG, $EXTENSION, $USAGE);
+
+#-------------------------------------------------------------------------------
+# Main
+#-------------------------------------------------------------------------------
+
+# init some vars
+$VERSION =
+	do { my @r = (q$Revision: 2823 $ =~ /\d+/g); sprintf "%d"."%02d" x $#r, @r };
+($DIR=$0) =~ s/([^\/\\]*)$//;
+($PROG=$1) =~ s/\.([^\.]*)$//;
+$EXTENSION=$1;
+
+# check args
+my $argCount = scalar(@ARGV);
+if ($argCount != 1) {
+	printUsage();
+	exit;
+}
+
+# ops
+if ($argCount == 1) {
+	SWITCH: {
+		$_ = shift @ARGV;
+		/all/ && do { # --- all ---
+			checkAll();
+			exit;
+		};
+		/fluxd/ && do { # --- fluxd ---
+			checkFluxd();
+			exit;
+		};
+		/nzbperl/ && do { # --- nzbperl ---
+			checkNzbperl();
+			exit;
+		};
+		/ttools/ && do { # --- ttools ---
+			checkTtools();
+			exit;
+		};
+		/.*(version|-v).*/ && do { # --- version ---
+			printVersion();
+			exit;
+		};
+		/.*(help|-h).*/ && do { # --- help ---
+			printUsage();
+			exit;
+		};
+		printUsage();
+		exit;
+	}
+}
+
+# exit
+exit;
+
+#===============================================================================
+# Subs
+#===============================================================================
+
+#------------------------------------------------------------------------------#
+# Sub: checkAll                                                                #
+# Arguments: Null                                                              #
+# Returns: info on system requirements                                         #
+#------------------------------------------------------------------------------#
+sub checkAll {
+	# print
+	print "checking all requirements...\n";
+	# 1. fluxd
+	checkFluxd();
+	# 2. nzbperl
+	checkNzbperl();
+	# 3. ttools
+	checkTtools();
+	# done
+	print "done checking all requirements.\n";
+}
+
+#------------------------------------------------------------------------------#
+# Sub: checkFluxd                                                              #
+# Arguments: Null                                                              #
+# Returns: info on system requirements                                         #
+#------------------------------------------------------------------------------#
+sub checkFluxd {
+	# print
+	print "checking fluxd requirements...\n";
+
+	my $errors = 0;
+	my $warnings = 0;
+	my @errorMessages = ();
+	my @warningMessages = ();
+
+	# 1. CORE-Perl-modules
+	print "1. CORE-Perl-modules\n";
+	my @mods = (
+		'IO::Select',
+		'IO::Socket::UNIX',
+		'IO::Socket::INET',
+		'POSIX'
+	);
+	foreach my $mod (@mods) {
+		if (eval "require $mod")  {
+			print "   - OK : ".$mod."\n";
+			next;
+		} else {
+			$errors++;
+			push(@errorMessages, "Loading of CORE-Perl-module ".$mod." failed.\n");
+			print "   - FAILED : ".$mod."\n";
+		}
+	}
+
+	# 2. FluxDB-Perl-modules
+	print "2. Database-Perl-modules\n";
+	if (eval "require DBI")  {
+		print "   - OK : DBI\n";
+	} else {
+		$warnings++;
+		push(@warningMessages, "Loading of FluxDB-Perl-module DBI failed. fluxd cannot work in DBI/DBD-mode but only in PHP-mode.\n");
+		print "   - FAILED : DBI\n";
+	}
+	my $dbdwarnings = 0;
+	@mods = (
+		'DBD::mysql',
+		'DBD::SQLite',
+		'DBD::Pg'
+	);
+	foreach my $mod (@mods) {
+		if (eval "require $mod")  {
+			print "   - OK : ".$mod."\n";
+			next;
+		} else {
+			$dbdwarnings++;
+			print "   - FAILED : ".$mod."\n";
+		}
+	}
+	if ($dbdwarnings == 3) {
+		$warnings++;
+		push(@warningMessages, "No DBD-Module could be loaded. fluxd cannot work in DBI/DBD-mode but only in PHP-mode.\n");
+	}
+
+	# 3. Result
+	print "3. Result : ".(($errors == 0) ? "PASSED" : "FAILED")."\n";
+	# failures
+	if ($errors > 0) {
+		print "Errors:\n";
+		foreach my $msg (@errorMessages) {
+			print $msg;
+		}
+	}
+	# warnings
+	if ($warnings > 0) {
+		print "Warnings:\n";
+		foreach my $msg (@warningMessages) {
+			print $msg;
+		}
+	}
+
+	# done
+	print "done checking fluxd requirements.\n";
+}
+
+#------------------------------------------------------------------------------#
+# Sub: checkNzbperl                                                            #
+# Arguments: Null                                                              #
+# Returns: info on system requirements                                         #
+#------------------------------------------------------------------------------#
+sub checkNzbperl {
+	# print
+	print "checking nzbperl requirements...\n";
+
+	my $errors = 0;
+	my $warnings = 0;
+	my @errorMessages = ();
+	my @warningMessages = ();
+
+	# 1. CORE-Perl-modules
+	print "1. CORE-Perl-modules\n";
+	my @mods = (
+		'IO::File',
+		'IO::Select',
+		'IO::Socket::INET',
+		'File::Basename',
+		'Getopt::Long',
+		'Time::HiRes',
+		'Cwd',
+		'XML::Simple',
+		'XML::DOM'
+	);
+	foreach my $mod (@mods) {
+		if (eval "require $mod")  {
+			print "   - OK : ".$mod."\n";
+			next;
+		} else {
+			$errors++;
+			push(@errorMessages, "Loading of CORE-Perl-module ".$mod." failed.\n");
+			print "   - FAILED : ".$mod."\n";
+		}
+	}
+
+	# 2. Perl-Threads
+	my $threadproblems = 0;
+	print "2. Perl-Threads\n";
+	eval "use threads;";
+	if ($@) {
+		$warnings++;
+		print "   - FAILED : threads\n";
+		$threadproblems++;
+	} else {
+		print "   - OK : threads\n";
+	}
+	@mods = ('Thread::Queue');
+	foreach my $mod (@mods) {
+		if (eval "require $mod")  {
+			print "   - OK : ".$mod."\n";
+			next;
+		} else {
+			$warnings++;
+			$threadproblems++;
+			print "   - FAILED : ".$mod."\n";
+		}
+	}
+	if ($threadproblems != 0) {
+		$warnings++;
+		push(@warningMessages, "Could not use Perl thread modules.\n");
+	}
+
+	# 3. Result
+	print "3. Result : ".(($errors == 0) ? "PASSED" : "FAILED")."\n";
+	# failures
+	if ($errors > 0) {
+		print "Errors:\n";
+		foreach my $msg (@errorMessages) {
+			print $msg;
+		}
+	}
+	# warnings
+	if ($warnings > 0) {
+		print "Warnings:\n";
+		foreach my $msg (@warningMessages) {
+			print $msg;
+		}
+	}
+
+	# done
+	print "done checking nzbperl requirements.\n";
+}
+
+#------------------------------------------------------------------------------#
+# Sub: checkTtools                                                             #
+# Arguments: Null                                                              #
+# Returns: info on system requirements                                         #
+#------------------------------------------------------------------------------#
+sub checkTtools {
+	# print
+	print "checking ttools requirements...\n";
+
+	my $errors = 0;
+	my $warnings = 0;
+	my @errorMessages = ();
+	my @warningMessages = ();
+
+	# 1. CORE-Perl-modules
+	print "1. CORE-Perl-modules\n";
+	my @mods = ('Digest::SHA1', 'LWP::UserAgent');
+	foreach my $mod (@mods) {
+		if (eval "require $mod")  {
+			print "   - OK : ".$mod."\n";
+			next;
+		} else {
+			$errors++;
+			push(@errorMessages, "Loading of CORE-Perl-module ".$mod." failed.\n");
+			print "   - FAILED : ".$mod."\n";
+		}
+	}
+
+	# 2. Result
+	print "2. Result : ".(($errors == 0) ? "PASSED" : "FAILED")."\n";
+	# failures
+	if ($errors > 0) {
+		print "Errors:\n";
+		foreach my $msg (@errorMessages) {
+			print $msg;
+		}
+	}
+	# warnings
+	if ($warnings > 0) {
+		print "Warnings:\n";
+		foreach my $msg (@warningMessages) {
+			print $msg;
+		}
+	}
+
+	# done
+	print "done checking ttools requirements.\n";
+}
+
+#------------------------------------------------------------------------------#
+# Sub: printVersion                                                            #
+# Arguments: Null                                                              #
+# Returns: Null                                                                #
+#------------------------------------------------------------------------------#
+sub printVersion {
+	print $PROG.".".$EXTENSION." Version ".$VERSION."\n";
+}
+
+#------------------------------------------------------------------------------#
+# Sub: printUsage                                                              #
+# Parameters: null                                                             #
+# Return: null                                                                 #
+#------------------------------------------------------------------------------#
+sub printUsage {
+	print <<"USAGE";
+$PROG.$EXTENSION (Revision $VERSION)
+
+Usage: $PROG.$EXTENSION type
+       type may be : all/fluxd/nzbperl/ttools
+
+Examples:
+$PROG.$EXTENSION fluxd
+$PROG.$EXTENSION all
+
+USAGE
+
+}
+
+# EOF

+ 340 - 0
html/bin/clients/fluazu/dopal/COPYING

@@ -0,0 +1,340 @@
+            GNU GENERAL PUBLIC LICENSE
+               Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+            GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+             END OF TERMS AND CONDITIONS
+
+        How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.

+ 9 - 0
html/bin/clients/fluazu/dopal/README

@@ -0,0 +1,9 @@
+DOPAL is a library to allow programs written in Python to easily communicate with the Java BitTorrent client Azureus, via the XML/HTTP plugin (allowing communication over a network).
+
+This software is released under the GNU General Public License - see COPYING for more details.
+
+Details and updates for this library can be found at http://dopal.sourceforge.net/
+
+To determine the current version of the library, type the following line into a Python command prompt:
+
+  print __import__('dopal').__version_str__

+ 64 - 0
html/bin/clients/fluazu/dopal/__init__.py

@@ -0,0 +1,64 @@
+# File: __init__.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+__version__ = (0, 6, 0)
+__version_str__ = '%s.%s' % (__version__[0], ''.join([str(part) for part in __version__[1:]]))
+__user_agent__ = 'DOPAL/' + __version_str__
+
+__all__ = [
+
+    # Module variables.
+    '__version__', '__version_str__', '__user_agent__',
+
+    # Front-end modules.
+    'interact', 'main', 'scripting',
+
+    # Core-level modules.
+    'aztypes', 'core', 'debug', 'errors', 'utils', 'xmlutils',
+
+    # Object-level modules.
+    'classes', 'class_defs', 'convert', 'objects', 'obj_impl', 'persistency',
+    'logutils',
+]
+
+# Mode definitions:
+#   0 - Normal behaviour - should always be distributed with this value.
+#   1 - Debug mode - raise debug errors when appropriate.
+#   2 - Epydoc mode - used when Epydoc API documentation is being generated.
+__dopal_mode__ = 0
+
+__doc__ = '''
+DOPAL - DO Python Azureus Library (version %(__version_str__)s)
+
+@var __version__: DOPAL version as a tuple.
+@var __version_str__: DOPAL version as a string.
+@var __user_agent__: User agent string used by DOPAL when communicating with
+   Azureus.
+@var __dopal_mode__: Debug internal variable which controls some of the
+   behaviour of how DOPAL works - not meant for external use.
+
+@group Front-end modules: interact, main, scripting
+@group Core-level modules: aztypes, core, debug, errors, utils, xmlutils
+@group Object-level modules: classes, class_defs, convert, objects, obj_impl,
+          persistency, logutils
+''' % vars()
+
+# If we are in debug mode, auto-detect whether Epydoc is running and adjust the
+# mode accordingly.
+import sys
+if __dopal_mode__ == 1 and 'epydoc' in sys.modules:
+   __dopal_mode__ = 2
+del sys

+ 352 - 0
html/bin/clients/fluazu/dopal/aztypes.py

@@ -0,0 +1,352 @@
+# File: aztypes.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Utilities used to convert basic values between DOPAL and Azureus.
+'''
+
+__pychecker__ = 'no-unreachable'
+
+import types
+from dopal.errors import ArgumentWrapError, WrapError, UnwrapError, \
+    InvalidWrapTypeError, InvalidUnwrapTypeError, InconsistentWrapTypeError, \
+    NoSuchAzMethodError, IncorrectArgumentCountError, MethodArgumentWrapError,\
+    raise_as
+
+#
+# Methods / classes related to type wrappers
+#
+class TypeWrapper:
+
+    '''Pairs an XML type name with its already-serialised XML value.
+
+    Instances are produced by _wrapobject.wrap; anything exposing a
+    get_xml_type method is treated as pre-wrapped by wrap_value below.
+    '''
+
+    def __init__(self, xml_type, xml_value): # TypeWrapper
+        self.xml_type = xml_type
+        self.xml_value = xml_value
+
+    def get_xml_type(self): # TypeWrapper
+        # Name of the remote (Java/XML) type this value represents.
+        return self.xml_type
+
+    def as_xml(self): # TypeWrapper
+        # Value in the form it is transmitted (usually a string).
+        return self.xml_value
+
+# Stores the type name to _wrapobject mappings.
+_wrap_value_dict = {}
+class _wrapobject(object):
+
+    # If it is a native type, then it means that you can wrap the
+    # object without having to return a TypeWrapper instance.
+    native_type = False
+
+    def __init__(self, type_name, instance_type): # _wrapobject
+        self.type_name = type_name
+        self.instance_type = instance_type
+        _wrap_value_dict[type_name] = self
+
+    def __call__(self, value): # _wrapobject
+        return self.wrap(value)
+
+    # Only used if the value is not an instance of the type specified in
+    # instance_type.
+    def normalise_for_wrapping(self, value): # _wrapobject
+        raise WrapError("invalid type of object for %s: %s" % (self.type_name, type(value)), obj=value)
+
+    # Converts the value into a form where it can be added to a wrapper
+    # (usually as a string). For certain types (native types, like String,
+    # integer, long, float, boolean), it can return a value with one of those
+    # types instead.
+    def wrap_value(self, value): # _wrapobject
+        if not isinstance(value, types.StringTypes):
+            value = str(value)
+        return value
+
+    def wrap(self, value, force_wrapper=True): # _wrapobject
+
+        if self.instance_type is None or not isinstance(value, self.instance_type):
+            try:
+                value = self.normalise_for_wrapping(value)
+            except (TypeError, ValueError), error:
+                raise_as(error, WrapError)
+
+        if self.native_type and not force_wrapper:
+            return value
+
+        if self.wrap_value is None:
+            raise InvalidWrapTypeError(obj=self.type_name)
+
+        value = self.wrap_value(value)
+        return TypeWrapper(self.type_name, self.wrap_value(value))
+
+    def unwrap(self, value): # _wrapobject
+        if self.unwrap_value is None:
+            raise InvalidUnwrapTypeError(obj=self.type_name)
+
+        try:
+            value = self.unwrap_value(value)
+        except (TypeError, ValueError), error:
+            raise UnwrapError, error
+
+        return value
+
+    def unwrap_value(self, value): # _wrapobject
+        return value
+
+    def __str__(self): # _wrapobject
+        return '<wrapper object for type "%s">' % self.type_name
+
+class _wrapnative(_wrapobject):
+
+    '''Wrapper for types whose Python builtin handles every conversion.'''
+
+    native_type = True
+
+    # I know that it is confusing that the arguments are swapped round...
+    def __init__(self, type_obj):
+        # The builtin (e.g. int) supplies the type name, the instance
+        # check, and the converter in both directions.
+        _wrapobject.__init__(self, type_obj.__name__, type_obj)
+        self.normalise_for_wrapping = type_obj
+        self.unwrap_value = type_obj
+        self.wrap_value = type_obj
+
+def _value_as_boolean(value):
+
+    # Although this library is intended to be Python 2.2 compatible (i.e.
+    # before booleans were introduced), we only support versions of Python 2.2
+    # where False and True global constants are defined.
+    if value in [False, 0, 'False', 'false']:
+        return False
+    elif value in [True, 1, 'True', 'true']:
+        return True
+    else:
+        raise ValueError, "does not represent a boolean value"
+
+def _boolean_as_string(value):
+    if value:
+        return 'true'
+    else:
+        return 'false'
+
+# Registry population: one converter instance per remotely supported Java
+# type.  Each construction self-registers in _wrap_value_dict.
+wrap_int   = _wrapnative(int)
+wrap_long  = _wrapnative(long)
+wrap_float = _wrapnative(float)
+
+# We create it without passing an instance type, and then attempt to assign it
+# afterwards - Python 2.2 compatibility.
+wrap_boolean = _wrapobject('boolean', None)
+import sys
+if sys.version_info >= (2, 3):
+    wrap_boolean.instance_type = bool
+    wrap_boolean.native_type = True
+wrap_boolean.normalise_for_wrapping = _value_as_boolean
+wrap_boolean.wrap_value = _boolean_as_string
+
+# This function is a bit overkill for our needs, but it will suffice for now.
+wrap_boolean.unwrap_value = _value_as_boolean
+
+# We don't set normalise_for_wrapping, we want to impose a string type here.
+wrap_url = _wrapobject('URL', types.StringTypes)
+wrap_file = _wrapobject('File', types.StringTypes)
+
+# Java shorts travel as plain integers.
+wrap_short = _wrapobject('short', int)
+wrap_short.normalise_for_wrapping = int
+wrap_short.wrap_value = int
+wrap_short.unwrap_value = int
+
+from dopal.utils import string_to_hex_form, hex_string_to_binary
+
+# None - let the below functions deal with non string values.
+wrap_byte_array = _wrapobject('byte[]', None)
+wrap_byte_array.normalise_for_wrapping = string_to_hex_form
+wrap_byte_array.unwrap_value = hex_string_to_binary
+del string_to_hex_form, hex_string_to_binary
+
+wrap_string = _wrapobject('String', types.StringTypes)
+wrap_string.normalise_for_wrapping = str
+wrap_string.native_type = True
+
+def _unwrap_void(value):
+    if value == '':
+        return None
+    else:
+        raise ValueError, "non-null return value: %s" % value
+
+# 'void' appears only as a return type; wrap_value stays None so using it
+# as an argument type raises InvalidWrapTypeError.
+wrap_void = _wrapobject('void', None)
+wrap_void.wrap_value = None # Not an argument type.
+wrap_void.unwrap_value = _unwrap_void
+
+def wrap_value(value, value_type, force_wrapper=False):
+    if hasattr(value, 'get_xml_type'):
+        stored_type = value.get_xml_type()
+        if stored_type != value_type:
+            raise InconsistentWrapTypeError(stored_type, value_type)
+        return value
+
+    try:
+        converter = _wrap_value_dict[value_type]
+    except KeyError:
+        raise InvalidWrapTypeError(obj=value_type)
+    else:
+        return converter.wrap(value, force_wrapper=force_wrapper)
+
+def unwrap_value(value, value_type):
+    try:
+        converter = _wrap_value_dict[value_type]
+    except KeyError:
+        raise InvalidUnwrapTypeError(obj=value_type)
+    else:
+        return converter.unwrap(value)
+
+def is_java_argument_type(java_type):
+    return getattr(_wrap_value_dict.get(java_type), 'wrap_value', None) is not None
+
+def is_java_return_type(java_type):
+    return getattr(_wrap_value_dict.get(java_type), 'unwrap_value', None) is not None
+
+def get_component_type(java_type):
+    if isinstance(java_type, str):
+        if java_type[-2:] == '[]':
+            return java_type[:-2]
+    return None
+
+def get_basic_component_type(java_type):
+    while True:
+        component_type = get_component_type(java_type)
+        if component_type is None:
+            return java_type
+        java_type = component_type
+
+    # This can't happen, but it just stops PyChecker getting worried about it.
+    return None
+
+def is_array_type(java_type):
+    # True when the Java type name carries at least one "[]" suffix.
+    return get_component_type(java_type) is not None
+
+class AzMethod(object):
+
+    def __init__(self, name, arguments=(), return_type='void'): # AzMethod
+        object.__init__(super)
+        self.name = name
+        self.arg_types = arguments
+        self.arg_count = len(arguments)
+        self.return_type = return_type
+
+    def has_return_type(self): # AzMethod
+        return self.return_type == 'void'
+
+    def wrap_args(self, *args): # AzMethod
+        if len(args) != self.arg_count:
+            raise IncorrectArgumentCountError, (self.name, len(args), self.arg_count)
+
+        result = []
+        for i in range(self.arg_count):
+            try:
+                result.append(wrap_value(args[i], self.arg_types[i]))
+            except WrapError, error:
+                raise ArgumentWrapError(i, args[i], self.arg_types[i], error)
+
+        return result
+
+    def __eq__(self, other): # AzMethod
+        if not isinstance(other, AzMethod):
+            return False
+        for attr in ['name', 'return_type', 'arg_types']:
+            if getattr(self, attr) != getattr(other, attr):
+                return False
+        return True
+
+    def __ne__(self, other): # AzMethod
+        return not (self == other)
+
+    def __str__(self): # AzMethod
+        arg_string = ', '.join(self.arg_types)
+        return "%s %s(%s)" % (self.return_type, self.name, arg_string)
+
+    def __repr__(self): # AzMethod
+        return "AzMethod [%s]" % self
+
+class AzureusMethods(object):
+
+    def __init__(self, methods=None):
+        object.__init__(self)
+        self.__data = {}
+        if methods:
+            map(self.add_method, methods)
+
+    def add_method(self, az_method):
+        methods_dict = self.__data.setdefault(az_method.name, {})
+        method_args_seq = methods_dict.setdefault(az_method.arg_count, [])
+
+        if az_method not in method_args_seq:
+            method_args_seq.append(az_method)
+
+    def get_method_names(self):
+        names = self.__data.keys()
+        names.sort()
+        return names
+
+    def get_method_arg_count(self, name):
+        try:
+            result = self.__data[name].keys()
+        except KeyError:
+            return []
+        else:
+            result.sort()
+            return result
+
+    def match_method_spec(self, name, argcount):
+        try:
+            return self.__data[name][argcount]
+        except KeyError:
+            try:
+                argcounts_dict = self.__data[name]
+            except KeyError:
+                raise NoSuchAzMethodError, name
+            argcounts = argcounts_dict.keys()
+            argcounts.sort()
+            raise IncorrectArgumentCountError, (name, argcount, argcounts_dict.keys())
+
+    def get_matching_methods(self, name, args):
+        methods = self.match_method_spec(name, len(args))
+        accepted_methods = {}
+        rejected_methods = {}
+        for methodobj in methods:
+            try:
+                accepted_methods[methodobj] = methodobj.wrap_args(*args)
+            except ArgumentWrapError, error:
+                rejected_methods[methodobj] = error
+
+        if not accepted_methods:
+            raise MethodArgumentWrapError, (name, rejected_methods.items())
+
+        return accepted_methods.items()
+
+    def wrap_args(self, name, args):
+        method_data_tpls = self.get_matching_methods(name, args)
+        if len(method_data_tpls) > 1:
+            method_data = self.resolve_ambiguous_method(args, method_data_tpls)
+        else:
+            method_data = method_data_tpls[0]
+
+        return method_data[1], method_data[0].return_type
+
+    def resolve_ambiguous_method(self, args, method_data_tpls):
+        # XXX: We'll implement something cleverer at a later date.
+        return method_data_tpls[0]
+
+    def get_all_methods(self):
+        result = []
+        for method_name, method_dict in self.__data.items():
+            for argcount, method_seq in method_dict.items():
+                result.extend(method_seq)
+        return result
+
+    def update(self, azmethodsobj):
+        for azmethod in azmethodsobj.get_all_methods():
+            self.add_method(azmethod)

+ 4817 - 0
html/bin/clients/fluazu/dopal/class_defs.py

@@ -0,0 +1,4817 @@
+# File: class_defs.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Defines all constants, variables and methods available on classes in Azureus's
+plugin API.
+
+Not all classes that are defined here are mentioned in this API documentation
+(simply because too much documentation would be generated).
+
+For each class that DOPAL has explicit support for, there will be a class
+defined here which contains the class information. The list of classes
+supported in this version is described in L{classes}.
+
+For each class that we support, there will be a class in this module with the
+same name with the suffix of "DataType". It will have a C{get_xml_type} method
+defined on the class. It may have some of the following attributes on the
+class:
+  - A C{__az_methods__} attribute (which will be a L{AzureusMethods}
+    instance) - defining what methods we support. These correlate to the
+    methods defined on each class.
+  - A C{__az_constants__} attribute (which will be a dictionary) - defining
+    what class constants are defined. These correlate to the constants on
+    the class.
+  - A C{__az_attributes__} attribute (which will be a dictionary) - defining
+    what attributes will be on the object. These attributes are unique to the
+    XML/HTTP plugin. The plugin defines certain precalculated 'attributes' when
+    returning the data for a given object, by invoking certain methods on the
+    object. This saves the client making multiple calls on an object to
+    retrieve certain pieces of information, by calculating it and returning it
+    all in one go.
+
+If you are interested to know what methods are available on each classes, then
+please look at Azureus's own
+U{Javadoc API documentation<http://azureus.sourceforge.net/plugins/docCVS/>}.
+
+The meta-attributes for these classes are defined by the XML/HTTP plugin - you
+can find their definitions in the
+U{GenericRPAttributes.java<http://cvs.sourceforge.net/viewcvs.py/azureus/azureus2/org/gudy/azureus2/pluginsimpl/remote/GenericRPAttributes.java?view=markup>}
+source file.
+'''
+
+#
+# The contents of this module are automatically generated by
+# class_defs_make.py.
+#
+
+import dopal.classes
+from dopal.aztypes import AzMethod as _method
+from dopal.aztypes import AzureusMethods
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins
+#
+#------------------------------------------------------------------------------
+
+# Generated by class_defs_make.py - descriptor for LaunchablePlugin.
+class LaunchablePluginDataType:
+    
+    def get_xml_type():
+        return "LaunchablePlugin"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('initialize', ['PluginInterface']),
+        _method('process', [], 'boolean'),
+        _method('setDefaults', ['String[]']),
+    ])
+
+# Generated by class_defs_make.py - descriptor for Plugin.
+class PluginDataType:
+    
+    def get_xml_type():
+        return "Plugin"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('initialize', ['PluginInterface']),
+    ])
+
+# Generated by class_defs_make.py - descriptor for PluginConfig.
+class PluginConfigDataType:
+    
+    def get_xml_type():
+        return "PluginConfig"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'CORE_PARAM_BOOLEAN_FRIENDLY_HASH_CHECKING': "CORE_PARAM_BOOLEAN_FRIENDLY_HASH_CHECKING",
+        'CORE_PARAM_BOOLEAN_MAX_ACTIVE_SEEDING': "Max Active Torrents When Only Seeding Enabled",
+        'CORE_PARAM_BOOLEAN_MAX_UPLOAD_SPEED_SEEDING': "Max Upload Speed When Only Seeding Enabled",
+        'CORE_PARAM_BOOLEAN_NEW_SEEDS_START_AT_TOP': "Newly Seeding Torrents Get First Priority",
+        'CORE_PARAM_BOOLEAN_SOCKS_PROXY_NO_INWARD_CONNECTION': "SOCKS Proxy No Inward Connection",
+        'CORE_PARAM_INT_MAX_ACTIVE': "Max Active Torrents",
+        'CORE_PARAM_INT_MAX_ACTIVE_SEEDING': "Max Active Torrents When Only Seeding",
+        'CORE_PARAM_INT_MAX_CONNECTIONS_GLOBAL': "Max Connections Global",
+        'CORE_PARAM_INT_MAX_CONNECTIONS_PER_TORRENT': "Max Connections Per Torrent",
+        'CORE_PARAM_INT_MAX_DOWNLOAD_SPEED_KBYTES_PER_SEC': "Max Download Speed KBs",
+        'CORE_PARAM_INT_MAX_DOWNLOADS': "Max Downloads",
+        'CORE_PARAM_INT_MAX_UPLOAD_SPEED_KBYTES_PER_SEC': "Max Upload Speed KBs",
+        'CORE_PARAM_INT_MAX_UPLOAD_SPEED_SEEDING_KBYTES_PER_SEC': "Max Upload Speed When Only Seeding KBs",
+        'CORE_PARAM_INT_MAX_UPLOADS': "Max Uploads",
+        'CORE_PARAM_INT_MAX_UPLOADS_SEEDING': "Max Uploads Seeding",
+        'CORE_PARAM_STRING_LOCAL_BIND_IP': "CORE_PARAM_STRING_LOCAL_BIND_IP",
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   List getPluginListParameter(String, List)
+    #   Map getPluginMapParameter(String, Map)
+    #   void setPluginListParameter(String, List)
+    #   void setPluginMapParameter(String, Map)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addListener', ['PluginConfigListener']),
+        _method('getBooleanParameter', ['String', 'boolean'], 'boolean'),
+        _method('getBooleanParameter', ['String'], 'boolean'),
+        _method('getByteParameter', ['String', 'byte[]'], 'byte[]'),
+        _method('getFloatParameter', ['String'], 'float'),
+        _method('getIntParameter', ['String'], 'int'),
+        _method('getIntParameter', ['String', 'int'], 'int'),
+        _method('getParameter', ['String'], 'ConfigParameter'),
+        _method('getPluginBooleanParameter', ['String', 'boolean'], 'boolean'),
+        _method('getPluginBooleanParameter', ['String'], 'boolean'),
+        _method('getPluginByteParameter', ['String', 'byte[]'], 'byte[]'),
+        _method('getPluginConfigKeyPrefix', [], 'String'),
+        _method('getPluginIntParameter', ['String'], 'int'),
+        _method('getPluginIntParameter', ['String', 'int'], 'int'),
+        _method('getPluginParameter', ['String'], 'ConfigParameter'),
+        _method('getPluginStringParameter', ['String'], 'String'),
+        _method('getPluginStringParameter', ['String', 'String'], 'String'),
+        _method('getPluginUserFile', ['String'], 'File'),
+        _method('getStringParameter', ['String'], 'String'),
+        _method('getStringParameter', ['String', 'String'], 'String'),
+        _method('save'),
+        _method('setBooleanParameter', ['String', 'boolean']),
+        _method('setIntParameter', ['String', 'int']),
+        _method('setPluginParameter', ['String', 'byte[]']),
+        _method('setPluginParameter', ['String', 'int']),
+        _method('setPluginParameter', ['String', 'String']),
+        _method('setPluginParameter', ['String', 'boolean']),
+    ])
+    
+    # 
+    # The following constants are not defined here, because their types
+    # are not available in DOPAL:
+    # 
+    #   "cached_property_values" (has type "java.lang.Object[])"
+    #   
+    __az_attributes__ = {
+        'cached_property_names': 'String[]',
+    }
+
+# Generated by class_defs_make.py - descriptor for PluginConfigListener.
+class PluginConfigListenerDataType:
+    
+    def get_xml_type():
+        return "PluginConfigListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('configSaved'),
+    ])
+
+# Generated by class_defs_make.py - descriptor for PluginEvent.
+class PluginEventDataType:
+    
+    def get_xml_type():
+        return "PluginEvent"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'PEV_CONFIGURATION_WIZARD_COMPLETES': 2,
+        'PEV_CONFIGURATION_WIZARD_STARTS': 1,
+        'PEV_FIRST_USER_EVENT': 1024,
+        'PEV_INITIALISATION_PROGRESS_PERCENT': 4,
+        'PEV_INITIALISATION_PROGRESS_TASK': 3,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Object getValue()
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('getType', [], 'int'),
+    ])
+
+# Generated by class_defs_make.py - descriptor for PluginEventListener.
+class PluginEventListenerDataType:
+    
+    def get_xml_type():
+        return "PluginEventListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('handleEvent', ['PluginEvent']),
+    ])
+
+# Generated by class_defs_make.py - descriptor for PluginInterface.
+class PluginInterfaceDataType:
+    
+    def get_xml_type():
+        return "PluginInterface"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void addView(PluginView)
+    #   PluginInterface getLocalPluginInterface(Class, String)
+    #   ClassLoader getPluginClassLoader()
+    #   Properties getPluginProperties()
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addColumnToMyTorrentsTable', ['String', 'PluginMyTorrentsItemFactory']),
+        _method('addColumnToPeersTable', ['String', 'PluginPeerItemFactory']),
+        _method('addConfigSection', ['ConfigSection']),
+        _method('addConfigUIParameters', ['Parameter[]', 'String']),
+        _method('addEventListener', ['PluginEventListener']),
+        _method('addListener', ['PluginListener']),
+        _method('firePluginEvent', ['PluginEvent']),
+        _method('getAzureusName', [], 'String'),
+        _method('getAzureusVersion', [], 'String'),
+        _method('getClientIDManager', [], 'ClientIDManager'),
+        _method('getConnectionManager', [], 'ConnectionManager'),
+        _method('getDistributedDatabase', [], 'DistributedDatabase'),
+        _method('getDownloadManager', [], 'DownloadManager'),
+        _method('getIPC', [], 'IPCInterface'),
+        _method('getIPFilter', [], 'IPFilter'),
+        _method('getLogger', [], 'Logger'),
+        _method('getMessageManager', [], 'MessageManager'),
+        _method('getPeerProtocolManager', [], 'PeerProtocolManager'),
+        _method('getPlatformManager', [], 'PlatformManager'),
+        _method('getPlugin', [], 'Plugin'),
+        _method('getPluginconfig', [], 'PluginConfig'),
+        _method('getPluginConfigUIFactory', [], 'PluginConfigUIFactory'),
+        _method('getPluginDirectoryName', [], 'String'),
+        _method('getPluginID', [], 'String'),
+        _method('getPluginManager', [], 'PluginManager'),
+        _method('getPluginName', [], 'String'),
+        _method('getPluginVersion', [], 'String'),
+        _method('getShareManager', [], 'ShareManager'),
+        _method('getShortCuts', [], 'ShortCuts'),
+        _method('getTorrentManager', [], 'TorrentManager'),
+        _method('getTracker', [], 'Tracker'),
+        _method('getUIManager', [], 'UIManager'),
+        _method('getUpdateManager', [], 'UpdateManager'),
+        _method('getUtilities', [], 'Utilities'),
+        _method('isBuiltIn', [], 'boolean'),
+        _method('isDisabled', [], 'boolean'),
+        _method('isMandatory', [], 'boolean'),
+        _method('isOperational', [], 'boolean'),
+        _method('isUnloadable', [], 'boolean'),
+        _method('openTorrentFile', ['String']),
+        _method('openTorrentURL', ['String']),
+        _method('reload'),
+        _method('removeConfigSection', ['ConfigSection']),
+        _method('removeEventListener', ['PluginEventListener']),
+        _method('removeListener', ['PluginListener']),
+        _method('setDisabled', ['boolean']),
+        _method('uninstall'),
+        _method('unload'),
+    ])
+    
+    # Precalculated attributes sent back by the XML/HTTP plugin (see the
+    # module docstring for how __az_attributes__ is used).
+    __az_attributes__ = {
+        'azureus_name': 'String',
+        'azureus_version': 'String',
+        'plugin_id': 'String',
+        'plugin_name': 'String',
+    }
+
+# Generated by class_defs_make.py - descriptor for PluginListener.
+class PluginListenerDataType:
+    
+    def get_xml_type():
+        return "PluginListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('closedownComplete'),
+        _method('closedownInitiated'),
+        _method('initializationComplete'),
+    ])
+
+# Generated by class_defs_make.py - descriptor for PluginManager.
+class PluginManagerDataType:
+    
+    def get_xml_type():
+        return "PluginManager"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'PR_MULTI_INSTANCE': "MULTI_INSTANCE",
+        'UI_NONE': 0,
+        'UI_SWT': 1,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   boolean equals(Object)
+    #   Class getClass()
+    #   PluginInterface getPluginInterfaceByClass(Class)
+    #   void registerPlugin(Class)
+    #   PluginManager startAzureus(int, Properties)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('firePluginEvent', ['int']),
+        _method('getDefaultPluginInterface', [], 'PluginInterface'),
+        _method('getDefaults', [], 'PluginManagerDefaults'),
+        _method('getPluginInstaller', [], 'PluginInstaller'),
+        _method('getPluginInterfaceByClass', ['String'], 'PluginInterface'),
+        _method('getPluginInterfaceByID', ['String'], 'PluginInterface'),
+        _method('getPluginInterfaces', [], 'PluginInterface[]'),
+        _method('getPlugins', [], 'PluginInterface[]'),
+        _method('hashCode', [], 'int'),
+        _method('notify'),
+        _method('notifyAll'),
+        _method('refreshPluginList'),
+        _method('registerPlugin', ['Plugin', 'String']),
+        _method('restartAzureus'),
+        _method('stopAzureus'),
+        _method('toString', [], 'String'),
+        _method('wait', ['long', 'int']),
+        _method('wait'),
+        _method('wait', ['long']),
+    ])
+
+# Generated by class_defs_make.py - descriptor for PluginManagerArgumentHandler.
+class PluginManagerArgumentHandlerDataType:
+    
+    def get_xml_type():
+        return "PluginManagerArgumentHandler"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('processArguments', ['String[]']),
+    ])
+
+# Generated by class_defs_make.py - descriptor for PluginManagerDefaults.
+class PluginManagerDefaultsDataType:
+    
+    def get_xml_type():
+        return "PluginManagerDefaults"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'PID_CLIENT_ID': "Client ID",
+        'PID_CORE_PATCH_CHECKER': "Core Patch Checker",
+        'PID_CORE_UPDATE_CHECKER': "Core Update Checker",
+        'PID_DEFAULT_TRACKER_WEB': "Default Tracker Web",
+        'PID_DHT': "DHT",
+        'PID_DHT_TRACKER': "DHT Tracker",
+        'PID_EXTERNAL_SEED': "External Seed",
+        'PID_JPC': "JPC",
+        'PID_LOCAL_TRACKER': "Local Tracker",
+        'PID_MAGNET': "Magnet URI Handler",
+        'PID_PLATFORM_CHECKER': "Platform Checker",
+        'PID_PLUGIN_UPDATE_CHECKER': "Plugin Update Checker",
+        'PID_REMOVE_RULES': "Torrent Removal Rules",
+        'PID_SHARE_HOSTER': "Share Hoster",
+        'PID_START_STOP_RULES': "Start/Stop Rules",
+        'PID_UPNP': "UPnP",
+        'PLUGIN_IDS': [
+            "Start/Stop Rules",
+            "Torrent Removal Rules",
+            "Share Hoster",
+            "Default Tracker Web",
+            "Core Update Checker",
+            "Core Patch Checker",
+            "Platform Checker",
+            "UPnP",
+            "Client ID",
+            "DHT",
+            "DHT Tracker",
+            "Magnet URI Handler",
+            "JPC",
+            "External Seed",
+            "Local Tracker",
+        ],
+    }
+    
+    __az_methods__ = AzureusMethods([
+        _method('getApplicationEntryPoint', [], 'String'),
+        _method('getApplicationIdentifier', [], 'String'),
+        _method('getApplicationName', [], 'String'),
+        _method('getDefaultPlugins', [], 'String[]'),
+        _method('isDefaultPluginEnabled', ['String'], 'boolean'),
+        _method('setApplicationEntryPoint', ['String']),
+        _method('setApplicationIdentifier', ['String']),
+        _method('setApplicationName', ['String']),
+        _method('setDefaultPluginEnabled', ['String', 'boolean']),
+        _method('setSingleInstanceHandler', ['int', 'PluginManagerArgumentHandler']),
+    ])
+
+# Generated by class_defs_make.py - descriptor for UnloadablePlugin.
+class UnloadablePluginDataType:
+    
+    def get_xml_type():
+        return "UnloadablePlugin"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('initialize', ['PluginInterface']),
+        _method('unload'),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.clientid
+#
+#------------------------------------------------------------------------------
+
+# Generated by class_defs_make.py - descriptor for ClientIDGenerator.
+class ClientIDGeneratorDataType:
+    
+    def get_xml_type():
+        return "ClientIDGenerator"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'PR_URL': "URL",
+        'PR_USER_AGENT': "User-Agent",
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void generateHTTPProperties(Properties)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('filterHTTP', ['String[]'], 'String[]'),
+        _method('generatePeerID', ['Torrent', 'boolean'], 'byte[]'),
+    ])
+
+# Generated by class_defs_make.py - descriptor for ClientIDManager.
+class ClientIDManagerDataType:
+    
+    def get_xml_type():
+        return "ClientIDManager"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('getGenerator', [], 'ClientIDGenerator'),
+        _method('setGenerator', ['ClientIDGenerator', 'boolean']),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.config
+#
+#------------------------------------------------------------------------------
+
+# Generated by class_defs_make.py - descriptor for ConfigParameter.
+class ConfigParameterDataType:
+    
+    def get_xml_type():
+        return "ConfigParameter"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('addConfigParameterListener', ['ConfigParameterListener']),
+        _method('removeConfigParameterListener', ['ConfigParameterListener']),
+    ])
+
+# Generated by class_defs_make.py - descriptor for ConfigParameterListener.
+class ConfigParameterListenerDataType:
+    
+    def get_xml_type():
+        return "ConfigParameterListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('configParameterChanged', ['ConfigParameter']),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ddb
+#
+#------------------------------------------------------------------------------
+
+# Generated by class_defs_make.py - descriptor for DistributedDatabase.
+class DistributedDatabaseDataType:
+    
+    def get_xml_type():
+        return "DistributedDatabase"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'OP_EXHAUSTIVE_READ': 1,
+        'OP_NONE': 0,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   DistributedDatabaseKey createKey(Object)
+    #   DistributedDatabaseKey createKey(Object, String)
+    #   DistributedDatabaseValue createValue(Object)
+    #   DistributedDatabaseContact importContact(InetSocketAddress)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addTransferHandler', ['DistributedDatabaseTransferType', 'DistributedDatabaseTransferHandler']),
+        _method('delete', ['DistributedDatabaseListener', 'DistributedDatabaseKey']),
+        _method('getStandardTransferType', ['int'], 'DistributedDatabaseTransferType'),
+        _method('isAvailable', [], 'boolean'),
+        _method('isExtendedUseAllowed', [], 'boolean'),
+        _method('read', ['DistributedDatabaseListener', 'DistributedDatabaseKey', 'long']),
+        _method('read', ['DistributedDatabaseListener', 'DistributedDatabaseKey', 'long', 'int']),
+        _method('write', ['DistributedDatabaseListener', 'DistributedDatabaseKey', 'DistributedDatabaseValue[]']),
+        _method('write', ['DistributedDatabaseListener', 'DistributedDatabaseKey', 'DistributedDatabaseValue']),
+    ])
+
class DistributedDatabaseContactDataType:
    """Descriptor for the remote Azureus DistributedDatabaseContact type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DistributedDatabaseContact"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   InetSocketAddress getAddress()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getName', [], 'String'),
        _method('isAlive', ['long'], 'boolean'),
        _method('isOrHasBeenLocal', [], 'boolean'),
        _method('openTunnel', [], 'boolean'),
        _method('read', ['DistributedDatabaseProgressListener', 'DistributedDatabaseTransferType', 'DistributedDatabaseKey', 'long'], 'DistributedDatabaseValue'),
        _method('write', ['DistributedDatabaseTransferType', 'DistributedDatabaseKey', 'DistributedDatabaseValue']),
    ])
+
class DistributedDatabaseEventDataType:
    """Descriptor for the remote Azureus DistributedDatabaseEvent type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DistributedDatabaseEvent"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (event type codes).
    __az_constants__ = {
        'ET_OPERATION_COMPLETE': 4,
        'ET_OPERATION_TIMEOUT': 5,
        'ET_VALUE_DELETED': 3,
        'ET_VALUE_READ': 2,
        'ET_VALUE_WRITTEN': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getContact', [], 'DistributedDatabaseContact'),
        _method('getKey', [], 'DistributedDatabaseKey'),
        _method('getType', [], 'int'),
        _method('getValue', [], 'DistributedDatabaseValue'),
    ])
+
class DistributedDatabaseKeyDataType:
    """Descriptor for the remote Azureus DistributedDatabaseKey type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DistributedDatabaseKey"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Object getKey()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getDescription', [], 'String'),
    ])
+
class DistributedDatabaseListenerDataType:
    """Descriptor for the remote Azureus DistributedDatabaseListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('event', ['DistributedDatabaseEvent']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DistributedDatabaseListener")
+
class DistributedDatabaseProgressListenerDataType:
    """Descriptor for the remote Azureus DistributedDatabaseProgressListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('reportActivity', ['String']),
        _method('reportCompleteness', ['int']),
        _method('reportSize', ['long']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DistributedDatabaseProgressListener")
+
class DistributedDatabaseTransferHandlerDataType:
    """Descriptor for the remote Azureus DistributedDatabaseTransferHandler type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('read', ['DistributedDatabaseContact', 'DistributedDatabaseTransferType', 'DistributedDatabaseKey'], 'DistributedDatabaseValue'),
        _method('write', ['DistributedDatabaseContact', 'DistributedDatabaseTransferType', 'DistributedDatabaseKey', 'DistributedDatabaseValue']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DistributedDatabaseTransferHandler")
+
class DistributedDatabaseTransferTypeDataType:
    """Descriptor for the remote Azureus DistributedDatabaseTransferType type."""

    # Integer constants defined by this type.
    __az_constants__ = {'ST_TORRENT': 1}

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DistributedDatabaseTransferType")
+
class DistributedDatabaseValueDataType:
    """Descriptor for the remote Azureus DistributedDatabaseValue type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DistributedDatabaseValue"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Object getValue(Class)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getContact', [], 'DistributedDatabaseContact'),
        _method('getCreationTime', [], 'long'),
        _method('getVersion', [], 'long'),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.disk
+#
+#------------------------------------------------------------------------------
+
class DiskManagerDataType:
    """Descriptor for the remote Azureus DiskManager type.

    No methods or constants are exposed remotely for this type; only the
    XML wire name is published.
    """

    get_xml_type = staticmethod(lambda: "DiskManager")
+
class DiskManagerChannelDataType:
    """Descriptor for the remote Azureus DiskManagerChannel type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('createRequest', [], 'DiskManagerRequest'),
        _method('destroy'),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DiskManagerChannel")
+
class DiskManagerEventDataType:
    """Descriptor for the remote Azureus DiskManagerEvent type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DiskManagerEvent"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (event type codes).
    __az_constants__ = {
        'EVENT_TYPE_BLOCKED': 3,
        'EVENT_TYPE_FAILED': 2,
        'EVENT_TYPE_SUCCESS': 1,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Throwable getFailure()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getBuffer', [], 'PooledByteBuffer'),
        _method('getLength', [], 'int'),
        _method('getOffset', [], 'long'),
        _method('getRequest', [], 'DiskManagerRequest'),
        _method('getType', [], 'int'),
    ])
+
class DiskManagerFileInfoDataType:
    """Descriptor for the remote Azureus DiskManagerFileInfo type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DiskManagerFileInfo"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (access mode codes).
    __az_constants__ = {
        'READ': 1,
        'WRITE': 2,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('createChannel', [], 'DiskManagerChannel'),
        _method('getAccessMode', [], 'int'),
        _method('getDownload', [], 'Download'),
        _method('getDownloaded', [], 'long'),
        _method('getFile', [], 'File'),
        _method('getFirstPieceNumber', [], 'int'),
        _method('getIndex', [], 'int'),
        _method('getLength', [], 'long'),
        _method('getLink', [], 'File'),
        _method('getNumPieces', [], 'int'),
        _method('isDeleted', [], 'boolean'),
        _method('isPriority', [], 'boolean'),
        _method('isSkipped', [], 'boolean'),
        _method('setDeleted', ['boolean']),
        _method('setLink', ['File']),
        _method('setPriority', ['boolean']),
        _method('setSkipped', ['boolean']),
    ])
    
    # Remotely readable attributes: attribute name -> declared type.
    __az_attributes__ = {
        'access_mode': 'int',
        'downloaded': 'long',
        'file': 'File',
        'first_piece_number': 'int',
        'is_priority': 'boolean',
        'is_skipped': 'boolean',
        'num_pieces': 'int',
    }
+
class DiskManagerListenerDataType:
    """Descriptor for the remote Azureus DiskManagerListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('eventOccurred', ['DiskManagerEvent']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DiskManagerListener")
+
class DiskManagerRequestDataType:
    """Descriptor for the remote Azureus DiskManagerRequest type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DiskManagerRequest"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type.
    __az_constants__ = {
        'REQUEST_READ': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addListener', ['DiskManagerListener']),
        _method('cancel'),
        _method('removeListener', ['DiskManagerListener']),
        _method('run'),
        _method('setLength', ['long']),
        _method('setOffset', ['long']),
        _method('setType', ['int']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.download
+#
+#------------------------------------------------------------------------------
+
class DownloadDataType:
    """Descriptor for the remote Azureus Download type.

    This is the main per-torrent type: its constants cover download
    states/flags/priorities and its method table mirrors the remotely
    callable portion of the Java Download interface.
    """
    
    def get_xml_type():
        # XML wire name of this type.
        return "Download"
    get_xml_type = staticmethod(get_xml_type)
    
    # Constants defined by this type.  ST_NAMES maps a state code
    # (ST_WAITING .. ST_QUEUED) to its display name, with index 0 unused.
    __az_constants__ = {
        'FLAG_ONLY_EVER_SEEDED': 1,
        'FLAG_SCAN_INCOMPLETE_PIECES': 2,
        'PR_HIGH_PRIORITY': 1,
        'PR_LOW_PRIORITY': 2,
        'ST_DOWNLOADING': 4,
        'ST_ERROR': 8,
        'ST_NAMES': [
            "",
            "Waiting",
            "Preparing",
            "Ready",
            "Downloading",
            "Seeding",
            "Stopping",
            "Stopped",
            "Error",
            "Queued",
        ],
        'ST_PREPARING': 2,
        'ST_QUEUED': 9,
        'ST_READY': 3,
        'ST_SEEDING': 5,
        'ST_STOPPED': 7,
        'ST_STOPPING': 6,
        'ST_WAITING': 1,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Map getMapAttribute(TorrentAttribute)
    #   void setMapAttribute(TorrentAttribute, Map)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addDownloadWillBeRemovedListener', ['DownloadWillBeRemovedListener']),
        _method('addListener', ['DownloadListener']),
        _method('addPeerListener', ['DownloadPeerListener']),
        _method('addPropertyListener', ['DownloadPropertyListener']),
        _method('addTrackerListener', ['DownloadTrackerListener']),
        _method('canBeRemoved', [], 'boolean'),
        _method('getAttribute', ['TorrentAttribute'], 'String'),
        _method('getCategoryName', [], 'String'),
        _method('getCreationTime', [], 'long'),
        _method('getDiskManager', [], 'DiskManager'),
        _method('getDiskManagerFileInfo', [], 'DiskManagerFileInfo[]'),
        _method('getDownloadPeerId', [], 'byte[]'),
        _method('getErrorStateDetails', [], 'String'),
        _method('getFlag', ['long'], 'boolean'),
        _method('getIndex', [], 'int'),
        _method('getLastAnnounceResult', [], 'DownloadAnnounceResult'),
        _method('getLastScrapeResult', [], 'DownloadScrapeResult'),
        _method('getListAttribute', ['TorrentAttribute'], 'String[]'),
        _method('getMaximumDownloadKBPerSecond', [], 'int'),
        _method('getName', [], 'String'),
        _method('getPeerManager', [], 'PeerManager'),
        _method('getPosition', [], 'int'),
        _method('getPriority', [], 'int'),
        _method('getSavePath', [], 'String'),
        _method('getSeedingRank', [], 'int'),
        _method('getState', [], 'int'),
        _method('getStats', [], 'DownloadStats'),
        _method('getSubState', [], 'int'),
        _method('getTorrent', [], 'Torrent'),
        _method('getTorrentFileName', [], 'String'),
        _method('getUploadRateLimitBytesPerSecond', [], 'int'),
        _method('initialize'),
        _method('isChecking', [], 'boolean'),
        _method('isComplete', [], 'boolean'),
        _method('isCompleteExcludingDND', [], 'boolean'),
        _method('isForceStart', [], 'boolean'),
        _method('isMessagingEnabled', [], 'boolean'),
        _method('isPaused', [], 'boolean'),
        _method('isPersistent', [], 'boolean'),
        _method('isPriorityLocked', [], 'boolean'),
        _method('isStartStopLocked', [], 'boolean'),
        _method('moveDataFiles', ['File']),
        _method('moveDown'),
        _method('moveTo', ['int']),
        _method('moveTorrentFile', ['File']),
        _method('moveUp'),
        _method('recheckData'),
        _method('remove'),
        _method('remove', ['boolean', 'boolean']),
        _method('removeDownloadWillBeRemovedListener', ['DownloadWillBeRemovedListener']),
        _method('removeListener', ['DownloadListener']),
        _method('removePeerListener', ['DownloadPeerListener']),
        _method('removePropertyListener', ['DownloadPropertyListener']),
        _method('removeTrackerListener', ['DownloadTrackerListener']),
        _method('requestTrackerAnnounce'),
        _method('requestTrackerAnnounce', ['boolean']),
        _method('requestTrackerScrape', ['boolean']),
        _method('restart'),
        _method('setAnnounceResult', ['DownloadAnnounceResult']),
        _method('setAttribute', ['TorrentAttribute', 'String']),
        _method('setCategory', ['String']),
        _method('setForceStart', ['boolean']),
        _method('setMaximumDownloadKBPerSecond', ['int']),
        _method('setMessagingEnabled', ['boolean']),
        _method('setPosition', ['int']),
        _method('setPriority', ['int']),
        _method('setScrapeResult', ['DownloadScrapeResult']),
        _method('setSeedingRank', ['int']),
        _method('setSessionAuthenticator', ['SessionAuthenticator']),
        _method('setUploadRateLimitBytesPerSecond', ['int']),
        _method('start'),
        _method('stop'),
        _method('stopAndQueue'),
    ])
    
    # Remotely readable attributes: attribute name -> declared type.
    __az_attributes__ = {
        'announce_result': 'DownloadAnnounceResult',
        'force_start': 'boolean',
        'position': 'int',
        'scrape_result': 'DownloadScrapeResult',
        'stats': 'DownloadStats',
        'torrent': 'Torrent',
    }
+
class DownloadAnnounceResultDataType:
    """Descriptor for the remote Azureus DownloadAnnounceResult type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DownloadAnnounceResult"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (response type codes).
    __az_constants__ = {
        'RT_ERROR': 2,
        'RT_SUCCESS': 1,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Map getExtensions()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getDownload', [], 'Download'),
        _method('getError', [], 'String'),
        _method('getNonSeedCount', [], 'int'),
        _method('getPeers', [], 'DownloadAnnounceResultPeer[]'),
        _method('getReportedPeerCount', [], 'int'),
        _method('getResponseType', [], 'int'),
        _method('getSeedCount', [], 'int'),
        _method('getTimeToWait', [], 'long'),
        _method('getURL', [], 'URL'),
    ])
    
    # Remotely readable attributes: attribute name -> declared type.
    __az_attributes__ = {
        'non_seed_count': 'int',
        'seed_count': 'int',
    }
+
class DownloadAnnounceResultPeerDataType:
    """Descriptor for the remote Azureus DownloadAnnounceResultPeer type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DownloadAnnounceResultPeer"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (peer protocol codes).
    __az_constants__ = {
        'PROTOCOL_CRYPT': 2,
        'PROTOCOL_NORMAL': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getAddress', [], 'String'),
        _method('getPeerID', [], 'byte[]'),
        _method('getPort', [], 'int'),
        _method('getProtocol', [], 'short'),
        _method('getSource', [], 'String'),
    ])
+
class DownloadListenerDataType:
    """Descriptor for the remote Azureus DownloadListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('positionChanged', ['Download', 'int', 'int']),
        _method('stateChanged', ['Download', 'int', 'int']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DownloadListener")
+
class DownloadManagerDataType:
    """Descriptor for the remote Azureus DownloadManager type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DownloadManager"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addDownload', ['Torrent', 'File', 'File'], 'Download'),
        _method('addDownload', ['URL', 'URL']),
        _method('addDownload', ['Torrent'], 'Download'),
        _method('addDownload', ['URL']),
        _method('addDownload', ['File']),
        _method('addDownloadWillBeAddedListener', ['DownloadWillBeAddedListener']),
        _method('addListener', ['DownloadManagerListener']),
        _method('addNonPersistentDownload', ['Torrent', 'File', 'File'], 'Download'),
        _method('getDownload', ['byte[]'], 'Download'),
        _method('getDownload', ['Torrent'], 'Download'),
        _method('getDownloads', [], 'Download[]'),
        _method('getDownloads', ['boolean'], 'Download[]'),
        _method('getStats', [], 'DownloadManagerStats'),
        _method('isSeedingOnly', [], 'boolean'),
        _method('pauseDownloads'),
        _method('removeDownloadWillBeAddedListener', ['DownloadWillBeAddedListener']),
        _method('removeListener', ['DownloadManagerListener']),
        _method('resumeDownloads'),
        _method('startAllDownloads'),
        _method('stopAllDownloads'),
    ])
+
class DownloadManagerListenerDataType:
    """Descriptor for the remote Azureus DownloadManagerListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('downloadAdded', ['Download']),
        _method('downloadRemoved', ['Download']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DownloadManagerListener")
+
class DownloadManagerStatsDataType:
    """Descriptor for the remote Azureus DownloadManagerStats type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DownloadManagerStats"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getDataBytesReceived', [], 'long'),
        _method('getDataBytesSent', [], 'long'),
        _method('getDataReceiveRate', [], 'int'),
        _method('getDataSendRate', [], 'int'),
        _method('getOverallDataBytesReceived', [], 'long'),
        _method('getOverallDataBytesSent', [], 'long'),
        _method('getProtocolBytesReceived', [], 'long'),
        _method('getProtocolBytesSent', [], 'long'),
        _method('getProtocolReceiveRate', [], 'int'),
        _method('getProtocolSendRate', [], 'int'),
        _method('getSessionUptimeSeconds', [], 'long'),
    ])
+
class DownloadPeerListenerDataType:
    """Descriptor for the remote Azureus DownloadPeerListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('peerManagerAdded', ['Download', 'PeerManager']),
        _method('peerManagerRemoved', ['Download', 'PeerManager']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DownloadPeerListener")
+
class DownloadPropertyEventDataType:
    """Descriptor for the remote Azureus DownloadPropertyEvent type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DownloadPropertyEvent"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (property event codes).
    __az_constants__ = {
        'PT_TORRENT_ATTRIBUTE_WILL_BE_READ': 2,
        'PT_TORRENT_ATTRIBUTE_WRITTEN': 1,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Object getData()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getType', [], 'int'),
    ])
+
class DownloadPropertyListenerDataType:
    """Descriptor for the remote Azureus DownloadPropertyListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('propertyChanged', ['Download', 'DownloadPropertyEvent']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DownloadPropertyListener")
+
class DownloadScrapeResultDataType:
    """Descriptor for the remote Azureus DownloadScrapeResult type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DownloadScrapeResult"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (response type codes).
    __az_constants__ = {
        'RT_ERROR': 2,
        'RT_SUCCESS': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getDownload', [], 'Download'),
        _method('getNextScrapeStartTime', [], 'long'),
        _method('getNonSeedCount', [], 'int'),
        _method('getResponseType', [], 'int'),
        _method('getScrapeStartTime', [], 'long'),
        _method('getSeedCount', [], 'int'),
        _method('getStatus', [], 'String'),
        _method('getURL', [], 'URL'),
        _method('setNextScrapeStartTime', ['long']),
    ])
    
    # Remotely readable attributes: attribute name -> declared type.
    __az_attributes__ = {
        'non_seed_count': 'int',
        'seed_count': 'int',
    }
+
class DownloadStatsDataType:
    """Descriptor for the remote Azureus DownloadStats type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "DownloadStats"
    get_xml_type = staticmethod(get_xml_type)
    
    # Integer constants defined by this type (health codes).
    __az_constants__ = {
        'HEALTH_ERROR': 6,
        'HEALTH_KO': 5,
        'HEALTH_NO_REMOTE': 3,
        'HEALTH_NO_TRACKER': 2,
        'HEALTH_OK': 4,
        'HEALTH_STOPPED': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getAvailability', [], 'float'),
        _method('getCompleted', [], 'int'),
        _method('getDiscarded', [], 'long'),
        _method('getDownloadAverage', [], 'long'),
        _method('getDownloadCompleted', ['boolean'], 'int'),
        _method('getDownloadDirectory', [], 'String'),
        _method('getDownloaded', [], 'long'),
        _method('getElapsedTime', [], 'String'),
        _method('getETA', [], 'String'),
        _method('getHashFails', [], 'long'),
        _method('getHealth', [], 'int'),
        _method('getSecondsDownloading', [], 'long'),
        _method('getSecondsOnlySeeding', [], 'long'),
        _method('getShareRatio', [], 'int'),
        _method('getStatus', ['boolean'], 'String'),
        _method('getStatus', [], 'String'),
        _method('getTargetFileOrDir', [], 'String'),
        _method('getTimeStarted', [], 'long'),
        _method('getTimeStartedSeeding', [], 'long'),
        _method('getTotalAverage', [], 'long'),
        _method('getTrackerStatus', [], 'String'),
        _method('getUploadAverage', [], 'long'),
        _method('getUploaded', [], 'long'),
    ])
    
    # Remotely readable attributes: attribute name -> declared type.
    __az_attributes__ = {
        'availability': 'float',
        'completed': 'int',
        'download_average': 'long',
        'downloadCompletedLive': 'int',
        'downloadCompletedStored': 'int',
        'downloaded': 'long',
        'eta': 'String',
        'health': 'int',
        'share_ratio': 'int',
        'status': 'String',
        'status_localised': 'String',
        'upload_average': 'long',
        'uploaded': 'long',
    }
+
class DownloadTrackerListenerDataType:
    """Descriptor for the remote Azureus DownloadTrackerListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('announceResult', ['DownloadAnnounceResult']),
        _method('scrapeResult', ['DownloadScrapeResult']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DownloadTrackerListener")
+
class DownloadWillBeAddedListenerDataType:
    """Descriptor for the remote Azureus DownloadWillBeAddedListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('initialised', ['Download']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DownloadWillBeAddedListener")
+
class DownloadWillBeRemovedListenerDataType:
    """Descriptor for the remote Azureus DownloadWillBeRemovedListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('downloadWillBeRemoved', ['Download']),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "DownloadWillBeRemovedListener")
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.download.session
+#
+#------------------------------------------------------------------------------
+
class SessionAuthenticatorDataType:
    """Descriptor for the remote Azureus SessionAuthenticator type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "SessionAuthenticator"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Map createSessionSyn(Peer)
    #   void verifySessionAck(Peer, Map)
    #   Map verifySessionSyn(Peer, Map)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('decodeSessionData', ['Peer', 'PooledByteBuffer'], 'PooledByteBuffer'),
        _method('encodeSessionData', ['Peer', 'PooledByteBuffer'], 'PooledByteBuffer'),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.installer
+#
+#------------------------------------------------------------------------------
+
class FilePluginInstallerDataType:
    """Descriptor for the remote Azureus FilePluginInstaller type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "FilePluginInstaller"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getAlreadyInstalledPlugin', [], 'PluginInterface'),
        _method('getDescription', [], 'String'),
        _method('getFile', [], 'File'),
        _method('getId', [], 'String'),
        _method('getInstaller', [], 'PluginInstaller'),
        _method('getName', [], 'String'),
        _method('getVersion', [], 'String'),
        _method('install', ['boolean']),
        _method('uninstall'),
    ])
+
class InstallablePluginDataType:
    """Descriptor for the remote Azureus InstallablePlugin type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "InstallablePlugin"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getAlreadyInstalledPlugin', [], 'PluginInterface'),
        _method('getDescription', [], 'String'),
        _method('getId', [], 'String'),
        _method('getInstaller', [], 'PluginInstaller'),
        _method('getName', [], 'String'),
        _method('getVersion', [], 'String'),
        _method('install', ['boolean']),
        _method('uninstall'),
    ])
+
class PluginInstallerDataType:
    """Descriptor for the remote Azureus PluginInstaller type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "PluginInstaller"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addListener', ['PluginInstallerListener']),
        _method('getStandardPlugins', [], 'StandardPlugin[]'),
        _method('install', ['InstallablePlugin[]', 'boolean']),
        _method('installFromFile', ['File'], 'FilePluginInstaller'),
        _method('removeListener', ['PluginInstallerListener']),
        _method('requestInstall', ['String', 'InstallablePlugin']),
        _method('uninstall', ['PluginInterface[]']),
        _method('uninstall', ['PluginInterface']),
    ])
+
class PluginInstallerListenerDataType:
    """Descriptor for the remote Azureus PluginInstallerListener type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('installRequest', ['String', 'InstallablePlugin'], 'boolean'),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "PluginInstallerListener")
+
class StandardPluginDataType:
    """Descriptor for the remote Azureus StandardPlugin type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "StandardPlugin"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getAlreadyInstalledPlugin', [], 'PluginInterface'),
        _method('getDescription', [], 'String'),
        _method('getId', [], 'String'),
        _method('getInstaller', [], 'PluginInstaller'),
        _method('getName', [], 'String'),
        _method('getVersion', [], 'String'),
        _method('install', ['boolean']),
        _method('uninstall'),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ipc
+#
+#------------------------------------------------------------------------------
+
class IPCInterfaceDataType:
    """Descriptor for the remote Azureus IPCInterface type.

    No methods are published remotely for this type: the only method
    on the Java interface,

        Object invoke(String, Object[])

    uses types that are not remotely supported.
    """

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "IPCInterface")
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ipfilter
+#
+#------------------------------------------------------------------------------
+
class IPBlockedDataType:
    """Descriptor for the remote Azureus IPBlocked type."""

    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getBlockedIP', [], 'String'),
        _method('getBlockedTime', [], 'long'),
        _method('getBlockedTorrentName', [], 'String'),
        _method('getBlockingRange', [], 'IPRange'),
    ])

    # XML wire name of this type.
    get_xml_type = staticmethod(lambda: "IPBlocked")
+
class IPFilterDataType:
    """Descriptor for the remote Azureus IPFilter type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "IPFilter"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addRange', ['IPRange']),
        _method('block', ['String']),
        _method('createAndAddRange', ['String', 'String', 'String', 'boolean'], 'IPRange'),
        _method('createRange', ['boolean'], 'IPRange'),
        _method('getBlockedIPs', [], 'IPBlocked[]'),
        _method('getFile', [], 'File'),
        _method('getInRangeAddressesAreAllowed', [], 'boolean'),
        _method('getLastUpdateTime', [], 'long'),
        _method('getNumberOfBlockedIPs', [], 'int'),
        _method('getNumberOfRanges', [], 'int'),
        _method('getRanges', [], 'IPRange[]'),
        _method('isEnabled', [], 'boolean'),
        _method('isInRange', ['String'], 'boolean'),
        _method('markAsUpToDate'),
        _method('reload'),
        _method('removeRange', ['IPRange']),
        _method('save'),
        _method('setEnabled', ['boolean']),
        _method('setInRangeAddressesAreAllowed', ['boolean']),
    ])
    
    # Remotely readable attributes: attribute name -> declared type.
    __az_attributes__ = {
        'last_update_time': 'long',
        'number_of_blocked_ips': 'int',
        'number_of_ranges': 'int',
    }
+
class IPRangeDataType:
    """Descriptor for the remote Azureus IPRange type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "IPRange"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   int compareTo(Object)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('checkValid'),
        _method('delete'),
        _method('getDescription', [], 'String'),
        _method('getEndIP', [], 'String'),
        _method('getStartIP', [], 'String'),
        _method('isInRange', ['String'], 'boolean'),
        _method('isSessionOnly', [], 'boolean'),
        _method('isValid', [], 'boolean'),
        _method('setDescription', ['String']),
        _method('setEndIP', ['String']),
        _method('setSessionOnly', ['boolean']),
        _method('setStartIP', ['String']),
    ])
    
    # Remotely readable attributes: attribute name -> declared type.
    __az_attributes__ = {
        'description': 'String',
        'end_ip': 'String',
        'start_ip': 'String',
    }
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.logging
+#
+#------------------------------------------------------------------------------
+
class LoggerDataType:
    """Descriptor for the remote Azureus Logger type."""
    
    def get_xml_type():
        # XML wire name of this type.
        return "Logger"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   void addFileLoggingListener(FileLoggerAdapter)
    #   void removeFileLoggingListener(FileLoggerAdapter)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addAlertListener', ['LoggerAlertListener']),
        _method('getChannel', ['String'], 'LoggerChannel'),
        _method('getChannels', [], 'LoggerChannel[]'),
        _method('getNullChannel', ['String'], 'LoggerChannel'),
        _method('getPluginInterface', [], 'PluginInterface'),
        _method('getTimeStampedChannel', ['String'], 'LoggerChannel'),
        _method('removeAlertListener', ['LoggerAlertListener']),
    ])
+
class LoggerAlertListenerDataType:
    """Type descriptor for the remote Azureus "LoggerAlertListener" interface."""
    
    def get_xml_type():
        return "LoggerAlertListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   void alertLogged(String, Throwable, boolean)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('alertLogged', ['int', 'String', 'boolean']),
    ])
+
class LoggerChannelDataType:
    """Type descriptor for the remote Azureus "LoggerChannel" interface."""
    
    # Remote constant name -> value (log-level identifiers).
    __az_constants__ = {
        'LT_ERROR': 3,
        'LT_INFORMATION': 1,
        'LT_WARNING': 2,
    }
    
    def get_xml_type():
        return "LoggerChannel"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   void log(Object, int, String)
    #   void log(Object[], int, String)
    #   void log(String, Throwable)
    #   void log(Throwable)
    #   void log(Object[], String, Throwable)
    #   void log(Object, String, Throwable)
    #   void logAlert(String, Throwable)
    #   void logAlertRepeatable(String, Throwable)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    # Note: 'log', 'logAlert' etc. appear once per supported Java overload.
    __az_methods__ = AzureusMethods([
        _method('addListener', ['LoggerChannelListener']),
        _method('getLogger', [], 'Logger'),
        _method('getName', [], 'String'),
        _method('isEnabled', [], 'boolean'),
        _method('log', ['String']),
        _method('log', ['int', 'String']),
        _method('logAlert', ['int', 'String']),
        _method('logAlertRepeatable', ['int', 'String']),
        _method('removeListener', ['LoggerChannelListener']),
        _method('setDiagnostic'),
    ])
    
    # Remotely readable attribute name -> Java type.
    __az_attributes__ = {
        'enabled': 'boolean',
        'name': 'String',
    }
+
class LoggerChannelListenerDataType:
    """Type descriptor for the remote Azureus "LoggerChannelListener" interface."""
    
    def get_xml_type():
        return "LoggerChannelListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   void messageLogged(String, Throwable)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('messageLogged', ['int', 'String']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.messaging
+#
+#------------------------------------------------------------------------------
+
class MessageDataType:
    """Type descriptor for the remote Azureus "Message" interface."""
    
    def get_xml_type():
        return "Message"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (payload type identifiers).
    __az_constants__ = {
        'TYPE_DATA_PAYLOAD': 1,
        'TYPE_PROTOCOL_PAYLOAD': 0,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Message create(ByteBuffer)
    #   ByteBuffer[] getPayload()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('destroy'),
        _method('getDescription', [], 'String'),
        _method('getID', [], 'String'),
        _method('getType', [], 'int'),
    ])
+
class MessageManagerDataType:
    """Type descriptor for the remote Azureus "MessageManager" interface."""
    
    def get_xml_type():
        return "MessageManager"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('cancelCompatiblePeersLocation', ['MessageManagerListener']),
        _method('deregisterMessageType', ['Message']),
        _method('locateCompatiblePeers', ['PluginInterface', 'Message', 'MessageManagerListener']),
        _method('registerMessageType', ['Message']),
    ])
+
class MessageManagerListenerDataType:
    """Type descriptor for the remote Azureus "MessageManagerListener" interface."""
    
    def get_xml_type():
        return "MessageManagerListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('compatiblePeerFound', ['Download', 'Peer', 'Message']),
        _method('peerRemoved', ['Download', 'Peer']),
    ])
+
class MessageStreamDecoderDataType:
    """Type descriptor for the remote Azureus "MessageStreamDecoder" interface."""
    
    def get_xml_type():
        return "MessageStreamDecoder"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   ByteBuffer destroy()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getDataBytesDecoded', [], 'int'),
        _method('getProtocolBytesDecoded', [], 'int'),
        _method('pauseDecoding'),
        _method('performStreamDecode', ['Transport', 'int'], 'int'),
        _method('removeDecodedMessages', [], 'Message[]'),
        _method('resumeDecoding'),
    ])
+
class MessageStreamEncoderDataType:
    """Type descriptor for the remote Azureus "MessageStreamEncoder" interface."""
    
    def get_xml_type():
        return "MessageStreamEncoder"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('encodeMessage', ['Message'], 'RawMessage'),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.network
+#
+#------------------------------------------------------------------------------
+
class ConnectionDataType:
    """Type descriptor for the remote Azureus "Connection" interface."""
    
    def get_xml_type():
        return "Connection"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('close'),
        _method('connect', ['ConnectionListener']),
        _method('getIncomingMessageQueue', [], 'IncomingMessageQueue'),
        _method('getOutgoingMessageQueue', [], 'OutgoingMessageQueue'),
        _method('startMessageProcessing'),
    ])
+
class ConnectionListenerDataType:
    """Type descriptor for the remote Azureus "ConnectionListener" interface."""
    
    def get_xml_type():
        return "ConnectionListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   void connectFailure(Throwable)
    #   void exceptionThrown(Throwable)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('connectStarted'),
        _method('connectSuccess'),
    ])
+
class ConnectionManagerDataType:
    """Type descriptor for the remote Azureus "ConnectionManager" interface."""
    
    def get_xml_type():
        return "ConnectionManager"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (NAT status codes for getNATStatus).
    __az_constants__ = {
        'NAT_BAD': 3,
        'NAT_OK': 1,
        'NAT_PROBABLY_OK': 2,
        'NAT_UNKNOWN': 0,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Connection createConnection(InetSocketAddress, MessageStreamEncoder, MessageStreamDecoder)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getNATStatus', [], 'int'),
    ])
+
class IncomingMessageQueueDataType:
    """Type descriptor for the remote Azureus "IncomingMessageQueue" interface."""
    
    def get_xml_type():
        return "IncomingMessageQueue"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('deregisterListener', ['IncomingMessageQueueListener']),
        _method('getPercentDoneOfCurrentMessage', [], 'int'),
        _method('notifyOfExternalReceive', ['Message']),
        _method('registerListener', ['IncomingMessageQueueListener']),
        _method('setDecoder', ['MessageStreamDecoder']),
    ])
+
class IncomingMessageQueueListenerDataType:
    """Type descriptor for the remote Azureus "IncomingMessageQueueListener" interface."""
    
    def get_xml_type():
        return "IncomingMessageQueueListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('bytesReceived', ['int']),
        _method('messageReceived', ['Message'], 'boolean'),
    ])
+
class OutgoingMessageQueueDataType:
    """Type descriptor for the remote Azureus "OutgoingMessageQueue" interface."""
    
    def get_xml_type():
        return "OutgoingMessageQueue"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('deregisterListener', ['OutgoingMessageQueueListener']),
        _method('getPercentDoneOfCurrentMessage', [], 'int'),
        _method('notifyOfExternalSend', ['Message']),
        _method('registerListener', ['OutgoingMessageQueueListener']),
        _method('sendMessage', ['Message']),
        _method('setEncoder', ['MessageStreamEncoder']),
    ])
+
class OutgoingMessageQueueListenerDataType:
    """Type descriptor for the remote Azureus "OutgoingMessageQueueListener" interface."""
    
    def get_xml_type():
        return "OutgoingMessageQueueListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('bytesSent', ['int']),
        _method('messageAdded', ['Message'], 'boolean'),
        _method('messageSent', ['Message']),
    ])
+
class RawMessageDataType:
    """Type descriptor for the remote Azureus "RawMessage" interface."""
    
    def get_xml_type():
        return "RawMessage"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (payload type identifiers).
    __az_constants__ = {
        'TYPE_DATA_PAYLOAD': 1,
        'TYPE_PROTOCOL_PAYLOAD': 0,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Message create(ByteBuffer)
    #   ByteBuffer[] getPayload()
    #   ByteBuffer[] getRawPayload()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('destroy'),
        _method('getDescription', [], 'String'),
        _method('getID', [], 'String'),
        _method('getOriginalMessage', [], 'Message'),
        _method('getType', [], 'int'),
    ])
+
class TransportDataType:
    """Type descriptor for the remote Azureus "Transport" interface.

    No methods are remotely invokable for this type (see below), so only
    the XML type name is exposed.
    """
    
    def get_xml_type():
        return "Transport"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   long read(ByteBuffer[], int, int)
    #   long write(ByteBuffer[], int, int)
    #   
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.peers
+#
+#------------------------------------------------------------------------------
+
class PeerDataType:
    """Type descriptor for the remote Azureus "Peer" interface."""
    
    def get_xml_type():
        return "Peer"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (peer connection states; "TRANSFERING"
    # spelling matches the upstream Azureus constant).
    __az_constants__ = {
        'CLOSING': 40,
        'CONNECTING': 10,
        'DISCONNECTED': 50,
        'HANDSHAKING': 20,
        'TRANSFERING': 30,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   List getExpiredRequests()
    #   Map getProperties()
    #   List getRequests()
    #   Object getUserData(Object)
    #   void setUserData(Object, Object)
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    # Note: add/removeListener appear once per supported Java overload.
    __az_methods__ = AzureusMethods([
        _method('addListener', ['PeerListener2']),
        _method('addListener', ['PeerListener']),
        _method('addRequest', ['PeerReadRequest'], 'boolean'),
        _method('cancelRequest', ['PeerReadRequest']),
        _method('close', ['String', 'boolean', 'boolean']),
        _method('getAvailable', [], 'boolean[]'),
        _method('getClient', [], 'String'),
        _method('getConnection', [], 'Connection'),
        _method('getId', [], 'byte[]'),
        _method('getIp', [], 'String'),
        _method('getManager', [], 'PeerManager'),
        _method('getNumberOfRequests', [], 'int'),
        _method('getPercentDone', [], 'int'),
        _method('getPercentDoneInThousandNotation', [], 'int'),
        _method('getPercentDoneOfCurrentIncomingRequest', [], 'int'),
        _method('getPercentDoneOfCurrentOutgoingRequest', [], 'int'),
        _method('getPort', [], 'int'),
        _method('getSnubbedTime', [], 'long'),
        _method('getState', [], 'int'),
        _method('getStats', [], 'PeerStats'),
        _method('getSupportedMessages', [], 'Message[]'),
        _method('getTCPListenPort', [], 'int'),
        _method('getUDPListenPort', [], 'int'),
        _method('isChoked', [], 'boolean'),
        _method('isChoking', [], 'boolean'),
        _method('isDownloadPossible', [], 'boolean'),
        _method('isIncoming', [], 'boolean'),
        _method('isInterested', [], 'boolean'),
        _method('isInteresting', [], 'boolean'),
        _method('isOptimisticUnchoke', [], 'boolean'),
        _method('isPieceAvailable', ['int'], 'boolean'),
        _method('isSeed', [], 'boolean'),
        _method('isSnubbed', [], 'boolean'),
        _method('isTransferAvailable', [], 'boolean'),
        _method('removeListener', ['PeerListener2']),
        _method('removeListener', ['PeerListener']),
        _method('setOptimisticUnchoke', ['boolean']),
        _method('setSnubbed', ['boolean']),
        _method('supportsMessaging', [], 'boolean'),
    ])
    
    # Remotely readable attribute name -> Java type.
    __az_attributes__ = {
        'ip': 'String',
        'port': 'int',
    }
+
class PeerEventDataType:
    """Type descriptor for the remote Azureus "PeerEvent" interface."""
    
    def get_xml_type():
        return "PeerEvent"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (event type identifiers for getType).
    __az_constants__ = {
        'ET_ADD_AVAILABILITY': 3,
        'ET_BAD_CHUNK': 2,
        'ET_REMOVE_AVAILABILITY': 4,
        'ET_STATE_CHANGED': 1,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Object getData()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getType', [], 'int'),
    ])
+
class PeerListenerDataType:
    """Type descriptor for the remote Azureus "PeerListener" interface."""
    
    def get_xml_type():
        return "PeerListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('sentBadChunk', ['int', 'int']),
        _method('stateChanged', ['int']),
    ])
+
class PeerListener2DataType:
    """Type descriptor for the remote Azureus "PeerListener2" interface."""
    
    def get_xml_type():
        return "PeerListener2"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('eventOccurred', ['PeerEvent']),
    ])
+
class PeerManagerDataType:
    """Type descriptor for the remote Azureus "PeerManager" interface."""
    
    def get_xml_type():
        return "PeerManager"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    # Note: 'addPeer' appears once per supported Java overload.
    __az_methods__ = AzureusMethods([
        _method('addListener', ['PeerManagerListener']),
        _method('addPeer', ['String', 'int']),
        _method('addPeer', ['Peer']),
        _method('addPeer', ['String', 'int', 'boolean']),
        _method('createPeerStats', ['Peer'], 'PeerStats'),
        _method('getDiskManager', [], 'DiskManager'),
        _method('getDownload', [], 'Download'),
        _method('getPeers', [], 'Peer[]'),
        _method('getStats', [], 'PeerManagerStats'),
        _method('isSeeding', [], 'boolean'),
        _method('isSuperSeeding', [], 'boolean'),
        _method('removeListener', ['PeerManagerListener']),
        _method('removePeer', ['Peer']),
        _method('requestCancelled', ['PeerReadRequest', 'Peer']),
        _method('requestComplete', ['PeerReadRequest', 'PooledByteBuffer', 'Peer']),
    ])
+
class PeerManagerListenerDataType:
    """Type descriptor for the remote Azureus "PeerManagerListener" interface."""
    
    def get_xml_type():
        return "PeerManagerListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('peerAdded', ['PeerManager', 'Peer']),
        _method('peerRemoved', ['PeerManager', 'Peer']),
    ])
+
class PeerManagerStatsDataType:
    """Type descriptor for the remote Azureus "PeerManagerStats" interface."""
    
    def get_xml_type():
        return "PeerManagerStats"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getConnectedLeechers', [], 'int'),
        _method('getConnectedSeeds', [], 'int'),
        _method('getDiscarded', [], 'long'),
        _method('getDownloadAverage', [], 'long'),
        _method('getDownloaded', [], 'long'),
        _method('getHashFailBytes', [], 'long'),
        _method('getUploadAverage', [], 'long'),
        _method('getUploaded', [], 'long'),
    ])
+
class PeerReadRequestDataType:
    """Type descriptor for the remote Azureus "PeerReadRequest" interface."""
    
    def get_xml_type():
        return "PeerReadRequest"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('cancel'),
        _method('getLength', [], 'int'),
        _method('getOffset', [], 'int'),
        _method('getPieceNumber', [], 'int'),
        _method('isCancelled', [], 'boolean'),
        _method('isExpired', [], 'boolean'),
        _method('resetTime', ['long']),
    ])
+
class PeerStatsDataType:
    """Type descriptor for the remote Azureus "PeerStats" interface."""
    
    def get_xml_type():
        return "PeerStats"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('discarded', ['int']),
        _method('getDownloadAverage', [], 'int'),
        _method('getReception', [], 'int'),
        _method('getStatisticSentAverage', [], 'int'),
        _method('getTimeSinceConnectionEstablished', [], 'long'),
        _method('getTotalAverage', [], 'int'),
        _method('getTotalDiscarded', [], 'long'),
        _method('getTotalReceived', [], 'long'),
        _method('getTotalSent', [], 'long'),
        _method('getUploadAverage', [], 'int'),
        _method('received', ['int']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.peers.protocol
+#
+#------------------------------------------------------------------------------
+
class PeerProtocolBTDataType:
    """Type descriptor for the remote Azureus "PeerProtocolBT" interface.

    All of this type's constants are Java bytes, which DOPAL cannot
    represent, so nothing beyond the XML type name is exposed.
    """
    
    def get_xml_type():
        return "PeerProtocolBT"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following constants are not defined here, because their types
    # are not available in DOPAL:
    # 
    #   "BT_BITFIELD" (has type "byte")
    #   "BT_CANCEL" (has type "byte")
    #   "BT_CHOKED" (has type "byte")
    #   "BT_HAVE" (has type "byte")
    #   "BT_INTERESTED" (has type "byte")
    #   "BT_PIECE" (has type "byte")
    #   "BT_REQUEST" (has type "byte")
    #   "BT_UNCHOKED" (has type "byte")
    #   "BT_UNINTERESTED" (has type "byte")
    #   
+
class PeerProtocolExtensionHandlerDataType:
    """Type descriptor for the remote Azureus "PeerProtocolExtensionHandler" interface.

    No methods are remotely invokable for this type (see below), so only
    the XML type name is exposed.
    """
    
    def get_xml_type():
        return "PeerProtocolExtensionHandler"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Peer[] handleExtension(PeerManager, Map)
    #   
+
class PeerProtocolManagerDataType:
    """Type descriptor for the remote Azureus "PeerProtocolManager" interface."""
    
    def get_xml_type():
        return "PeerProtocolManager"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('registerExtensionHandler', ['String', 'PeerProtocolExtensionHandler']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.platform
+#
+#------------------------------------------------------------------------------
+
class PlatformManagerDataType:
    """Type descriptor for the remote Azureus "PlatformManager" interface."""
    
    def get_xml_type():
        return "PlatformManager"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (location identifiers for getLocation).
    __az_constants__ = {
        'LOC_MUSIC': 2,
        'LOC_USER_DATA': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getLocation', ['long'], 'File'),
        _method('isAdditionalFileTypeRegistered', ['String', 'String'], 'boolean'),
        _method('registerAdditionalFileType', ['String', 'String', 'String', 'String']),
        _method('showFile', ['String']),
        _method('unregisterAdditionalFileType', ['String', 'String']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.sharing
+#
+#------------------------------------------------------------------------------
+
class ShareItemDataType:
    """Type descriptor for the remote Azureus "ShareItem" interface."""
    
    def get_xml_type():
        return "ShareItem"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getTorrent', [], 'Torrent'),
        _method('getTorrentFile', [], 'File'),
    ])
+
class ShareManagerDataType:
    """Type descriptor for the remote Azureus "ShareManager" interface."""
    
    def get_xml_type():
        return "ShareManager"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addDir', ['File'], 'ShareResourceDir'),
        _method('addDirContents', ['File', 'boolean'], 'ShareResourceDirContents'),
        _method('addFile', ['File'], 'ShareResourceFile'),
        _method('addListener', ['ShareManagerListener']),
        _method('cancelOperation'),
        _method('getShare', ['File'], 'ShareResource'),
        _method('getShares', [], 'ShareResource[]'),
        _method('initialise'),
        _method('isInitialising', [], 'boolean'),
        _method('removeListener', ['ShareManagerListener']),
    ])
+
class ShareManagerListenerDataType:
    """Type descriptor for the remote Azureus "ShareManagerListener" interface."""
    
    def get_xml_type():
        return "ShareManagerListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('reportCurrentTask', ['String']),
        _method('reportProgress', ['int']),
        _method('resourceAdded', ['ShareResource']),
        _method('resourceDeleted', ['ShareResource']),
        _method('resourceModified', ['ShareResource']),
    ])
+
class ShareResourceDataType:
    """Type descriptor for the remote Azureus "ShareResource" interface."""
    
    def get_xml_type():
        return "ShareResource"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (share type identifiers for getType).
    __az_constants__ = {
        'ST_DIR': 2,
        'ST_DIR_CONTENTS': 3,
        'ST_FILE': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addChangeListener', ['ShareResourceListener']),
        _method('addDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('canBeDeleted', [], 'boolean'),
        _method('delete'),
        _method('getAttribute', ['TorrentAttribute'], 'String'),
        _method('getAttributes', [], 'TorrentAttribute[]'),
        _method('getName', [], 'String'),
        _method('getParent', [], 'ShareResourceDirContents'),
        _method('getType', [], 'int'),
        _method('removeChangeListener', ['ShareResourceListener']),
        _method('removeDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('setAttribute', ['TorrentAttribute', 'String']),
    ])
+
class ShareResourceDirDataType:
    """Type descriptor for the remote Azureus "ShareResourceDir" interface."""
    
    def get_xml_type():
        return "ShareResourceDir"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (share type identifiers for getType).
    __az_constants__ = {
        'ST_DIR': 2,
        'ST_DIR_CONTENTS': 3,
        'ST_FILE': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addChangeListener', ['ShareResourceListener']),
        _method('addDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('canBeDeleted', [], 'boolean'),
        _method('delete'),
        _method('getAttribute', ['TorrentAttribute'], 'String'),
        _method('getAttributes', [], 'TorrentAttribute[]'),
        _method('getDir', [], 'File'),
        _method('getItem', [], 'ShareItem'),
        _method('getName', [], 'String'),
        _method('getParent', [], 'ShareResourceDirContents'),
        _method('getType', [], 'int'),
        _method('removeChangeListener', ['ShareResourceListener']),
        _method('removeDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('setAttribute', ['TorrentAttribute', 'String']),
    ])
+
class ShareResourceDirContentsDataType:
    """Type descriptor for the remote Azureus "ShareResourceDirContents" interface."""
    
    def get_xml_type():
        return "ShareResourceDirContents"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (share type identifiers for getType).
    __az_constants__ = {
        'ST_DIR': 2,
        'ST_DIR_CONTENTS': 3,
        'ST_FILE': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addChangeListener', ['ShareResourceListener']),
        _method('addDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('canBeDeleted', [], 'boolean'),
        _method('delete'),
        _method('getAttribute', ['TorrentAttribute'], 'String'),
        _method('getAttributes', [], 'TorrentAttribute[]'),
        _method('getChildren', [], 'ShareResource[]'),
        _method('getName', [], 'String'),
        _method('getParent', [], 'ShareResourceDirContents'),
        _method('getRoot', [], 'File'),
        _method('getType', [], 'int'),
        _method('isRecursive', [], 'boolean'),
        _method('removeChangeListener', ['ShareResourceListener']),
        _method('removeDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('setAttribute', ['TorrentAttribute', 'String']),
    ])
+
class ShareResourceEventDataType:
    """Type descriptor for the remote Azureus "ShareResourceEvent" interface."""
    
    def get_xml_type():
        return "ShareResourceEvent"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (event type identifiers for getType).
    __az_constants__ = {
        'ET_ATTRIBUTE_CHANGED': 1,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Object getData()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getType', [], 'int'),
    ])
+
class ShareResourceFileDataType:
    """Type descriptor for the remote Azureus "ShareResourceFile" interface."""
    
    def get_xml_type():
        return "ShareResourceFile"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (share type identifiers for getType).
    __az_constants__ = {
        'ST_DIR': 2,
        'ST_DIR_CONTENTS': 3,
        'ST_FILE': 1,
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addChangeListener', ['ShareResourceListener']),
        _method('addDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('canBeDeleted', [], 'boolean'),
        _method('delete'),
        _method('getAttribute', ['TorrentAttribute'], 'String'),
        _method('getAttributes', [], 'TorrentAttribute[]'),
        _method('getFile', [], 'File'),
        _method('getItem', [], 'ShareItem'),
        _method('getName', [], 'String'),
        _method('getParent', [], 'ShareResourceDirContents'),
        _method('getType', [], 'int'),
        _method('removeChangeListener', ['ShareResourceListener']),
        _method('removeDeletionListener', ['ShareResourceWillBeDeletedListener']),
        _method('setAttribute', ['TorrentAttribute', 'String']),
    ])
+
class ShareResourceListenerDataType:
    """Type descriptor for the remote Azureus "ShareResourceListener" interface."""
    
    def get_xml_type():
        return "ShareResourceListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('shareResourceChanged', ['ShareResource', 'ShareResourceEvent']),
    ])
+
class ShareResourceWillBeDeletedListenerDataType:
    """Type descriptor for the remote Azureus "ShareResourceWillBeDeletedListener" interface."""
    
    def get_xml_type():
        return "ShareResourceWillBeDeletedListener"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('resourceWillBeDeleted', ['ShareResource']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.torrent
+#
+#------------------------------------------------------------------------------
+
class TorrentDataType:
    """Type descriptor for the remote Azureus "Torrent" interface."""
    
    def get_xml_type():
        return "Torrent"
    get_xml_type = staticmethod(get_xml_type)
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Object getAdditionalProperty(String)
    #   Map getMapProperty(String)
    #   byte[][] getPieces()
    #   void setMapProperty(String, Map)
    #   Map writeToMap()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getAnnounceURL', [], 'URL'),
        _method('getAnnounceURLList', [], 'TorrentAnnounceURLList'),
        _method('getComment', [], 'String'),
        _method('getCreatedBy', [], 'String'),
        _method('getCreationDate', [], 'long'),
        _method('getEncoding', [], 'String'),
        _method('getFiles', [], 'TorrentFile[]'),
        _method('getHash', [], 'byte[]'),
        _method('getMagnetURI', [], 'URL'),
        _method('getName', [], 'String'),
        _method('getPieceCount', [], 'long'),
        _method('getPieceSize', [], 'long'),
        _method('getPluginStringProperty', ['String'], 'String'),
        _method('getSize', [], 'long'),
        _method('isDecentralised', [], 'boolean'),
        _method('isDecentralisedBackupEnabled', [], 'boolean'),
        _method('isDecentralisedBackupRequested', [], 'boolean'),
        _method('isPrivate', [], 'boolean'),
        _method('removeAdditionalProperties', [], 'Torrent'),
        _method('save'),
        _method('setAnnounceURL', ['URL']),
        _method('setComment', ['String']),
        _method('setComplete', ['File']),
        _method('setDecentralisedBackupRequested', ['boolean']),
        _method('setPluginStringProperty', ['String', 'String']),
        _method('setPrivate', ['boolean']),
        _method('writeToBEncodedData', [], 'byte[]'),
        _method('writeToFile', ['File']),
    ])
    
    # Remotely readable attribute name -> Java type.
    __az_attributes__ = {
        'hash': 'byte[]',
        'name': 'String',
        'size': 'long',
    }
+
class TorrentAnnounceURLListDataType:
    """Type descriptor for the remote Azureus "TorrentAnnounceURLList" interface."""
    
    def get_xml_type():
        return "TorrentAnnounceURLList"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addSet', ['URL[]']),
        _method('create', ['URL[]'], 'TorrentAnnounceURLListSet'),
        _method('getSets', [], 'TorrentAnnounceURLListSet[]'),
        _method('insertSetAtFront', ['URL[]']),
        _method('setSets', ['TorrentAnnounceURLListSet[]']),
    ])
+
class TorrentAnnounceURLListSetDataType:
    """Type descriptor for the remote Azureus "TorrentAnnounceURLListSet" interface."""
    
    def get_xml_type():
        return "TorrentAnnounceURLListSet"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getURLs', [], 'URL[]'),
        _method('setURLs', ['URL[]']),
    ])
+
class TorrentAttributeDataType:
    """Type descriptor for the remote Azureus "TorrentAttribute" interface."""
    
    def get_xml_type():
        return "TorrentAttribute"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (well-known attribute name strings).
    __az_constants__ = {
        'TA_CATEGORY': "Category",
        'TA_NETWORKS': "Networks",
        'TA_PEER_SOURCES': "PeerSources",
        'TA_SHARE_PROPERTIES': "ShareProperties",
        'TA_TRACKER_CLIENT_EXTENSIONS': "TrackerClientExtensions",
    }
    
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('addDefinedValue', ['String']),
        _method('addTorrentAttributeListener', ['TorrentAttributeListener']),
        _method('getDefinedValues', [], 'String[]'),
        _method('getName', [], 'String'),
        _method('removeDefinedValue', ['String']),
        _method('removeTorrentAttributeListener', ['TorrentAttributeListener']),
    ])
+
class TorrentAttributeEventDataType:
    """Type descriptor for the remote Azureus "TorrentAttributeEvent" interface."""
    
    def get_xml_type():
        return "TorrentAttributeEvent"
    get_xml_type = staticmethod(get_xml_type)
    
    # Remote constant name -> value (event type identifiers for getType).
    __az_constants__ = {
        'ET_ATTRIBUTE_VALUE_ADDED': 1,
        'ET_ATTRIBUTE_VALUE_REMOVED': 2,
    }
    
    # 
    # The following methods are not available, because either the return
    # type is not supported or / and one or more parameters are not
    # remotely supported:
    # 
    #   Object getData()
    #   
    # Remotely invokable methods: name, parameter types, optional return type.
    __az_methods__ = AzureusMethods([
        _method('getAttribute', [], 'TorrentAttribute'),
        _method('getType', [], 'int'),
    ])
+
class TorrentAttributeListenerDataType:
    """Generated mirror of the remote Azureus ``TorrentAttributeListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TorrentAttributeListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('event', ['TorrentAttributeEvent']),
    ])
+
class TorrentDownloaderDataType:
    """Generated mirror of the remote Azureus ``TorrentDownloader`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TorrentDownloader"

    # Remotely invokable methods (both overloads of download are listed).
    __az_methods__ = AzureusMethods([
        _method('download', [], 'Torrent'),
        _method('download', ['String'], 'Torrent'),
    ])
+
class TorrentFileDataType:
    """Generated mirror of the remote Azureus ``TorrentFile`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TorrentFile"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('getName', [], 'String'),
        _method('getSize', [], 'long'),
    ])
+
class TorrentManagerDataType:
    """Generated mirror of the remote Azureus ``TorrentManager`` type.

    Not remotely callable (unsupported parameter/return types):
      - Torrent createFromBEncodedInputStream(InputStream)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TorrentManager"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addListener', ['TorrentManagerListener']),
        _method('createFromBEncodedData', ['byte[]'], 'Torrent'),
        _method('createFromBEncodedFile', ['File'], 'Torrent'),
        _method('createFromDataFile', ['File', 'URL', 'boolean'], 'Torrent'),
        _method('createFromDataFile', ['File', 'URL'], 'Torrent'),
        _method('getAttribute', ['String'], 'TorrentAttribute'),
        _method('getDefinedAttributes', [], 'TorrentAttribute[]'),
        _method('getPluginAttribute', ['String'], 'TorrentAttribute'),
        _method('getURLDownloader', ['URL', 'String', 'String'], 'TorrentDownloader'),
        _method('getURLDownloader', ['URL'], 'TorrentDownloader'),
        _method('removeListener', ['TorrentManagerListener']),
    ])
+
class TorrentManagerEventDataType:
    """Generated mirror of the remote Azureus ``TorrentManagerEvent`` type.

    No methods are remotely callable; the only Java method uses an
    unsupported type:
      - Object getData()
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TorrentManagerEvent"
+
class TorrentManagerListenerDataType:
    """Generated mirror of the remote Azureus ``TorrentManagerListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TorrentManagerListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('event', ['TorrentManagerEvent']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.tracker
+#
+#------------------------------------------------------------------------------
+
class TrackerDataType:
    """Generated mirror of the remote Azureus ``Tracker`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "Tracker"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'PR_HTTP': 1,
        'PR_HTTPS': 2,
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addAuthenticationListener', ['TrackerAuthenticationListener']),
        _method('addListener', ['TrackerListener']),
        _method('addPageGenerator', ['TrackerWebPageGenerator']),
        _method('createWebContext', ['String', 'int', 'int'], 'TrackerWebContext'),
        _method('createWebContext', ['int', 'int'], 'TrackerWebContext'),
        _method('getName', [], 'String'),
        _method('getPageGenerators', [], 'TrackerWebPageGenerator[]'),
        _method('getTorrent', ['Torrent'], 'TrackerTorrent'),
        _method('getTorrents', [], 'TrackerTorrent[]'),
        _method('getURLs', [], 'URL[]'),
        _method('host', ['Torrent', 'boolean'], 'TrackerTorrent'),
        _method('host', ['Torrent', 'boolean', 'boolean'], 'TrackerTorrent'),
        _method('publish', ['Torrent'], 'TrackerTorrent'),
        _method('removeAuthenticationListener', ['TrackerAuthenticationListener']),
        _method('removeListener', ['TrackerListener']),
        _method('removePageGenerator', ['TrackerWebPageGenerator']),
    ])
+
class TrackerListenerDataType:
    """Generated mirror of the remote Azureus ``TrackerListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('torrentAdded', ['TrackerTorrent']),
        _method('torrentChanged', ['TrackerTorrent']),
        _method('torrentRemoved', ['TrackerTorrent']),
    ])
+
class TrackerPeerDataType:
    """Generated mirror of the remote Azureus ``TrackerPeer`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerPeer"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('getAmountLeft', [], 'long'),
        _method('getDownloaded', [], 'long'),
        _method('getIP', [], 'String'),
        _method('getIPRaw', [], 'String'),
        _method('getUploaded', [], 'long'),
        _method('isSeed', [], 'boolean'),
    ])
+
class TrackerPeerEventDataType:
    """Generated mirror of the remote Azureus ``TrackerPeerEvent`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerPeerEvent"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'ET_PEER_ADDED': 1,
        'ET_PEER_CHANGED': 2,
        'ET_PEER_REMOVED': 3,
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('getEventType', [], 'int'),
    ])
+
class TrackerPeerListenerDataType:
    """Generated mirror of the remote Azureus ``TrackerPeerListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerPeerListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('eventOccurred', ['TrackerPeerEvent']),
    ])
+
class TrackerTorrentDataType:
    """Generated mirror of the remote Azureus ``TrackerTorrent`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerTorrent"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'TS_PUBLISHED': 2,
        'TS_STARTED': 0,
        'TS_STOPPED': 1,
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addListener', ['TrackerTorrentListener']),
        _method('addRemovalListener', ['TrackerTorrentWillBeRemovedListener']),
        _method('canBeRemoved', [], 'boolean'),
        _method('disableReplyCaching'),
        _method('getAnnounceCount', [], 'long'),
        _method('getAverageAnnounceCount', [], 'long'),
        _method('getAverageBytesIn', [], 'long'),
        _method('getAverageBytesOut', [], 'long'),
        _method('getAverageDownloaded', [], 'long'),
        _method('getAverageScrapeCount', [], 'long'),
        _method('getAverageUploaded', [], 'long'),
        _method('getBadNATCount', [], 'int'),
        _method('getCompletedCount', [], 'long'),
        _method('getDateAdded', [], 'long'),
        _method('getLeecherCount', [], 'int'),
        _method('getPeers', [], 'TrackerPeer[]'),
        _method('getScrapeCount', [], 'long'),
        _method('getSeedCount', [], 'int'),
        _method('getStatus', [], 'int'),
        _method('getTorrent', [], 'Torrent'),
        _method('getTotalBytesIn', [], 'long'),
        _method('getTotalBytesOut', [], 'long'),
        _method('getTotalDownloaded', [], 'long'),
        _method('getTotalLeft', [], 'long'),
        _method('getTotalUploaded', [], 'long'),
        _method('isPassive', [], 'boolean'),
        _method('remove'),
        _method('removeListener', ['TrackerTorrentListener']),
        _method('removeRemovalListener', ['TrackerTorrentWillBeRemovedListener']),
        _method('start'),
        _method('stop'),
    ])

    # Attributes carried inline in the XML representation of this type.
    __az_attributes__ = {
        'announce_count': 'long',
        'average_announce_count': 'long',
        'average_bytes_in': 'long',
        'average_bytes_out': 'long',
        'average_downloaded': 'long',
        'average_scrape_count': 'long',
        'average_uploaded': 'long',
        'bad_NAT_count': 'int',
        'completed_count': 'long',
        'leecher_count': 'int',
        'scrape_count': 'long',
        'seed_count': 'int',
        'status': 'int',
        'torrent': 'Torrent',
        'total_bytes_in': 'long',
        'total_bytes_out': 'long',
        'total_downloaded': 'long',
        'total_left': 'long',
        'total_uploaded': 'long',
    }
+
class TrackerTorrentListenerDataType:
    """Generated mirror of the remote Azureus ``TrackerTorrentListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerTorrentListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('postProcess', ['TrackerTorrentRequest']),
        _method('preProcess', ['TrackerTorrentRequest']),
    ])
+
class TrackerTorrentRequestDataType:
    """Generated mirror of the remote Azureus ``TrackerTorrentRequest`` type.

    Not remotely callable (unsupported parameter/return types):
      - Map getResponse()
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerTorrentRequest"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'RT_ANNOUNCE': 1,
        'RT_FULL_SCRAPE': 3,
        'RT_SCRAPE': 2,
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('getPeer', [], 'TrackerPeer'),
        _method('getRequest', [], 'String'),
        _method('getRequestType', [], 'int'),
        _method('getTorrent', [], 'TrackerTorrent'),
    ])
+
class TrackerTorrentWillBeRemovedListenerDataType:
    """Generated mirror of the remote Azureus ``TrackerTorrentWillBeRemovedListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerTorrentWillBeRemovedListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('torrentWillBeRemoved', ['TrackerTorrent']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.tracker.web
+#
+#------------------------------------------------------------------------------
+
class TrackerAuthenticationListenerDataType:
    """Generated mirror of the remote Azureus ``TrackerAuthenticationListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerAuthenticationListener"

    # Remotely invokable methods (both authenticate overloads listed).
    __az_methods__ = AzureusMethods([
        _method('authenticate', ['URL', 'String', 'String'], 'boolean'),
        _method('authenticate', ['URL', 'String'], 'byte[]'),
    ])
+
class TrackerWebContextDataType:
    """Generated mirror of the remote Azureus ``TrackerWebContext`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerWebContext"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addAuthenticationListener', ['TrackerAuthenticationListener']),
        _method('addPageGenerator', ['TrackerWebPageGenerator']),
        _method('getName', [], 'String'),
        _method('getPageGenerators', [], 'TrackerWebPageGenerator[]'),
        _method('getURLs', [], 'URL[]'),
        _method('removeAuthenticationListener', ['TrackerAuthenticationListener']),
        _method('removePageGenerator', ['TrackerWebPageGenerator']),
    ])
+
class TrackerWebPageGeneratorDataType:
    """Generated mirror of the remote Azureus ``TrackerWebPageGenerator`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerWebPageGenerator"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('generate', ['TrackerWebPageRequest', 'TrackerWebPageResponse'], 'boolean'),
    ])
+
class TrackerWebPageRequestDataType:
    """Generated mirror of the remote Azureus ``TrackerWebPageRequest`` type.

    Not remotely callable (unsupported parameter/return types):
      - InetSocketAddress getClientAddress2()
      - Map getHeaders()
      - InputStream getInputStream()
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerWebPageRequest"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('getAbsoluteURL', [], 'URL'),
        _method('getClientAddress', [], 'String'),
        _method('getContext', [], 'TrackerWebContext'),
        _method('getHeader', [], 'String'),
        _method('getTracker', [], 'Tracker'),
        _method('getURL', [], 'String'),
        _method('getUser', [], 'String'),
    ])
+
class TrackerWebPageResponseDataType:
    """Generated mirror of the remote Azureus ``TrackerWebPageResponse`` type.

    Not remotely callable (unsupported parameter/return types):
      - OutputStream getOutputStream()
      - void useStream(String, InputStream)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "TrackerWebPageResponse"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('setContentType', ['String']),
        _method('setExpires', ['long']),
        _method('setHeader', ['String', 'String']),
        _method('setLastModified', ['long']),
        _method('setReplyStatus', ['int']),
        _method('useFile', ['String', 'String'], 'boolean'),
        _method('writeTorrent', ['TrackerTorrent']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui
+#
+#------------------------------------------------------------------------------
+
class GraphicDataType:
    """Generated mirror of the remote Azureus ``Graphic`` type (no remote methods)."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "Graphic"
+
class UIInstanceDataType:
    """Generated mirror of the remote Azureus ``UIInstance`` type (no remote methods)."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIInstance"
+
class UIInstanceFactoryDataType:
    """Generated mirror of the remote Azureus ``UIInstanceFactory`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIInstanceFactory"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('detach'),
        _method('getInstance', ['PluginInterface'], 'UIInstance'),
    ])
+
class UIManagerDataType:
    """Generated mirror of the remote Azureus ``UIManager`` type.

    Not remotely callable (unsupported parameter/return types):
      - PluginView createPluginView(PluginViewModel)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIManager"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addUIEventListener', ['UIManagerEventListener']),
        _method('addUIListener', ['UIManagerListener']),
        _method('attachUI', ['UIInstanceFactory']),
        _method('copyToClipBoard', ['String']),
        _method('createBasicPluginConfigModel', ['String'], 'BasicPluginConfigModel'),
        _method('createBasicPluginConfigModel', ['String', 'String'], 'BasicPluginConfigModel'),
        _method('createBasicPluginViewModel', ['String'], 'BasicPluginViewModel'),
        _method('detachUI', ['UIInstanceFactory']),
        _method('getBasicPluginViewModel', ['String'], 'BasicPluginViewModel'),
        _method('getSWTManager', [], 'SWTManager'),
        _method('getTableManager', [], 'TableManager'),
        _method('openURL', ['URL']),
        _method('removeUIEventListener', ['UIManagerEventListener']),
        _method('removeUIListener', ['UIManagerListener']),
        _method('showConfigSection', ['String'], 'boolean'),
        _method('showTextMessage', ['String', 'String', 'String']),
    ])
+
class UIManagerEventDataType:
    """Generated mirror of the remote Azureus ``UIManagerEvent`` type.

    Not remotely callable (unsupported parameter/return types):
      - Object getData()
      - Object getResult()
      - void setResult(Object)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIManagerEvent"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'ET_ADD_TABLE_COLUMN': 11,
        'ET_ADD_TABLE_CONTEXT_MENU_ITEM': 12,
        'ET_COPY_TO_CLIPBOARD': 6,
        'ET_CREATE_TABLE_COLUMN': 10,
        'ET_OPEN_TORRENT_VIA_FILE': 2,
        'ET_OPEN_TORRENT_VIA_URL': 3,
        'ET_OPEN_URL': 9,
        'ET_PLUGIN_CONFIG_MODEL_CREATED': 5,
        'ET_PLUGIN_CONFIG_MODEL_DESTROYED': 8,
        'ET_PLUGIN_VIEW_MODEL_CREATED': 4,
        'ET_PLUGIN_VIEW_MODEL_DESTROYED': 7,
        'ET_SHOW_CONFIG_SECTION': 13,
        'ET_SHOW_TEXT_MESSAGE': 1,
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('getType', [], 'int'),
    ])
+
class UIManagerEventListenerDataType:
    """Generated mirror of the remote Azureus ``UIManagerEventListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIManagerEventListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('eventOccurred', ['UIManagerEvent'], 'boolean'),
    ])
+
class UIManagerListenerDataType:
    """Generated mirror of the remote Azureus ``UIManagerListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIManagerListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('UIAttached', ['UIInstance']),
        _method('UIDetached', ['UIInstance']),
    ])
+
class UIPluginViewDataType:
    """Generated mirror of the remote Azureus ``UIPluginView`` type.

    Not remotely callable (unsupported parameter/return types):
      - Object getDataSource()
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIPluginView"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('closeView'),
        _method('getViewID', [], 'String'),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.components
+#
+#------------------------------------------------------------------------------
+
class UIComponentDataType:
    """Generated mirror of the remote Azureus ``UIComponent`` type.

    Not remotely callable (unsupported parameter/return types):
      - Object getProperty(String)
      - void setProperty(String, Object)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIComponent"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'PT_ENABLED': "enabled",
        'PT_VALUE': "value",
        'PT_VISIBLE': "visible",
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addPropertyChangeListener', ['UIPropertyChangeListener']),
        _method('getEnabled', [], 'boolean'),
        _method('getVisible', [], 'boolean'),
        _method('removePropertyChangeListener', ['UIPropertyChangeListener']),
        _method('setEnabled', ['boolean']),
        _method('setVisible', ['boolean']),
    ])
+
class UIProgressBarDataType:
    """Generated mirror of the remote Azureus ``UIProgressBar`` type.

    Not remotely callable (unsupported parameter/return types):
      - Object getProperty(String)
      - void setProperty(String, Object)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIProgressBar"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'PT_ENABLED': "enabled",
        'PT_VALUE': "value",
        'PT_VISIBLE': "visible",
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addPropertyChangeListener', ['UIPropertyChangeListener']),
        _method('getEnabled', [], 'boolean'),
        _method('getPercentageComplete', [], 'int'),
        _method('getVisible', [], 'boolean'),
        _method('removePropertyChangeListener', ['UIPropertyChangeListener']),
        _method('setEnabled', ['boolean']),
        _method('setPercentageComplete', ['int']),
        _method('setVisible', ['boolean']),
    ])
+
class UIPropertyChangeEventDataType:
    """Generated mirror of the remote Azureus ``UIPropertyChangeEvent`` type.

    Not remotely callable (unsupported parameter/return types):
      - Object getNewPropertyValue()
      - Object getOldPropertyValue()
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIPropertyChangeEvent"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('getPropertyType', [], 'String'),
        _method('getSource', [], 'UIComponent'),
    ])
+
class UIPropertyChangeListenerDataType:
    """Generated mirror of the remote Azureus ``UIPropertyChangeListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UIPropertyChangeListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('propertyChanged', ['UIPropertyChangeEvent']),
    ])
+
class UITextAreaDataType:
    """Generated mirror of the remote Azureus ``UITextArea`` type.

    Not remotely callable (unsupported parameter/return types):
      - Object getProperty(String)
      - void setProperty(String, Object)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UITextArea"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'DEFAULT_MAX_SIZE': 60000,
        'PT_ENABLED': "enabled",
        'PT_VALUE': "value",
        'PT_VISIBLE': "visible",
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addPropertyChangeListener', ['UIPropertyChangeListener']),
        _method('appendText', ['String']),
        _method('getEnabled', [], 'boolean'),
        _method('getText', [], 'String'),
        _method('getVisible', [], 'boolean'),
        _method('removePropertyChangeListener', ['UIPropertyChangeListener']),
        _method('setEnabled', ['boolean']),
        _method('setMaximumSize', ['int']),
        _method('setText', ['String']),
        _method('setVisible', ['boolean']),
    ])
+
class UITextFieldDataType:
    """Generated mirror of the remote Azureus ``UITextField`` type.

    Not remotely callable (unsupported parameter/return types):
      - Object getProperty(String)
      - void setProperty(String, Object)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "UITextField"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'PT_ENABLED': "enabled",
        'PT_VALUE': "value",
        'PT_VISIBLE': "visible",
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addPropertyChangeListener', ['UIPropertyChangeListener']),
        _method('getEnabled', [], 'boolean'),
        _method('getText', [], 'String'),
        _method('getVisible', [], 'boolean'),
        _method('removePropertyChangeListener', ['UIPropertyChangeListener']),
        _method('setEnabled', ['boolean']),
        _method('setText', ['String']),
        _method('setVisible', ['boolean']),
    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.config
+#
+#------------------------------------------------------------------------------
+
class ActionParameterDataType:
    """Generated mirror of the remote Azureus ``ActionParameter`` config type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "ActionParameter"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'STYLE_BUTTON': 1,
        'STYLE_LINK': 2,
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('getStyle', [], 'int'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setStyle', ['int']),
        _method('setVisible', ['boolean']),
    ])
+
class BooleanParameterDataType:
    """Generated mirror of the remote Azureus ``BooleanParameter`` config type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "BooleanParameter"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addDisabledOnSelection', ['Parameter']),
        _method('addEnabledOnSelection', ['Parameter']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('getValue', [], 'boolean'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setValue', ['boolean']),
        _method('setVisible', ['boolean']),
    ])
+
class ConfigSectionDataType:
    """Generated mirror of the remote Azureus ``ConfigSection`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "ConfigSection"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'SECTION_CONNECTION': "server",
        'SECTION_FILES': "files",
        'SECTION_INTERFACE': "style",
        'SECTION_PLUGINS': "plugins",
        'SECTION_ROOT': "root",
        'SECTION_TRACKER': "tracker",
        'SECTION_TRANSFER': "transfer",
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('configSectionDelete'),
        _method('configSectionGetName', [], 'String'),
        _method('configSectionGetParentSection', [], 'String'),
        _method('configSectionSave'),
    ])
+
class ConfigSectionSWTDataType:
    """Generated mirror of the remote Azureus ``ConfigSectionSWT`` type.

    Not remotely callable (unsupported parameter/return types):
      - Composite configSectionCreate(Composite)
    """

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "ConfigSectionSWT"

    # Java-side constants exposed by this type.
    __az_constants__ = {
        'SECTION_CONNECTION': "server",
        'SECTION_FILES': "files",
        'SECTION_INTERFACE': "style",
        'SECTION_PLUGINS': "plugins",
        'SECTION_ROOT': "root",
        'SECTION_TRACKER': "tracker",
        'SECTION_TRANSFER': "transfer",
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('configSectionDelete'),
        _method('configSectionGetName', [], 'String'),
        _method('configSectionGetParentSection', [], 'String'),
        _method('configSectionSave'),
    ])
+
class DirectoryParameterDataType:
    """Generated mirror of the remote Azureus ``DirectoryParameter`` config type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "DirectoryParameter"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('getValue', [], 'String'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setVisible', ['boolean']),
    ])
+
class EnablerParameterDataType:
    """Generated mirror of the remote Azureus ``EnablerParameter`` config type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "EnablerParameter"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addDisabledOnSelection', ['Parameter']),
        _method('addEnabledOnSelection', ['Parameter']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setVisible', ['boolean']),
    ])
+
class IntParameterDataType:
    """Generated mirror of the remote Azureus ``IntParameter`` config type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "IntParameter"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('getValue', [], 'int'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setValue', ['int']),
        _method('setVisible', ['boolean']),
    ])
+
class LabelParameterDataType:
    """Generated mirror of the remote Azureus ``LabelParameter`` config type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "LabelParameter"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setVisible', ['boolean']),
    ])
+
class ParameterDataType:
    """Generated mirror of the remote Azureus ``Parameter`` config base type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "Parameter"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setVisible', ['boolean']),
    ])
+
class ParameterGroupDataType:
    """Generated mirror of the remote Azureus ``ParameterGroup`` type (no remote methods)."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "ParameterGroup"
+
class ParameterListenerDataType:
    """Generated mirror of the remote Azureus ``ParameterListener`` type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "ParameterListener"

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('parameterChanged', ['Parameter']),
    ])
+
class PasswordParameterDataType:
    """Generated mirror of the remote Azureus ``PasswordParameter`` config type."""

    @staticmethod
    def get_xml_type():
        """Return the type name used on the XML wire."""
        return "PasswordParameter"

    # Java-side constants (password encoding types) exposed by this type.
    __az_constants__ = {
        'ET_MD5': 3,
        'ET_PLAIN': 1,
        'ET_SHA1': 2,
    }

    # Remotely invokable methods of this type.
    __az_methods__ = AzureusMethods([
        _method('addConfigParameterListener', ['ConfigParameterListener']),
        _method('addListener', ['ParameterListener']),
        _method('getLabelKey', [], 'String'),
        _method('getLabelText', [], 'String'),
        _method('getValue', [], 'byte[]'),
        _method('isEnabled', [], 'boolean'),
        _method('isVisible', [], 'boolean'),
        _method('removeConfigParameterListener', ['ConfigParameterListener']),
        _method('removeListener', ['ParameterListener']),
        _method('setEnabled', ['boolean']),
        _method('setLabelKey', ['String']),
        _method('setLabelText', ['String']),
        _method('setVisible', ['boolean']),
    ])
+
+class PluginConfigUIFactoryDataType:
+    
+    def get_xml_type():
+        return "PluginConfigUIFactory"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('createBooleanParameter', ['String', 'String', 'boolean'], 'EnablerParameter'),
+        _method('createColorParameter', ['String', 'String', 'int', 'int', 'int'], 'Parameter'),
+        _method('createDirectoryParameter', ['String', 'String', 'String'], 'Parameter'),
+        _method('createFileParameter', ['String', 'String', 'String'], 'Parameter'),
+        _method('createIntParameter', ['String', 'String', 'int'], 'Parameter'),
+        _method('createIntParameter', ['String', 'String', 'int', 'int[]', 'String[]'], 'Parameter'),
+        _method('createStringParameter', ['String', 'String', 'String'], 'Parameter'),
+        _method('createStringParameter', ['String', 'String', 'String', 'String[]', 'String[]'], 'Parameter'),
+    ])
+
+class StringListParameterDataType:
+    
+    def get_xml_type():
+        return "StringListParameter"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('addConfigParameterListener', ['ConfigParameterListener']),
+        _method('addListener', ['ParameterListener']),
+        _method('getLabelKey', [], 'String'),
+        _method('getLabelText', [], 'String'),
+        _method('getValue', [], 'String'),
+        _method('isEnabled', [], 'boolean'),
+        _method('isVisible', [], 'boolean'),
+        _method('removeConfigParameterListener', ['ConfigParameterListener']),
+        _method('removeListener', ['ParameterListener']),
+        _method('setEnabled', ['boolean']),
+        _method('setLabelKey', ['String']),
+        _method('setLabelText', ['String']),
+        _method('setValue', ['String']),
+        _method('setVisible', ['boolean']),
+    ])
+
+class StringParameterDataType:
+    
+    def get_xml_type():
+        return "StringParameter"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('addConfigParameterListener', ['ConfigParameterListener']),
+        _method('addListener', ['ParameterListener']),
+        _method('getLabelKey', [], 'String'),
+        _method('getLabelText', [], 'String'),
+        _method('getValue', [], 'String'),
+        _method('isEnabled', [], 'boolean'),
+        _method('isVisible', [], 'boolean'),
+        _method('removeConfigParameterListener', ['ConfigParameterListener']),
+        _method('removeListener', ['ParameterListener']),
+        _method('setEnabled', ['boolean']),
+        _method('setLabelKey', ['String']),
+        _method('setLabelText', ['String']),
+        _method('setValue', ['String']),
+        _method('setVisible', ['boolean']),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.menus
+#
+#------------------------------------------------------------------------------
+
+class MenuItemDataType:
+    
+    def get_xml_type():
+        return "MenuItem"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'STYLE_CHECK': 2,
+        'STYLE_PUSH': 1,
+        'STYLE_RADIO': 3,
+        'STYLE_SEPARATOR': 4,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Object getData()
+    #   void setData(Object)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addFillListener', ['MenuItemFillListener']),
+        _method('addListener', ['MenuItemListener']),
+        _method('getGraphic', [], 'Graphic'),
+        _method('getResourceKey', [], 'String'),
+        _method('getStyle', [], 'int'),
+        _method('isEnabled', [], 'boolean'),
+        _method('removeFillListener', ['MenuItemFillListener']),
+        _method('removeListener', ['MenuItemListener']),
+        _method('setEnabled', ['boolean']),
+        _method('setGraphic', ['Graphic']),
+        _method('setStyle', ['int']),
+    ])
+
+class MenuItemFillListenerDataType:
+    
+    def get_xml_type():
+        return "MenuItemFillListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void menuWillBeShown(MenuItem, Object)
+    #   
+
+class MenuItemListenerDataType:
+    
+    def get_xml_type():
+        return "MenuItemListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void selected(MenuItem, Object)
+    #   
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.model
+#
+#------------------------------------------------------------------------------
+
+class BasicPluginConfigModelDataType:
+    
+    def get_xml_type():
+        return "BasicPluginConfigModel"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('addActionParameter2', ['String', 'String'], 'ActionParameter'),
+        _method('addBooleanParameter', ['String', 'String', 'boolean']),
+        _method('addBooleanParameter2', ['String', 'String', 'boolean'], 'BooleanParameter'),
+        _method('addDirectoryParameter2', ['String', 'String', 'String'], 'DirectoryParameter'),
+        _method('addIntParameter2', ['String', 'String', 'int'], 'IntParameter'),
+        _method('addLabelParameter2', ['String'], 'LabelParameter'),
+        _method('addPasswordParameter2', ['String', 'String', 'int', 'byte[]'], 'PasswordParameter'),
+        _method('addStringListParameter2', ['String', 'String', 'String[]', 'String'], 'StringListParameter'),
+        _method('addStringListParameter2', ['String', 'String', 'String[]', 'String[]', 'String'], 'StringListParameter'),
+        _method('addStringParameter', ['String', 'String', 'String']),
+        _method('addStringParameter2', ['String', 'String', 'String'], 'StringParameter'),
+        _method('createGroup', ['String', 'Parameter[]'], 'ParameterGroup'),
+        _method('destroy'),
+        _method('getParameters', [], 'Parameter[]'),
+        _method('getParentSection', [], 'String'),
+        _method('getPluginInterface', [], 'PluginInterface'),
+        _method('getSection', [], 'String'),
+    ])
+
+class BasicPluginViewModelDataType:
+    
+    def get_xml_type():
+        return "BasicPluginViewModel"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('destroy'),
+        _method('getActivity', [], 'UITextField'),
+        _method('getConfigSectionID', [], 'String'),
+        _method('getLogArea', [], 'UITextArea'),
+        _method('getName', [], 'String'),
+        _method('getProgress', [], 'UIProgressBar'),
+        _method('getStatus', [], 'UITextField'),
+        _method('setConfigSectionID', ['String']),
+    ])
+
+class PluginConfigModelDataType:
+    
+    def get_xml_type():
+        return "PluginConfigModel"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('destroy'),
+        _method('getPluginInterface', [], 'PluginInterface'),
+    ])
+
+class PluginViewModelDataType:
+    
+    def get_xml_type():
+        return "PluginViewModel"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('destroy'),
+        _method('getName', [], 'String'),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.SWT
+#
+#------------------------------------------------------------------------------
+
+class GraphicSWTDataType:
+    
+    def get_xml_type():
+        return "GraphicSWT"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Image getImage()
+    #   boolean setImage(Image)
+    #   
+
+class SWTManagerDataType:
+    
+    def get_xml_type():
+        return "SWTManager"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void addView(PluginView)
+    #   void addView(PluginView, boolean)
+    #   GraphicSWT createGraphic(Image)
+    #   Display getDisplay()
+    #   
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.tables
+#
+#------------------------------------------------------------------------------
+
+class TableCellDataType:
+    
+    def get_xml_type():
+        return "TableCell"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void addListeners(Object)
+    #   Object getDataSource()
+    #   Comparable getSortValue()
+    #   Object getToolTip()
+    #   boolean setSortValue(Comparable)
+    #   void setToolTip(Object)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addDisposeListener', ['TableCellDisposeListener']),
+        _method('addMouseListener', ['TableCellMouseListener']),
+        _method('addRefreshListener', ['TableCellRefreshListener']),
+        _method('addToolTipListener', ['TableCellToolTipListener']),
+        _method('getGraphic', [], 'Graphic'),
+        _method('getHeight', [], 'int'),
+        _method('getTableColumn', [], 'TableColumn'),
+        _method('getTableID', [], 'String'),
+        _method('getTableRow', [], 'TableRow'),
+        _method('getText', [], 'String'),
+        _method('getWidth', [], 'int'),
+        _method('invalidate'),
+        _method('isDisposed', [], 'boolean'),
+        _method('isShown', [], 'boolean'),
+        _method('isValid', [], 'boolean'),
+        _method('removeDisposeListener', ['TableCellDisposeListener']),
+        _method('removeMouseListener', ['TableCellMouseListener']),
+        _method('removeRefreshListener', ['TableCellRefreshListener']),
+        _method('removeToolTipListener', ['TableCellToolTipListener']),
+        _method('setFillCell', ['boolean']),
+        _method('setForeground', ['int', 'int', 'int'], 'boolean'),
+        _method('setGraphic', ['Graphic'], 'boolean'),
+        _method('setMarginHeight', ['int']),
+        _method('setMarginWidth', ['int']),
+        _method('setSortValue', ['float'], 'boolean'),
+        _method('setSortValue', ['long'], 'boolean'),
+        _method('setText', ['String'], 'boolean'),
+    ])
+
+class TableCellAddedListenerDataType:
+    
+    def get_xml_type():
+        return "TableCellAddedListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('cellAdded', ['TableCell']),
+    ])
+
+class TableCellDisposeListenerDataType:
+    
+    def get_xml_type():
+        return "TableCellDisposeListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('dispose', ['TableCell']),
+    ])
+
+class TableCellMouseListenerDataType:
+    
+    def get_xml_type():
+        return "TableCellMouseListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void cellMouseTrigger(TableCellMouseEvent)
+    #   
+
+class TableCellRefreshListenerDataType:
+    
+    def get_xml_type():
+        return "TableCellRefreshListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('refresh', ['TableCell']),
+    ])
+
+class TableCellToolTipListenerDataType:
+    
+    def get_xml_type():
+        return "TableCellToolTipListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('cellHover', ['TableCell']),
+        _method('cellHoverComplete', ['TableCell']),
+    ])
+
+class TableColumnDataType:
+    
+    def get_xml_type():
+        return "TableColumn"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'ALIGN_CENTER': 3,
+        'ALIGN_LEAD': 1,
+        'ALIGN_TRAIL': 2,
+        'INTERVAL_GRAPHIC': -1,
+        'INTERVAL_INVALID_ONLY': -3,
+        'INTERVAL_LIVE': -2,
+        'POSITION_INVISIBLE': -1,
+        'POSITION_LAST': -2,
+        'TYPE_GRAPHIC': 2,
+        'TYPE_TEXT': 1,
+        'TYPE_TEXT_ONLY': 3,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void addListeners(Object)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addCellAddedListener', ['TableCellAddedListener']),
+        _method('addCellDisposeListener', ['TableCellDisposeListener']),
+        _method('addCellMouseListener', ['TableCellMouseListener']),
+        _method('addCellRefreshListener', ['TableCellRefreshListener']),
+        _method('addCellToolTipListener', ['TableCellToolTipListener']),
+        _method('addContextMenuItem', ['String'], 'TableContextMenuItem'),
+        _method('getAlignment', [], 'int'),
+        _method('getName', [], 'String'),
+        _method('getPosition', [], 'int'),
+        _method('getRefreshInterval', [], 'int'),
+        _method('getTableID', [], 'String'),
+        _method('getType', [], 'int'),
+        _method('getWidth', [], 'int'),
+        _method('initialize', ['int', 'int', 'int', 'int']),
+        _method('initialize', ['int', 'int', 'int']),
+        _method('invalidateCells'),
+        _method('removeCellAddedListener', ['TableCellAddedListener']),
+        _method('removeCellDisposeListener', ['TableCellDisposeListener']),
+        _method('removeCellMouseListener', ['TableCellMouseListener']),
+        _method('removeCellRefreshListener', ['TableCellRefreshListener']),
+        _method('removeCellToolTipListener', ['TableCellToolTipListener']),
+        _method('setAlignment', ['int']),
+        _method('setPosition', ['int']),
+        _method('setRefreshInterval', ['int']),
+        _method('setType', ['int']),
+        _method('setWidth', ['int']),
+    ])
+
+class TableContextMenuItemDataType:
+    
+    def get_xml_type():
+        return "TableContextMenuItem"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'STYLE_CHECK': 2,
+        'STYLE_PUSH': 1,
+        'STYLE_RADIO': 3,
+        'STYLE_SEPARATOR': 4,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Object getData()
+    #   void setData(Object)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addFillListener', ['MenuItemFillListener']),
+        _method('addListener', ['MenuItemListener']),
+        _method('getGraphic', [], 'Graphic'),
+        _method('getResourceKey', [], 'String'),
+        _method('getStyle', [], 'int'),
+        _method('getTableID', [], 'String'),
+        _method('isEnabled', [], 'boolean'),
+        _method('removeFillListener', ['MenuItemFillListener']),
+        _method('removeListener', ['MenuItemListener']),
+        _method('setEnabled', ['boolean']),
+        _method('setGraphic', ['Graphic']),
+        _method('setStyle', ['int']),
+    ])
+
+class TableManagerDataType:
+    
+    def get_xml_type():
+        return "TableManager"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'TABLE_MYSHARES': "MyShares",
+        'TABLE_MYTORRENTS_COMPLETE': "MySeeders",
+        'TABLE_MYTORRENTS_INCOMPLETE': "MyTorrents",
+        'TABLE_MYTRACKER': "MyTracker",
+        'TABLE_TORRENT_FILES': "Files",
+        'TABLE_TORRENT_PEERS': "Peers",
+        'TABLE_TORRENT_PIECES': "Pieces",
+    }
+    
+    __az_methods__ = AzureusMethods([
+        _method('addColumn', ['TableColumn']),
+        _method('addContextMenuItem', ['String', 'String'], 'TableContextMenuItem'),
+        _method('createColumn', ['String', 'String'], 'TableColumn'),
+    ])
+
+class TableRowDataType:
+    
+    def get_xml_type():
+        return "TableRow"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Object getDataSource()
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('getTableCell', ['String'], 'TableCell'),
+        _method('getTableID', [], 'String'),
+        _method('isValid', [], 'boolean'),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.tables.mytorrents
+#
+#------------------------------------------------------------------------------
+
+class MyTorrentsTableItemDataType:
+    
+    def get_xml_type():
+        return "MyTorrentsTableItem"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('getDownload', [], 'Download'),
+        _method('setText', ['String'], 'boolean'),
+    ])
+
+class PluginMyTorrentsItemDataType:
+    
+    def get_xml_type():
+        return "PluginMyTorrentsItem"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('getIntValue', [], 'int'),
+        _method('getStringValue', [], 'String'),
+        _method('refresh'),
+    ])
+
+class PluginMyTorrentsItemFactoryDataType:
+    
+    def get_xml_type():
+        return "PluginMyTorrentsItemFactory"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'ORIENT_LEFT': "L",
+        'ORIENT_RIGHT': "R",
+        'POSITION_INVISIBLE': -1,
+        'POSITION_LAST': -2,
+        'TABLE_ALL': 4,
+        'TABLE_COMPLETE': 1,
+        'TABLE_INCOMPLETE': 2,
+        'TYPE_INT': "I",
+        'TYPE_STRING': "S",
+    }
+    
+    __az_methods__ = AzureusMethods([
+        _method('getDefaultPosition', [], 'int'),
+        _method('getDefaultSize', [], 'int'),
+        _method('getInstance', ['MyTorrentsTableItem'], 'PluginMyTorrentsItem'),
+        _method('getName', [], 'String'),
+        _method('getOrientation', [], 'String'),
+        _method('getTablesVisibleIn', [], 'int'),
+        _method('getType', [], 'String'),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.ui.tables.peers
+#
+#------------------------------------------------------------------------------
+
+class PeerTableItemDataType:
+    
+    def get_xml_type():
+        return "PeerTableItem"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('getPeer', [], 'Peer'),
+        _method('setText', ['String'], 'boolean'),
+    ])
+
+class PluginPeerItemDataType:
+    
+    def get_xml_type():
+        return "PluginPeerItem"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('getIntValue', [], 'int'),
+        _method('getStringValue', [], 'String'),
+        _method('refresh'),
+    ])
+
+class PluginPeerItemFactoryDataType:
+    
+    def get_xml_type():
+        return "PluginPeerItemFactory"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'TYPE_INT': "I",
+        'TYPE_STRING': "S",
+    }
+    
+    __az_methods__ = AzureusMethods([
+        _method('getDefaultSize', [], 'int'),
+        _method('getInstance', ['PeerTableItem'], 'PluginPeerItem'),
+        _method('getName', [], 'String'),
+        _method('getType', [], 'String'),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.update
+#
+#------------------------------------------------------------------------------
+
+class UpdatableComponentDataType:
+    
+    def get_xml_type():
+        return "UpdatableComponent"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('checkForUpdate', ['UpdateChecker']),
+        _method('getMaximumCheckTime', [], 'int'),
+        _method('getName', [], 'String'),
+    ])
+
+class UpdateDataType:
+    
+    def get_xml_type():
+        return "Update"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'RESTART_REQUIRED_MAYBE': 3,
+        'RESTART_REQUIRED_NO': 1,
+        'RESTART_REQUIRED_YES': 2,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Object getDecision(int, String, String, Object)
+    #   Object getUserObject()
+    #   void setUserObject(Object)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addListener', ['UpdateListener']),
+        _method('cancel'),
+        _method('complete'),
+        _method('getCheckInstance', [], 'UpdateCheckInstance'),
+        _method('getDescription', [], 'String[]'),
+        _method('getDownloaders', [], 'ResourceDownloader[]'),
+        _method('getName', [], 'String'),
+        _method('getNewVersion', [], 'String'),
+        _method('getRestartRequired', [], 'int'),
+        _method('isMandatory', [], 'boolean'),
+        _method('removeListener', ['UpdateListener']),
+        _method('setRestartRequired', ['int']),
+    ])
+
+class UpdateCheckerDataType:
+    
+    def get_xml_type():
+        return "UpdateChecker"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('addListener', ['UpdateCheckerListener']),
+        _method('addProgressListener', ['UpdateProgressListener']),
+        _method('addUpdate', ['String', 'String[]', 'String', 'ResourceDownloader', 'int'], 'Update'),
+        _method('addUpdate', ['String', 'String[]', 'String', 'ResourceDownloader[]', 'int'], 'Update'),
+        _method('completed'),
+        _method('createInstaller', [], 'UpdateInstaller'),
+        _method('failed'),
+        _method('getCheckInstance', [], 'UpdateCheckInstance'),
+        _method('getComponent', [], 'UpdatableComponent'),
+        _method('removeListener', ['UpdateCheckerListener']),
+        _method('removeProgressListener', ['UpdateProgressListener']),
+        _method('reportProgress', ['String']),
+    ])
+
+class UpdateCheckerListenerDataType:
+    
+    def get_xml_type():
+        return "UpdateCheckerListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('cancelled', ['UpdateChecker']),
+        _method('completed', ['UpdateChecker']),
+        _method('failed', ['UpdateChecker']),
+    ])
+
+class UpdateCheckInstanceDataType:
+    
+    def get_xml_type():
+        return "UpdateCheckInstance"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'UCI_INSTALL': 1,
+        'UCI_UNINSTALL': 3,
+        'UCI_UPDATE': 2,
+    }
+    
+    __az_methods__ = AzureusMethods([
+        _method('addDecisionListener', ['UpdateManagerDecisionListener']),
+        _method('addListener', ['UpdateCheckInstanceListener']),
+        _method('addUpdatableComponent', ['UpdatableComponent', 'boolean']),
+        _method('cancel'),
+        _method('createInstaller', [], 'UpdateInstaller'),
+        _method('getCheckers', [], 'UpdateChecker[]'),
+        _method('getManager', [], 'UpdateManager'),
+        _method('getName', [], 'String'),
+        _method('getType', [], 'int'),
+        _method('getUpdates', [], 'Update[]'),
+        _method('isCancelled', [], 'boolean'),
+        _method('removeDecisionListener', ['UpdateManagerDecisionListener']),
+        _method('removeListener', ['UpdateCheckInstanceListener']),
+        _method('start'),
+    ])
+
+class UpdateCheckInstanceListenerDataType:
+    
+    def get_xml_type():
+        return "UpdateCheckInstanceListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('cancelled', ['UpdateCheckInstance']),
+        _method('complete', ['UpdateCheckInstance']),
+    ])
+
+class UpdateInstallerDataType:
+    
+    def get_xml_type():
+        return "UpdateInstaller"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void addResource(String, InputStream)
+    #   void addResource(String, InputStream, boolean)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('addChangeRightsAction', ['String', 'String']),
+        _method('addMoveAction', ['String', 'String']),
+        _method('addRemoveAction', ['String']),
+        _method('getInstallDir', [], 'String'),
+        _method('getUserDir', [], 'String'),
+    ])
+
+class UpdateListenerDataType:
+    
+    def get_xml_type():
+        return "UpdateListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('complete', ['Update']),
+    ])
+
+class UpdateManagerDataType:
+    
+    def get_xml_type():
+        return "UpdateManager"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('addListener', ['UpdateManagerListener']),
+        _method('applyUpdates', ['boolean']),
+        _method('createEmptyUpdateCheckInstance', ['int', 'String'], 'UpdateCheckInstance'),
+        _method('createInstaller', [], 'UpdateInstaller'),
+        _method('createUpdateCheckInstance', [], 'UpdateCheckInstance'),
+        _method('createUpdateCheckInstance', ['int', 'String'], 'UpdateCheckInstance'),
+        _method('getInstallers', [], 'UpdateInstaller[]'),
+        _method('registerUpdatableComponent', ['UpdatableComponent', 'boolean']),
+        _method('removeListener', ['UpdateManagerListener']),
+        _method('restart'),
+    ])
+
+class UpdateManagerDecisionListenerDataType:
+    
+    def get_xml_type():
+        return "UpdateManagerDecisionListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_constants__ = {
+        'DT_STRING_ARRAY_TO_STRING': 0,
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Object decide(Update, int, String, String, Object)
+    #   
+
+class UpdateManagerListenerDataType:
+    
+    def get_xml_type():
+        return "UpdateManagerListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('checkInstanceCreated', ['UpdateCheckInstance']),
+    ])
+
+class UpdateProgressListenerDataType:
+    
+    def get_xml_type():
+        return "UpdateProgressListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('reportProgress', ['String']),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.utils
+#
+#------------------------------------------------------------------------------
+
+class AggregatedDispatcherDataType:
+    
+    def get_xml_type():
+        return "AggregatedDispatcher"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void add(Runnable)
+    #   Runnable remove(Runnable)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('destroy'),
+    ])
+
+class AggregatedListDataType:
+    
+    def get_xml_type():
+        return "AggregatedList"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void add(Object)
+    #   Object remove(Object)
+    #   
+    __az_methods__ = AzureusMethods([
+        _method('destroy'),
+    ])
+
+class AggregatedListAcceptorDataType:
+    
+    def get_xml_type():
+        return "AggregatedListAcceptor"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void accept(List)
+    #   
+
+class ByteArrayWrapperDataType:
+    
+    def get_xml_type():
+        return "ByteArrayWrapper"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    __az_methods__ = AzureusMethods([
+        _method('getBytes', [], 'byte[]'),
+    ])
+
+class FormattersDataType:
+    
+    '''DOPAL data-type definition for the Azureus "Formatters" class.'''
+    
+    def get_xml_type():
+        return "Formatters"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Map bDecode(byte[])
+    #   byte[] bEncode(Map)
+    #   Comparator getAlphanumericComparator(boolean)
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('base32Decode', ['String'], 'byte[]'),
+        _method('base32Encode', ['byte[]'], 'String'),
+        _method('decodeBytesFromString', ['String'], 'byte[]'),
+        _method('encodeBytesToString', ['byte[]'], 'String'),
+        _method('formatByteArray', ['byte[]', 'boolean'], 'String'),
+        _method('formatByteCountToKiBEtc', ['long'], 'String'),
+        _method('formatByteCountToKiBEtcPerSec', ['long'], 'String'),
+        _method('formatDate', ['long'], 'String'),
+        _method('formatPercentFromThousands', ['long'], 'String'),
+        _method('formatTimeFromSeconds', ['long'], 'String'),
+    ])
+
+class LocaleDecoderDataType:
+    
+    '''DOPAL data-type definition for the Azureus "LocaleDecoder" class.'''
+    
+    def get_xml_type():
+        return "LocaleDecoder"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('decode', ['byte[]'], 'String'),
+        _method('getName', [], 'String'),
+    ])
+
+class LocaleListenerDataType:
+    
+    '''DOPAL data-type definition for the Azureus "LocaleListener" class.
+    
+    No methods are remotely supported (see the note below), so only the
+    XML type name is defined.
+    '''
+    
+    def get_xml_type():
+        return "LocaleListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   void localeChanged(Locale)
+    #   
+
+class LocaleUtilitiesDataType:
+    
+    '''DOPAL data-type definition for the Azureus "LocaleUtilities" class.'''
+    
+    def get_xml_type():
+        return "LocaleUtilities"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    # Note: getLocalisedMessageText is overloaded on its parameter list.
+    __az_methods__ = AzureusMethods([
+        _method('addListener', ['LocaleListener']),
+        _method('getDecoders', [], 'LocaleDecoder[]'),
+        _method('getLocalisedMessageText', ['String'], 'String'),
+        _method('getLocalisedMessageText', ['String', 'String[]'], 'String'),
+        _method('integrateLocalisedMessageBundle', ['String']),
+        _method('removeListener', ['LocaleListener']),
+    ])
+
+class MonitorDataType:
+    
+    '''DOPAL data-type definition for the Azureus "Monitor" class.'''
+    
+    def get_xml_type():
+        return "Monitor"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('enter'),
+        _method('exit'),
+        _method('hasWaiters', [], 'boolean'),
+        _method('isOwned', [], 'boolean'),
+    ])
+
+class PooledByteBufferDataType:
+    
+    '''DOPAL data-type definition for the Azureus "PooledByteBuffer" class.'''
+    
+    def get_xml_type():
+        return "PooledByteBuffer"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   ByteBuffer toByteBuffer()
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('returnToPool'),
+        _method('toByteArray', [], 'byte[]'),
+    ])
+
+class SemaphoreDataType:
+    
+    '''DOPAL data-type definition for the Azureus "Semaphore" class.'''
+    
+    def get_xml_type():
+        return "Semaphore"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    # Note: reserve is overloaded on its parameter list.
+    __az_methods__ = AzureusMethods([
+        _method('release'),
+        _method('reserve'),
+        _method('reserve', ['long'], 'boolean'),
+        _method('reserveIfAvailable', [], 'boolean'),
+    ])
+
+class ShortCutsDataType:
+    
+    '''DOPAL data-type definition for the Azureus "ShortCuts" class.'''
+    
+    def get_xml_type():
+        return "ShortCuts"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('getDownload', ['byte[]'], 'Download'),
+        _method('getDownloadStats', ['byte[]'], 'DownloadStats'),
+        _method('removeDownload', ['byte[]']),
+        _method('restartDownload', ['byte[]']),
+        _method('stopDownload', ['byte[]']),
+    ])
+
+class UtilitiesDataType:
+    
+    '''DOPAL data-type definition for the Azureus "Utilities" class.'''
+    
+    def get_xml_type():
+        return "Utilities"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   ByteBuffer allocateDirectByteBuffer(int)
+    #   void createThread(String, Runnable)
+    #   void freeDirectByteBuffer(ByteBuffer)
+    #   InputStream getImageAsStream(String)
+    #   InetAddress getPublicAddress()
+    #   Map readResilientBEncodedFile(File, String, boolean)
+    #   String reverseDNSLookup(InetAddress)
+    #   void writeResilientBEncodedFile(File, String, Map, boolean)
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    # Note: several methods are overloaded on their parameter lists.
+    __az_methods__ = AzureusMethods([
+        _method('allocatePooledByteBuffer', ['byte[]'], 'PooledByteBuffer'),
+        _method('allocatePooledByteBuffer', ['int'], 'PooledByteBuffer'),
+        _method('compareVersions', ['String', 'String'], 'int'),
+        _method('createAggregatedDispatcher', ['long', 'long'], 'AggregatedDispatcher'),
+        _method('createAggregatedList', ['AggregatedListAcceptor', 'long', 'long'], 'AggregatedList'),
+        _method('createProcess', ['String']),
+        _method('createTimer', ['String', 'boolean'], 'UTTimer'),
+        _method('createTimer', ['String'], 'UTTimer'),
+        _method('createWrapper', ['byte[]'], 'ByteArrayWrapper'),
+        _method('getAzureusProgramDir', [], 'String'),
+        _method('getAzureusUserDir', [], 'String'),
+        _method('getCurrentSystemTime', [], 'long'),
+        _method('getFormatters', [], 'Formatters'),
+        _method('getLocaleUtilities', [], 'LocaleUtilities'),
+        _method('getMonitor', [], 'Monitor'),
+        _method('getResourceDownloaderFactory', [], 'ResourceDownloaderFactory'),
+        _method('getResourceUploaderFactory', [], 'ResourceUploaderFactory'),
+        _method('getRSSFeed', ['URL'], 'RSSFeed'),
+        _method('getRSSFeed', ['ResourceDownloader'], 'RSSFeed'),
+        _method('getSecurityManager', [], 'SESecurityManager'),
+        _method('getSemaphore', [], 'Semaphore'),
+        _method('getSimpleXMLParserDocumentFactory', [], 'SimpleXMLParserDocumentFactory'),
+        _method('isCVSVersion', [], 'boolean'),
+        _method('isLinux', [], 'boolean'),
+        _method('isOSX', [], 'boolean'),
+        _method('isSolaris', [], 'boolean'),
+        _method('isWindows', [], 'boolean'),
+    ])
+
+class UTTimerDataType:
+    
+    '''DOPAL data-type definition for the Azureus "UTTimer" class.'''
+    
+    def get_xml_type():
+        return "UTTimer"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('addEvent', ['long', 'UTTimerEventPerformer'], 'UTTimerEvent'),
+        _method('addPeriodicEvent', ['long', 'UTTimerEventPerformer'], 'UTTimerEvent'),
+        _method('destroy'),
+    ])
+
+class UTTimerEventDataType:
+    
+    '''DOPAL data-type definition for the Azureus "UTTimerEvent" class.'''
+    
+    def get_xml_type():
+        return "UTTimerEvent"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('cancel'),
+    ])
+
+class UTTimerEventPerformerDataType:
+    
+    '''DOPAL data-type definition for the Azureus "UTTimerEventPerformer" class.'''
+    
+    def get_xml_type():
+        return "UTTimerEventPerformer"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('perform', ['UTTimerEvent']),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.utils.resourcedownloader
+#
+#------------------------------------------------------------------------------
+
+class ResourceDownloaderDataType:
+    
+    '''DOPAL data-type definition for the Azureus "ResourceDownloader" class.'''
+    
+    def get_xml_type():
+        return "ResourceDownloader"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remote constants defined on this class (Python name -> value).
+    __az_constants__ = {
+        'PR_STRING_CONTENT_TYPE': "ContentType",
+    }
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   InputStream download()
+    #   Object getProperty(String)
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('addListener', ['ResourceDownloaderListener']),
+        _method('asyncDownload'),
+        _method('cancel'),
+        _method('getName', [], 'String'),
+        _method('getSize', [], 'long'),
+        _method('isCancelled', [], 'boolean'),
+        _method('removeListener', ['ResourceDownloaderListener']),
+        _method('reportActivity', ['String']),
+    ])
+
+class ResourceDownloaderDelayedFactoryDataType:
+    
+    '''DOPAL data-type definition for the Azureus "ResourceDownloaderDelayedFactory" class.'''
+    
+    def get_xml_type():
+        return "ResourceDownloaderDelayedFactory"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('create', [], 'ResourceDownloader'),
+    ])
+
+class ResourceDownloaderFactoryDataType:
+    
+    '''DOPAL data-type definition for the Azureus "ResourceDownloaderFactory" class.'''
+    
+    def get_xml_type():
+        return "ResourceDownloaderFactory"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    # Note: several methods are overloaded on their parameter lists.
+    __az_methods__ = AzureusMethods([
+        _method('create', ['File'], 'ResourceDownloader'),
+        _method('create', ['ResourceDownloaderDelayedFactory'], 'ResourceDownloader'),
+        _method('create', ['URL', 'String', 'String'], 'ResourceDownloader'),
+        _method('create', ['URL'], 'ResourceDownloader'),
+        _method('getAlternateDownloader', ['ResourceDownloader[]', 'int'], 'ResourceDownloader'),
+        _method('getAlternateDownloader', ['ResourceDownloader[]'], 'ResourceDownloader'),
+        _method('getMetaRefreshDownloader', ['ResourceDownloader'], 'ResourceDownloader'),
+        _method('getRandomDownloader', ['ResourceDownloader[]'], 'ResourceDownloader'),
+        _method('getRandomDownloader', ['ResourceDownloader[]', 'int'], 'ResourceDownloader'),
+        _method('getRetryDownloader', ['ResourceDownloader', 'int'], 'ResourceDownloader'),
+        _method('getSuffixBasedDownloader', ['ResourceDownloader'], 'ResourceDownloader'),
+        _method('getTimeoutDownloader', ['ResourceDownloader', 'int'], 'ResourceDownloader'),
+        _method('getTorrentDownloader', ['ResourceDownloader', 'boolean'], 'ResourceDownloader'),
+        _method('getTorrentDownloader', ['ResourceDownloader', 'boolean', 'File'], 'ResourceDownloader'),
+    ])
+
+class ResourceDownloaderListenerDataType:
+    
+    '''DOPAL data-type definition for the Azureus "ResourceDownloaderListener" class.'''
+    
+    def get_xml_type():
+        return "ResourceDownloaderListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   boolean completed(ResourceDownloader, InputStream)
+    #   void failed(ResourceDownloader, ResourceDownloaderException)
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('reportActivity', ['ResourceDownloader', 'String']),
+        _method('reportPercentComplete', ['ResourceDownloader', 'int']),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.utils.resourceuploader
+#
+#------------------------------------------------------------------------------
+
+class ResourceUploaderDataType:
+    
+    '''DOPAL data-type definition for the Azureus "ResourceUploader" class.
+    
+    No methods are remotely supported (see the note below), so only the
+    XML type name is defined.
+    '''
+    
+    def get_xml_type():
+        return "ResourceUploader"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   InputStream upload()
+    #   
+
+class ResourceUploaderFactoryDataType:
+    
+    '''DOPAL data-type definition for the Azureus "ResourceUploaderFactory" class.
+    
+    No methods are remotely supported (see the note below), so only the
+    XML type name is defined.
+    '''
+    
+    def get_xml_type():
+        return "ResourceUploaderFactory"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   ResourceUploader create(URL, InputStream)
+    #   ResourceUploader create(URL, InputStream, String, String)
+    #   
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.utils.security
+#
+#------------------------------------------------------------------------------
+
+class CertificateListenerDataType:
+    
+    '''DOPAL data-type definition for the Azureus "CertificateListener" class.
+    
+    No methods are remotely supported (see the note below), so only the
+    XML type name is defined.
+    '''
+    
+    def get_xml_type():
+        return "CertificateListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   boolean trustCertificate(String, X509Certificate)
+    #   
+
+class PasswordListenerDataType:
+    
+    '''DOPAL data-type definition for the Azureus "PasswordListener" class.'''
+    
+    def get_xml_type():
+        return "PasswordListener"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   PasswordAuthentication getAuthentication(String, URL)
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('setAuthenticationOutcome', ['String', 'URL', 'boolean']),
+    ])
+
+class SESecurityManagerDataType:
+    
+    '''DOPAL data-type definition for the Azureus "SESecurityManager" class.'''
+    
+    def get_xml_type():
+        return "SESecurityManager"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Certificate createSelfSignedCertificate(String, String, int)
+    #   KeyStore getKeyStore()
+    #   KeyStore getTrustStore()
+    #   SSLSocketFactory installServerCertificate(URL)
+    #   void runWithAuthenticator(Authenticator, Runnable)
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('addCertificateListener', ['CertificateListener']),
+        _method('addPasswordListener', ['PasswordListener']),
+        _method('calculateSHA1', ['byte[]'], 'byte[]'),
+        _method('removeCertificateListener', ['CertificateListener']),
+        _method('removePasswordListener', ['PasswordListener']),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.utils.xml.rss
+#
+#------------------------------------------------------------------------------
+
+class RSSChannelDataType:
+    
+    '''DOPAL data-type definition for the Azureus "RSSChannel" class.'''
+    
+    def get_xml_type():
+        return "RSSChannel"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Date getPublicationDate()
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('getDescription', [], 'String'),
+        _method('getItems', [], 'RSSItem[]'),
+        _method('getLink', [], 'URL'),
+        _method('getNode', [], 'SimpleXMLParserDocumentNode'),
+        _method('getTitle', [], 'String'),
+    ])
+
+class RSSFeedDataType:
+    
+    '''DOPAL data-type definition for the Azureus "RSSFeed" class.'''
+    
+    def get_xml_type():
+        return "RSSFeed"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('getChannels', [], 'RSSChannel[]'),
+    ])
+
+class RSSItemDataType:
+    
+    '''DOPAL data-type definition for the Azureus "RSSItem" class.'''
+    
+    def get_xml_type():
+        return "RSSItem"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   Date getPublicationDate()
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('getDescription', [], 'String'),
+        _method('getLink', [], 'URL'),
+        _method('getNode', [], 'SimpleXMLParserDocumentNode'),
+        _method('getTitle', [], 'String'),
+    ])
+
+
+#------------------------------------------------------------------------------
+#
+# Classes for package:
+#     org.gudy.azureus2.plugins.utils.xml.simpleparser
+#
+#------------------------------------------------------------------------------
+
+class SimpleXMLParserDocumentDataType:
+    
+    '''DOPAL data-type definition for the Azureus "SimpleXMLParserDocument" class.'''
+    
+    def get_xml_type():
+        return "SimpleXMLParserDocument"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('getAttribute', ['String'], 'SimpleXMLParserDocumentAttribute'),
+        _method('getAttributes', [], 'SimpleXMLParserDocumentAttribute[]'),
+        _method('getChild', ['String'], 'SimpleXMLParserDocumentNode'),
+        _method('getChildren', [], 'SimpleXMLParserDocumentNode[]'),
+        _method('getName', [], 'String'),
+        _method('getValue', [], 'String'),
+        _method('print'),
+    ])
+
+class SimpleXMLParserDocumentAttributeDataType:
+    
+    '''DOPAL data-type definition for the Azureus "SimpleXMLParserDocumentAttribute" class.'''
+    
+    def get_xml_type():
+        return "SimpleXMLParserDocumentAttribute"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('getName', [], 'String'),
+        _method('getValue', [], 'String'),
+    ])
+
+class SimpleXMLParserDocumentFactoryDataType:
+    
+    '''DOPAL data-type definition for the Azureus "SimpleXMLParserDocumentFactory" class.'''
+    
+    def get_xml_type():
+        return "SimpleXMLParserDocumentFactory"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # 
+    # The following methods are not available, because either the return
+    # type is not supported or / and one or more parameters are not
+    # remotely supported:
+    # 
+    #   SimpleXMLParserDocument create(InputStream)
+    #   
+    # Remotely supported methods - (name, parameter types, return type).
+    # Note: create is overloaded on its parameter list.
+    __az_methods__ = AzureusMethods([
+        _method('create', ['File'], 'SimpleXMLParserDocument'),
+        _method('create', ['String'], 'SimpleXMLParserDocument'),
+    ])
+
+class SimpleXMLParserDocumentNodeDataType:
+    
+    '''DOPAL data-type definition for the Azureus "SimpleXMLParserDocumentNode" class.'''
+    
+    def get_xml_type():
+        return "SimpleXMLParserDocumentNode"
+    get_xml_type = staticmethod(get_xml_type)
+    
+    # Remotely supported methods - (name, parameter types, return type).
+    __az_methods__ = AzureusMethods([
+        _method('getAttribute', ['String'], 'SimpleXMLParserDocumentAttribute'),
+        _method('getAttributes', [], 'SimpleXMLParserDocumentAttribute[]'),
+        _method('getChild', ['String'], 'SimpleXMLParserDocumentNode'),
+        _method('getChildren', [], 'SimpleXMLParserDocumentNode[]'),
+        _method('getName', [], 'String'),
+        _method('getValue', [], 'String'),
+    ])
+
+del _method
+
+# XXX: Sort this out.
+PluginConfigDataType.__az_attributes__['cached_property_values'] = 'int[]'
+
+# Temporary value - should be removed once the import has finished.
+import dopal
+__epydoc_mode = dopal.__dopal_mode__ == 2
+
+classes_to_undefine = []
+
+_class_map = {}
+for classname, classobject in locals().items():
+    if hasattr(classobject, 'get_xml_type'):
+        _class_map[classname] = classobject
+        if __epydoc_mode:
+
+            # These classes are required to be defined to get the epydoc code
+            # generation to work, because we have custom methods defined.
+            #
+            # We used to generate API documentation for all classes - but this
+            # was generating far too much documentation. So now we just filter
+            # it down to those classes which need to exist for obj_impl to be
+            # imported properly.
+            accepted_classes = [
+                'Download', 'DownloadAnnounceResult', 'DownloadScrapeResult',
+                'DownloadStats', 'PluginConfig', 'PluginInterface', 'Torrent',
+                'DiskManagerFileInfo', 'LoggerChannel', 'Peer',
+            ]
+            classobject.__plugin_class__ = True
+            if classobject.get_xml_type() not in accepted_classes:
+                classes_to_undefine.append(classname)
+del classname, classobject, __epydoc_mode
+
+# Epydoc code - prevents these classes from being generated.
+for class_to_undefine in classes_to_undefine:
+    del globals()[class_to_undefine]
+    del _class_map[class_to_undefine]
+    del class_to_undefine
+del classes_to_undefine

+ 318 - 0
html/bin/clients/fluazu/dopal/classes.py

@@ -0,0 +1,318 @@
+# File: classes.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Contains basic class definitions for objects - use this module when doing
+instance checking.
+
+This module contains a few utility functions for determining which classes
+are supported by DOPAL - it maintains an internal list of all classes that
+DOPAL is aware of.
+'''
+
+from dopal.aztypes import get_component_type as _get_component_type
+
+#
+# List of classes created by classes_make.py.
+#
+azureus_class_list = [
+    ('org.gudy.azureus2.plugins', 'LaunchablePlugin'),
+    ('org.gudy.azureus2.plugins', 'Plugin'),
+    ('org.gudy.azureus2.plugins', 'PluginConfig'),
+    ('org.gudy.azureus2.plugins', 'PluginConfigListener'),
+    ('org.gudy.azureus2.plugins', 'PluginEvent'),
+    ('org.gudy.azureus2.plugins', 'PluginEventListener'),
+    ('org.gudy.azureus2.plugins', 'PluginInterface'),
+    ('org.gudy.azureus2.plugins', 'PluginListener'),
+    ('org.gudy.azureus2.plugins', 'PluginManager'),
+    ('org.gudy.azureus2.plugins', 'PluginManagerArgumentHandler'),
+    ('org.gudy.azureus2.plugins', 'PluginManagerDefaults'),
+    ('org.gudy.azureus2.plugins', 'UnloadablePlugin'),
+    ('org.gudy.azureus2.plugins.clientid', 'ClientIDGenerator'),
+    ('org.gudy.azureus2.plugins.clientid', 'ClientIDManager'),
+    ('org.gudy.azureus2.plugins.config', 'ConfigParameter'),
+    ('org.gudy.azureus2.plugins.config', 'ConfigParameterListener'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabase'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseContact'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseEvent'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseKey'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseListener'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseProgressListener'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseTransferHandler'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseTransferType'),
+    ('org.gudy.azureus2.plugins.ddb', 'DistributedDatabaseValue'),
+    ('org.gudy.azureus2.plugins.disk', 'DiskManager'),
+    ('org.gudy.azureus2.plugins.disk', 'DiskManagerChannel'),
+    ('org.gudy.azureus2.plugins.disk', 'DiskManagerEvent'),
+    ('org.gudy.azureus2.plugins.disk', 'DiskManagerFileInfo'),
+    ('org.gudy.azureus2.plugins.disk', 'DiskManagerListener'),
+    ('org.gudy.azureus2.plugins.disk', 'DiskManagerRequest'),
+    ('org.gudy.azureus2.plugins.download', 'Download'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadAnnounceResult'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadAnnounceResultPeer'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadListener'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadManager'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadManagerListener'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadManagerStats'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadPeerListener'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadPropertyEvent'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadPropertyListener'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadScrapeResult'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadStats'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadTrackerListener'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadWillBeAddedListener'),
+    ('org.gudy.azureus2.plugins.download', 'DownloadWillBeRemovedListener'),
+    ('org.gudy.azureus2.plugins.download.session', 'SessionAuthenticator'),
+    ('org.gudy.azureus2.plugins.installer', 'FilePluginInstaller'),
+    ('org.gudy.azureus2.plugins.installer', 'InstallablePlugin'),
+    ('org.gudy.azureus2.plugins.installer', 'PluginInstaller'),
+    ('org.gudy.azureus2.plugins.installer', 'PluginInstallerListener'),
+    ('org.gudy.azureus2.plugins.installer', 'StandardPlugin'),
+    ('org.gudy.azureus2.plugins.ipc', 'IPCInterface'),
+    ('org.gudy.azureus2.plugins.ipfilter', 'IPBlocked'),
+    ('org.gudy.azureus2.plugins.ipfilter', 'IPFilter'),
+    ('org.gudy.azureus2.plugins.ipfilter', 'IPRange'),
+    ('org.gudy.azureus2.plugins.logging', 'Logger'),
+    ('org.gudy.azureus2.plugins.logging', 'LoggerAlertListener'),
+    ('org.gudy.azureus2.plugins.logging', 'LoggerChannel'),
+    ('org.gudy.azureus2.plugins.logging', 'LoggerChannelListener'),
+    ('org.gudy.azureus2.plugins.messaging', 'Message'),
+    ('org.gudy.azureus2.plugins.messaging', 'MessageManager'),
+    ('org.gudy.azureus2.plugins.messaging', 'MessageManagerListener'),
+    ('org.gudy.azureus2.plugins.messaging', 'MessageStreamDecoder'),
+    ('org.gudy.azureus2.plugins.messaging', 'MessageStreamEncoder'),
+    ('org.gudy.azureus2.plugins.network', 'Connection'),
+    ('org.gudy.azureus2.plugins.network', 'ConnectionListener'),
+    ('org.gudy.azureus2.plugins.network', 'ConnectionManager'),
+    ('org.gudy.azureus2.plugins.network', 'IncomingMessageQueue'),
+    ('org.gudy.azureus2.plugins.network', 'IncomingMessageQueueListener'),
+    ('org.gudy.azureus2.plugins.network', 'OutgoingMessageQueue'),
+    ('org.gudy.azureus2.plugins.network', 'OutgoingMessageQueueListener'),
+    ('org.gudy.azureus2.plugins.network', 'RawMessage'),
+    ('org.gudy.azureus2.plugins.network', 'Transport'),
+    ('org.gudy.azureus2.plugins.peers', 'Peer'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerEvent'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerListener'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerListener2'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerManager'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerManagerListener'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerManagerStats'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerReadRequest'),
+    ('org.gudy.azureus2.plugins.peers', 'PeerStats'),
+    ('org.gudy.azureus2.plugins.peers.protocol', 'PeerProtocolBT'),
+    ('org.gudy.azureus2.plugins.peers.protocol', 'PeerProtocolExtensionHandler'),
+    ('org.gudy.azureus2.plugins.peers.protocol', 'PeerProtocolManager'),
+    ('org.gudy.azureus2.plugins.platform', 'PlatformManager'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareItem'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareManager'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareManagerListener'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareResource'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareResourceDir'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareResourceDirContents'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareResourceEvent'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareResourceFile'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareResourceListener'),
+    ('org.gudy.azureus2.plugins.sharing', 'ShareResourceWillBeDeletedListener'),
+    ('org.gudy.azureus2.plugins.torrent', 'Torrent'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentAnnounceURLList'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentAnnounceURLListSet'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentAttribute'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentAttributeEvent'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentAttributeListener'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentDownloader'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentFile'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentManager'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentManagerEvent'),
+    ('org.gudy.azureus2.plugins.torrent', 'TorrentManagerListener'),
+    ('org.gudy.azureus2.plugins.tracker', 'Tracker'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerListener'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerPeer'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerPeerEvent'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerPeerListener'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerTorrent'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerTorrentListener'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerTorrentRequest'),
+    ('org.gudy.azureus2.plugins.tracker', 'TrackerTorrentWillBeRemovedListener'),
+    ('org.gudy.azureus2.plugins.tracker.web', 'TrackerAuthenticationListener'),
+    ('org.gudy.azureus2.plugins.tracker.web', 'TrackerWebContext'),
+    ('org.gudy.azureus2.plugins.tracker.web', 'TrackerWebPageGenerator'),
+    ('org.gudy.azureus2.plugins.tracker.web', 'TrackerWebPageRequest'),
+    ('org.gudy.azureus2.plugins.tracker.web', 'TrackerWebPageResponse'),
+    ('org.gudy.azureus2.plugins.ui', 'Graphic'),
+    ('org.gudy.azureus2.plugins.ui', 'UIInstance'),
+    ('org.gudy.azureus2.plugins.ui', 'UIInstanceFactory'),
+    ('org.gudy.azureus2.plugins.ui', 'UIManager'),
+    ('org.gudy.azureus2.plugins.ui', 'UIManagerEvent'),
+    ('org.gudy.azureus2.plugins.ui', 'UIManagerEventListener'),
+    ('org.gudy.azureus2.plugins.ui', 'UIManagerListener'),
+    ('org.gudy.azureus2.plugins.ui', 'UIPluginView'),
+    ('org.gudy.azureus2.plugins.ui.components', 'UIComponent'),
+    ('org.gudy.azureus2.plugins.ui.components', 'UIProgressBar'),
+    ('org.gudy.azureus2.plugins.ui.components', 'UIPropertyChangeEvent'),
+    ('org.gudy.azureus2.plugins.ui.components', 'UIPropertyChangeListener'),
+    ('org.gudy.azureus2.plugins.ui.components', 'UITextArea'),
+    ('org.gudy.azureus2.plugins.ui.components', 'UITextField'),
+    ('org.gudy.azureus2.plugins.ui.config', 'ActionParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'BooleanParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'ConfigSection'),
+    ('org.gudy.azureus2.plugins.ui.config', 'ConfigSectionSWT'),
+    ('org.gudy.azureus2.plugins.ui.config', 'DirectoryParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'EnablerParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'IntParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'LabelParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'Parameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'ParameterGroup'),
+    ('org.gudy.azureus2.plugins.ui.config', 'ParameterListener'),
+    ('org.gudy.azureus2.plugins.ui.config', 'PasswordParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'PluginConfigUIFactory'),
+    ('org.gudy.azureus2.plugins.ui.config', 'StringListParameter'),
+    ('org.gudy.azureus2.plugins.ui.config', 'StringParameter'),
+    ('org.gudy.azureus2.plugins.ui.menus', 'MenuItem'),
+    ('org.gudy.azureus2.plugins.ui.menus', 'MenuItemFillListener'),
+    ('org.gudy.azureus2.plugins.ui.menus', 'MenuItemListener'),
+    ('org.gudy.azureus2.plugins.ui.model', 'BasicPluginConfigModel'),
+    ('org.gudy.azureus2.plugins.ui.model', 'BasicPluginViewModel'),
+    ('org.gudy.azureus2.plugins.ui.model', 'PluginConfigModel'),
+    ('org.gudy.azureus2.plugins.ui.model', 'PluginViewModel'),
+    ('org.gudy.azureus2.plugins.ui.SWT', 'GraphicSWT'),
+    ('org.gudy.azureus2.plugins.ui.SWT', 'SWTManager'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableCell'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableCellAddedListener'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableCellDisposeListener'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableCellMouseListener'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableCellRefreshListener'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableCellToolTipListener'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableColumn'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableContextMenuItem'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableManager'),
+    ('org.gudy.azureus2.plugins.ui.tables', 'TableRow'),
+    ('org.gudy.azureus2.plugins.ui.tables.mytorrents', 'MyTorrentsTableItem'),
+    ('org.gudy.azureus2.plugins.ui.tables.mytorrents', 'PluginMyTorrentsItem'),
+    ('org.gudy.azureus2.plugins.ui.tables.mytorrents', 'PluginMyTorrentsItemFactory'),
+    ('org.gudy.azureus2.plugins.ui.tables.peers', 'PeerTableItem'),
+    ('org.gudy.azureus2.plugins.ui.tables.peers', 'PluginPeerItem'),
+    ('org.gudy.azureus2.plugins.ui.tables.peers', 'PluginPeerItemFactory'),
+    ('org.gudy.azureus2.plugins.update', 'UpdatableComponent'),
+    ('org.gudy.azureus2.plugins.update', 'Update'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateChecker'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateCheckerListener'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateCheckInstance'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateCheckInstanceListener'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateInstaller'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateListener'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateManager'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateManagerDecisionListener'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateManagerListener'),
+    ('org.gudy.azureus2.plugins.update', 'UpdateProgressListener'),
+    ('org.gudy.azureus2.plugins.utils', 'AggregatedDispatcher'),
+    ('org.gudy.azureus2.plugins.utils', 'AggregatedList'),
+    ('org.gudy.azureus2.plugins.utils', 'AggregatedListAcceptor'),
+    ('org.gudy.azureus2.plugins.utils', 'ByteArrayWrapper'),
+    ('org.gudy.azureus2.plugins.utils', 'Formatters'),
+    ('org.gudy.azureus2.plugins.utils', 'LocaleDecoder'),
+    ('org.gudy.azureus2.plugins.utils', 'LocaleListener'),
+    ('org.gudy.azureus2.plugins.utils', 'LocaleUtilities'),
+    ('org.gudy.azureus2.plugins.utils', 'Monitor'),
+    ('org.gudy.azureus2.plugins.utils', 'PooledByteBuffer'),
+    ('org.gudy.azureus2.plugins.utils', 'Semaphore'),
+    ('org.gudy.azureus2.plugins.utils', 'ShortCuts'),
+    ('org.gudy.azureus2.plugins.utils', 'Utilities'),
+    ('org.gudy.azureus2.plugins.utils', 'UTTimer'),
+    ('org.gudy.azureus2.plugins.utils', 'UTTimerEvent'),
+    ('org.gudy.azureus2.plugins.utils', 'UTTimerEventPerformer'),
+    ('org.gudy.azureus2.plugins.utils.resourcedownloader', 'ResourceDownloader'),
+    ('org.gudy.azureus2.plugins.utils.resourcedownloader', 'ResourceDownloaderDelayedFactory'),
+    ('org.gudy.azureus2.plugins.utils.resourcedownloader', 'ResourceDownloaderFactory'),
+    ('org.gudy.azureus2.plugins.utils.resourcedownloader', 'ResourceDownloaderListener'),
+    ('org.gudy.azureus2.plugins.utils.resourceuploader', 'ResourceUploader'),
+    ('org.gudy.azureus2.plugins.utils.resourceuploader', 'ResourceUploaderFactory'),
+    ('org.gudy.azureus2.plugins.utils.security', 'CertificateListener'),
+    ('org.gudy.azureus2.plugins.utils.security', 'PasswordListener'),
+    ('org.gudy.azureus2.plugins.utils.security', 'SESecurityManager'),
+    ('org.gudy.azureus2.plugins.utils.xml.rss', 'RSSChannel'),
+    ('org.gudy.azureus2.plugins.utils.xml.rss', 'RSSFeed'),
+    ('org.gudy.azureus2.plugins.utils.xml.rss', 'RSSItem'),
+    ('org.gudy.azureus2.plugins.utils.xml.simpleparser', 'SimpleXMLParserDocument'),
+    ('org.gudy.azureus2.plugins.utils.xml.simpleparser', 'SimpleXMLParserDocumentAttribute'),
+    ('org.gudy.azureus2.plugins.utils.xml.simpleparser', 'SimpleXMLParserDocumentFactory'),
+    ('org.gudy.azureus2.plugins.utils.xml.simpleparser', 'SimpleXMLParserDocumentNode'),
+]
+
# Record the existence of the classes which are mentioned above - the
# class names are deduplicated by using them as dictionary keys.
# (We need this for quick membership lookups in is_azureus_class.)
_known_class_names = dict([(cls_tpl[1], None) for cls_tpl in azureus_class_list]).keys()

import dopal
if dopal.__dopal_mode__ == 1:
    # Debug mode - check we don't get any name clashes (every class name
    # in azureus_class_list must be unique across all packages, because
    # lookups are done by bare class name only).
    if len(azureus_class_list) != len(_known_class_names):
        raise RuntimeError, 'difference in class sizes'
+
# We do more to generate a nice docstring in epydoc mode.
# Bugfix for tf-b4rt: don't try to use/change __doc__ if it's
# empty, which is the case if Python was invoked with -OO
# (except for early Python 2.5 releases where -OO is broken:
# http://mail.python.org/pipermail/python-bugs-list/2007-June/038590.html).
if __doc__ is not None and dopal.__dopal_mode__ == 2:
    # Group the class names by their containing Java package.
    grouped_classes = {}
    for package_name, class_name in azureus_class_list:
        grouped_classes.setdefault(package_name, []).append(class_name)

    # Sort package names so the generated listing is deterministic.
    ordered_grouped_packages = grouped_classes.keys()
    ordered_grouped_packages.sort()

    generated_lines = []

    # URL templates pointing at Azureus's online Javadoc documentation.
    base_url = 'http://azureus.sourceforge.net/plugins/docCVS/'
    package_tmpl = base_url + '%s/package-summary.html'
    class_tmpl = base_url + '%s/%s.html'
    for package_name in ordered_grouped_packages:
        package_path = package_name.replace('.', '/')
        full_package_url = package_tmpl % package_path
        # Epytext markup - C{...} renders as code, U{text<url>} is a link.
        generated_lines.append(
            '\n  - Package C{U{%(package_name)s<%(full_package_url)s>}}' % vars()
        )
        for class_name in grouped_classes[package_name]:
            full_class_url = class_tmpl % (package_path, class_name)
            generated_lines.append(
                '    - Class C{U{%(class_name)s<%(full_class_url)s>}}' % vars()
            )

    __doc__ += "\n\nThe following classes are well-supported by DOPAL (the "
    __doc__ += 'links below link to the Azureus\'s own '
    __doc__ += 'U{Javadoc API documentation<%(base_url)s>}):\n' % vars()
    __doc__ +=  '\n'.join(generated_lines)

    # Clean up the module namespace - none of these temporaries should be
    # visible to importers.
    # NOTE(review): the first del assumes azureus_class_list was non-empty
    # (otherwise the loop variables were never bound) - confirm.
    del package_path, full_package_url, full_class_url
    del base_url, package_tmpl, class_tmpl
    del package_name, class_name, generated_lines
    del grouped_classes, ordered_grouped_packages
del azureus_class_list
+
def is_azureus_class(class_name):
    '''
    Indicates whether class_name names one of the Azureus plugin
    classes known to DOPAL (those listed in azureus_class_list above).
    '''
    for known_name in _known_class_names:
        if known_name == class_name:
            return True
    return False

# Every known class is acceptable as an argument class - no extra
# filtering is needed, so the argument check is just an alias.
is_azureus_argument_class = is_azureus_class
+
def is_azureus_return_class(class_name):
    '''
    Indicates whether class_name is acceptable as the type of a value
    returned by Azureus - either a known Azureus class, or an array
    type whose component type is a known Azureus class.
    '''
    if is_azureus_class(class_name):
        return True

    # Bugfix: the original code re-tested class_name here (which had
    # already failed the check above), so array types of Azureus classes
    # were never recognised. Test the extracted component type instead.
    # NOTE: _get_component_type is defined earlier in this module.
    class_component_type = _get_component_type(class_name)
    if class_component_type is not None:
        if is_azureus_class(class_component_type):
            return True

    return False

+ 510 - 0
html/bin/clients/fluazu/dopal/convert.py

@@ -0,0 +1,510 @@
+# File: convert.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Contains classes used to convert an XML structure back into an object
+structure.
+'''
+
+# We disable the override checks (subclasses of mixins are allowed to have
+# different signatures - arguments they want can be explicitly named, arguments
+# they don't want can be left unnamed in kwargs).
+#
+# We also disable the class attribute checks - converter calls a lot of methods
+# which are only defined in mixin classes.
+__pychecker__ = 'unusednames=attributes,kwargs,object_id,value,self no-override no-classattr no-objattr'
+
+from dopal.aztypes import is_array_type, get_component_type, \
+    is_java_argument_type, is_java_return_type
+from dopal.classes import is_azureus_argument_class, is_azureus_return_class
+from dopal.errors import AbortConversion, DelayConversion, SkipConversion, \
+    InvalidRemoteClassTypeError
+
+import types
+
from dopal.utils import Sentinel
# Category markers returned by categorise_object - every source value is
# classified as exactly one of these before being converted. Identity
# comparison ('is') is used throughout, hence sentinels rather than strings.
ATOM_TYPE = Sentinel('atom')
SEQUENCE_TYPE = Sentinel('sequence')
MAPPING_TYPE = Sentinel('mapping')
OBJECT_TYPE = Sentinel('object')
NULL_TYPE = Sentinel('null')
# Only the sentinel instances are public - drop the class itself.
del Sentinel
+
class Converter(object):
    '''
    Drives the conversion of one data structure into another.

    The convert method walks the source value recursively; the actual
    reading and writing primitives it calls (categorise_object,
    get_atomic_value, convert_atom, make_object and so on) are supplied
    by reader / writer mixin classes - see ReaderMixin and WriterMixin.
    '''

    def __call__(self, value, result_type=None):
        # Convenience entry point - convert a top-level value with no
        # parent context.
        return self.convert(value, source_parent=None, target_parent=None,
            attribute=None, sequence_index=None, suggested_type=result_type)

    def convert(self, value, **kwargs):
        '''
        Recursively convert value, dispatching on the category that
        categorise_object assigns to it (null, atom, sequence, mapping
        or object).
        '''

        # The keyword arguments we have here include data for the reader and
        # the writer. We separate kwargs into three parts -
        #   1) Reader-only values.
        #   2) Writer-only values.
        #   3) All keyword arguments.
        reader_kwargs = kwargs.copy()
        writer_kwargs = kwargs.copy()
        convert_kwargs = kwargs
        del kwargs

        # Each side sees its own notion of 'parent' - the reader gets the
        # source parent, the writer gets the target parent.
        writer_kwargs['parent'] = writer_kwargs['target_parent']
        reader_kwargs['parent'] = reader_kwargs['source_parent']
        del reader_kwargs['target_parent']
        del reader_kwargs['source_parent']
        del writer_kwargs['target_parent']
        del writer_kwargs['source_parent']

        # Keyword arguments:
        #   attribute
        #   mapping_key
        #   sequence_index
        #   suggested_type
        #
        #   parent (reader_kwargs and writer_kwargs, not in standard kwargs)
        #   source_parent (not in reader_kwargs, not in writer_kwargs)
        #   target_parent (not in reader_kwargs, not in writer_kwargs)
        conversion_type = self.categorise_object(value, **reader_kwargs)
        if conversion_type is NULL_TYPE:
            return None

        elif conversion_type is ATOM_TYPE:
            atomic_value = self.get_atomic_value(value, **reader_kwargs)
            return self.convert_atom(atomic_value, **writer_kwargs)

        elif conversion_type is SEQUENCE_TYPE:
            accepted_seq = []
            rejected_seq = []

            # The item we are currently looking at (value) is ignored.
            # It is a normal sequence which doesn't contain any useful
            # data, so we act as if each item in the sequence is
            # actually an attribute of the source object (where the
            # attribute name is taken from the attribute name of the
            # list).

            # Note - I would use enumerate, but I'm trying to leave this
            # Python 2.2 compatible.
            sequence_items = self.get_sequence_items(value, **reader_kwargs)
            for i in range(len(sequence_items)):
                item = sequence_items[i]

                this_kwargs = convert_kwargs.copy()
                this_kwargs['sequence_index'] = i
                this_kwargs['suggested_type'] = self.get_suggested_type_for_sequence_component(value, **writer_kwargs)

                try:
                    sub_element = self.convert(item, **this_kwargs)
                except SkipConversion, error:
                    # SkipConversion means "silently drop this item".
                    pass
                except AbortConversion, error:
                    import sys
                    self.handle_error(item, error, sys.exc_info()[2])
                    # Only reached if handle_error chose not to re-raise.
                    rejected_seq.append((item, error, sys.exc_info()[2]))
                else:
                    accepted_seq.append(sub_element)

                del this_kwargs

            if rejected_seq:
                self.handle_errors(rejected_seq, accepted_seq, conversion_type)

            return self.make_sequence(accepted_seq, **writer_kwargs)

        elif conversion_type is MAPPING_TYPE:
            accepted_dict = {}
            rejected_dict = {}

            # NOTE(review): XMLStructureReader.get_mapping_items returns a
            # dict; iterating a dict directly yields keys only, yet this
            # unpacks (key, value) pairs. The mapping path appears unused
            # in practice (see the reader's own comment) - confirm before
            # relying on it.
            for map_key, map_value in self.get_mapping_items(value, **reader_kwargs):
                this_kwargs = convert_kwargs.copy()
                this_kwargs['mapping_key'] = map_key
                this_kwargs['suggested_type'] = self.get_suggested_type_for_mapping_component(value, **this_kwargs)
                try:
                    converted_value = self.convert(map_value, **this_kwargs)
                except SkipConversion, error:
                    pass
                except AbortConversion, error:
                    import sys
                    self.handle_error(map_value, error, sys.exc_info()[2])
                    rejected_dict[map_key] = (map_value, error, sys.exc_info()[2])
                else:
                    accepted_dict[map_key] = converted_value

                del this_kwargs

            if rejected_dict:
                self.handle_errors(rejected_dict, accepted_dict, conversion_type)

            return self.make_mapping(accepted_dict, **writer_kwargs)

        elif conversion_type is OBJECT_TYPE:
            object_id = self.get_object_id(value, **reader_kwargs)
            source_attributes = self.get_object_attributes(value, **reader_kwargs)

            # Try and convert the object attributes first.
            #
            # If we can't, because the parent object is requested, then
            # we'll convert that first instead.
            #
            # If the code which converts the parent object requests that
            # the attributes should be defined first, then we just exit
            # with an error - we can't have attributes requesting that the
            # object is converted first, and the object requesting attributes
            # are converted first.
            try:
                attributes = self._get_object_attributes(value, None, source_attributes)
            except DelayConversion:
                # We will allow DelayConversions which arise from this block
                # to propogate.
                new_object = self.make_object(object_id, attributes=None, **writer_kwargs)
                attributes = self._get_object_attributes(value, new_object, source_attributes)
                self.add_attributes_to_object(attributes, new_object, **writer_kwargs)
            else:
                new_object = self.make_object(object_id, attributes, **writer_kwargs)

            return new_object

        else:
            raise ValueError, "bad result from categorise_object: %s" % conversion_type

    def _get_object_attributes(self, value, parent, source_attributes):
        '''
        Convert each source attribute of an object, returning a dict of
        the successfully converted attributes. parent may be None on the
        first pass (before the target object exists) - suggested-type
        hooks may raise DelayConversion in that case.
        '''

        accepted = {}
        rejected = {}

        for attribute_name, attribute_value in source_attributes.items():
            this_kwargs = {}
            this_kwargs['source_parent'] = value
            this_kwargs['target_parent'] = parent
            this_kwargs['attribute'] = attribute_name
            this_kwargs['mapping_key'] = None
            this_kwargs['sequence_index'] = None
            this_kwargs['suggested_type'] = self.get_suggested_type_for_attribute(value=attribute_value, parent=parent, attribute=attribute_name, mapping_key=None)

            try:
                converted_value = self.convert(attribute_value, **this_kwargs)
            except SkipConversion, error:
                pass
            except AbortConversion, error:
                import sys
                self.handle_error(attribute_value, error, sys.exc_info()[2])
                rejected[attribute_name] = (attribute_value, error, sys.exc_info()[2])
            else:
                accepted[attribute_name] = converted_value

        if rejected:
            self.handle_errors(rejected, accepted, OBJECT_TYPE)

        return accepted

    def handle_errors(self, rejected, accepted, conversion_type):
        '''
        Called when at least one sub-value failed to convert. Default
        behaviour: re-raise the first recorded error with its original
        traceback (Python 2 three-expression raise).
        '''
        if isinstance(rejected, dict):
            error_seq = rejected.itervalues()
        else:
            error_seq = iter(rejected)

        attribute, error, traceback = error_seq.next()
        raise error, None, traceback

    def handle_error(self, object_, error, traceback):
        # Called for each individual failure as it happens; the default
        # re-raises immediately, so handle_errors is normally not reached
        # unless a subclass overrides this to record-and-continue.
        raise error, None, traceback
+
class ReaderMixin(object):
    '''
    Mixin providing the "read" half of a Converter - extracting data
    from the source structure. The defaults treat the source value as
    already being in the desired form (identity reads).

    Subclasses must provide:

    def categorise_object(self, value, suggested_type, **kwargs):
    def get_object_id(self, value, **kwargs):
    def get_object_attributes(self, value, **kwargs):
    '''

    # You can raise DelayConversion here.
    def get_atomic_value(self, value, **kwargs):
        return value

    def get_sequence_items(self, value, **kwargs):
        return value

    def get_mapping_items(self, value, **kwargs):
        return value
+
class WriterMixin(object):
    '''
    Mixin providing the "write" half of a Converter - turning
    categorised source data into target values. The defaults pass
    sequences and mappings through untouched.

    Subclasses must provide:

    def make_object(self, object_id, attributes, **kwargs):
    '''

    def convert_atom(self, atomic_value, suggested_type, **kwargs):
        # You can raise DelayConversion here.
        if suggested_type is not None:
            from dopal.aztypes import unwrap_value
            return unwrap_value(atomic_value, suggested_type)
        # TODO: Add controls for unknown typed attributes.
        return atomic_value

    def make_sequence(self, sequence, **kwargs):
        # Converted sequences are returned as-is.
        return sequence

    def make_mapping(self, mapping, **kwargs):
        # Converted mappings are returned as-is.
        return mapping

    def add_attributes_to_object(self, attributes, new_object, **kwargs):
        # Hand the converted attribute dictionary over to the new object.
        new_object.update_remote_data(attributes)

    # By default we have no type information for contained values -
    # subclasses override these to supply suggested types.
    def get_suggested_type_for_sequence_component(self, value, **kwargs):
        return None

    def get_suggested_type_for_mapping_component(self, value, **kwargs):
        return None

    def get_suggested_type_for_attribute(self, value, **kwargs):
        return None
+
class XMLStructureReader(ReaderMixin):
    '''
    Reader mixin which interprets Azureus XML response content as
    conversion sources. Source values may be xml.dom nodes, plain
    strings, or None.
    '''

    def categorise_object(self, node, suggested_type, **kwargs):
        '''
        Decide how the given node should be converted - returns one of
        the NULL/ATOM/SEQUENCE/MAPPING/OBJECT sentinels, using the
        suggested (Java) type to disambiguate empty content.
        '''

        from xml.dom import Node

        if node is None:
            number_of_nodes = 0
            null_value = True
        elif isinstance(node, types.StringTypes):
            number_of_nodes = -1 # Means "no-node type".
            null_value = not node
        else:
            number_of_nodes = len(node.childNodes)
            null_value = not number_of_nodes

        # This is a bit ambiguous - how on earth are we meant to determine
        # this? We'll see whether an explicit type is given here, otherwise
        # we'll have to just guess.
        if null_value:
            if suggested_type == 'mapping':
                return MAPPING_TYPE
            elif is_array_type(suggested_type):
                return SEQUENCE_TYPE

            # If the suggested type is atomic, then we inform them that
            # it is an atomic object. Some atomic types make sense with
            # no nodes (like an empty string). Some don't, of course
            # (like an integer), but never mind. It's better to inform
            # the caller code that it is an atom if the desired type is
            # an atom - otherwise for empty strings, we will get None
            # instead.
            elif is_java_return_type(suggested_type):
                # We'll assume it is just an atom. It can't be an object
                # without an object ID.
                return ATOM_TYPE

            # Oh well, let's just say it's null then.
            else:
                return NULL_TYPE

        # Plain string input - always atomic.
        if number_of_nodes == -1:
            return ATOM_TYPE

        # A single text child means the element holds an atomic value.
        if number_of_nodes == 1 and node.firstChild.nodeType == Node.TEXT_NODE:
            return ATOM_TYPE

        # Azureus wraps list elements in ENTRY child elements.
        if number_of_nodes and node.firstChild.nodeName == 'ENTRY':
            return SEQUENCE_TYPE

        if suggested_type == 'mapping':
            return MAPPING_TYPE

        return OBJECT_TYPE

    def get_atomic_value(self, node, **kwargs):
        if node is None:
            # The only atomic type which provides an empty response are
            # string types, so we will return an empty string.
            return ''
        elif isinstance(node, types.StringTypes):
            return node
        else:
            from dopal.xmlutils import get_text_content
            return get_text_content(node)

    def get_sequence_items(self, node, **kwargs):
        # The sequence's items are the node's (ENTRY) children.
        if node is None:
            return []
        return node.childNodes

    def get_mapping_items(self, node, **kwargs):
        '''
        Return the node's children keyed by element name, raising
        AbortConversion on duplicate names.

        NOTE(review): Converter's mapping branch iterates this return
        value expecting (key, value) pairs, but iterating a dict yields
        keys only - this path appears unused (see comment below); confirm
        before relying on it.
        '''
        if node is None:
            return {}

        # Not actually used, but just in case...
        result_dict = {}
        for child_node in node.childNodes:
            if result_dict.has_key(child_node.nodeName):
                raise AbortConversion("duplicate attribute - " + child_node.nodeName, child_node)
            result_dict[child_node.nodeName] = child_node
        return result_dict

    def get_object_id(node, **kwargs):
        '''
        Extract the remote object ID from the node's _object_id child
        element, or return None if the node has none.
        '''
        if node is None:
            return None

        for child_node in node.childNodes:
            if child_node.nodeName == '_object_id':
                from dopal.xmlutils import get_text_content
                return long(get_text_content(child_node))
        else:
            return None

    # Used by StructuredResponse.get_object_id, so we make it static.
    get_object_id = staticmethod(get_object_id)

    def get_object_attributes(self, node, **kwargs):
        # Object attributes are the child elements, minus Azureus
        # housekeeping entries (azureus_* and connection/object IDs).
        result_dict = self.get_mapping_items(node, **kwargs)
        for key in result_dict.keys():
            if key.startswith('azureus_'):
                del result_dict[key]
            elif key in ['_connection_id', '_object_id']:
                del result_dict[key]
        return result_dict
+
class ObjectWriterMixin(WriterMixin):
    '''
    Writer mixin which creates remote object proxies. Subclasses decide
    which Python class represents a given remote type by overriding
    get_class_for_object / get_default_class_for_object.
    '''

    # Connection handed to each created object; set by the converter.
    connection = None

    # attributes may be None if not defined at this point.
    #
    # You can raise DelayConversion here.
    def make_object(self, object_id, attributes, suggested_type=None, parent=None, attribute=None, **kwargs):
        '''
        Instantiate the local class representing a remote object and, if
        already available, attach its converted attributes.

        Raises SkipConversion (nested value) or AbortConversion
        (top-level value) when no class can be determined - see the
        comment below for the reasoning.
        '''

        class_to_use = None
        if suggested_type is not None:
            class_to_use = self.get_class_for_object(suggested_type, attributes, parent, attribute)

        if class_to_use is None:
            class_to_use = self.get_default_class_for_object()
            if class_to_use is None:
                # TODO: Need to add control values:
                #   - Drop the attribute.
                #   - Put the attribute as is (convert it into a typeless
                #       object)
                #   - Raise an error.
                #
                # For now, we'll avoid creating the attribute altogether.
                #
                # Note - if the object has no parent, then that's a more
                # serious situation. We may actually be returning a blank
                # value instead of a representive object - in my opinion,
                # it is better to fail in these cases.
                #
                # A broken object (missing attributes) is more desirable than
                # having an object missing entirely if it is the actual object
                # being returned.
                if parent is None:
                    cls_to_use = AbortConversion
                else:
                    cls_to_use = SkipConversion

                raise cls_to_use(text="no default class defined by converter", obj=(parent, attribute, suggested_type))

                # Alternative error-based code to use:
                #
                #    err_kwargs = {}
                #    err_kwargs['obj'] = suggested_type
                #    if parent is None:
                #        if attribute is None:
                #            pass
                #        else:
                #            err_kwargs['text'] = 'attr=' + attribute
                #    else:
                #        err_kwargs['text'] = "%(parent)r.%(attribute)s" %
                #           locals()
                #    raise InvalidRemoteClassTypeError(**err_kwargs)

        result = class_to_use(self.connection, object_id)
        if attributes is not None:
            self.add_attributes_to_object(attributes, result)

        return result

    # Hook - return the class for the given remote type, or None.
    def get_class_for_object(self, suggested_type, attributes=None, parent=None, attribute=None):
        return None

    # Hook - return the fallback class, or None for no fallback.
    def get_default_class_for_object(self):
        return None
+
class RemoteObjectWriterMixin(ObjectWriterMixin):
    '''
    Writer mixin which maps remote type names to local proxy classes via
    the class_map dictionary, and asks the target parent object for the
    expected type of each named attribute.
    '''

    # Maps remote type name -> proxy class. A None key (if present)
    # provides the default class for unrecognised types.
    class_map = {}

    # XXX: This will need to be changed to something which will:
    #    - If true, raise an error if the parent does not return an appropriate
    #      type for any given attribute.
    #    - If false, will never complain.
    #    - If none (default), complain only when debug mode is on.
    force_attribute_types = False

    def get_class_for_object(self, suggested_type, attributes=None, parent=None, attribute=None):
        # Simple lookup in the class map (None when unmapped).
        if suggested_type is None:
            return None
        return self.class_map.get(suggested_type, None)

    def get_default_class_for_object(self):
        # The None key in class_map acts as the catch-all default;
        # otherwise defer to the superclass (which returns None).
        if self.class_map.has_key(None):
            return self.class_map[None]
        _super = super(RemoteObjectWriterMixin, self)
        return _super.get_default_class_for_object()

    def get_suggested_type_for_sequence_component(self, value, suggested_type, **kwargs):
        # Components of an array take the array's component type.
        if suggested_type is None:
            return None
        if is_array_type(suggested_type):
            return get_component_type(suggested_type)
        else:
            raise AbortConversion("parent of value is a sequence, but the suggested type is not an array type", obj=value)

    def _get_suggested_type_for_named_item(self, value, parent, attribute, mapping_key=None, **kwargs):
        # No target parent yet - ask the converter to build the parent
        # object first and retry (handled in Converter's OBJECT branch).
        if parent is None:
            raise DelayConversion

        result_type = None
        if hasattr(parent, '_get_type_for_attribute'):
            result_type = parent._get_type_for_attribute(attribute, mapping_key)

        if self.force_attribute_types and result_type is None:
            txt = "%(parent)r could not provide type for '%(attribute)s'"
            if mapping_key is not None:
                txt += ", [%(mapping_key)s]"
            raise AbortConversion(txt % locals())

        return result_type

    # Mapping components and object attributes are resolved identically -
    # both ask the parent object for the expected type.
    get_suggested_type_for_mapping_component = \
    get_suggested_type_for_attribute = _get_suggested_type_for_named_item
+
+
class RemoteObjectConverter(Converter,
    XMLStructureReader, RemoteObjectWriterMixin):
    '''
    Converter which reads Azureus XML response structures and writes
    remote object proxies bound to the given connection.
    '''

    def __init__(self, connection=None):
        # Bugfix: cooperative super() must name the class being defined.
        # The original called super(Converter, self), which skips
        # Converter itself in the MRO - harmless today (no base defines
        # __init__), but breaks silently if one ever does.
        super(RemoteObjectConverter, self).__init__()
        self.connection = connection
+
def is_azureus_argument_type(java_type):
    '''
    Indicates whether java_type can be sent to Azureus - either a plain
    Java argument type, or a known Azureus class.
    '''
    result = is_java_argument_type(java_type)
    if not result:
        result = is_azureus_argument_class(java_type)
    return result
+
def is_azureus_return_type(java_type):
    '''
    Indicates whether java_type can be returned by Azureus - either a
    plain Java return type, or an acceptable Azureus return class.
    '''
    result = is_java_return_type(java_type)
    if not result:
        result = is_azureus_return_class(java_type)
    return result

+ 1021 - 0
html/bin/clients/fluazu/dopal/core.py

@@ -0,0 +1,1021 @@
+# File: core.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Contains the main objects and functions required for DOPAL to be useful.
+'''
+
+# PyChecker seems to be complaining a lot about inconsistent return types
+# (especially returning different types of responses and returning different
+# types of exceptions), so we're switching it off.
+__pychecker__ = 'no-returnvalues'
+
from dopal.aztypes import unwrap_value

from dopal.errors import AzureusResponseXMLError, MissingObjectIDError, \
    RemoteInternalError, RemoteMethodError, LinkError, InvalidObjectIDError, \
    NoSuchMethodError, InvalidWrapTypeError, InvalidUnwrapTypeError, \
    InvalidRemoteObjectError, NoObjectIDGivenError, \
    NoEstablishedConnectionError, InvalidConnectionIDError, raise_as, \
    DopalPendingDeprecationWarning
+
+#
+# Low-level class, representing a link to a remote Azureus instance.
+#
+class AzureusLink(object):
+
+    def __init__(self): # AzureusLink
+        # Default values, can be changed via set_connection_details.
+        self.link_data = {
+            'host': '127.0.0.1', 'port': 6884, 'secure': False,
+            'user': None, 'password': ''}
+        self.debug = None
+
+    def get_cgi_path(self, auth_details=False, include_password=False): # AzureusLink
+        path_template = "%(protocol)s://%(auth)s%(host)s:%(port)s/process.cgi"
+        path_data = {}
+        path_data['host'] = self.link_data['host']
+        path_data['port'] = self.link_data['port']
+        path_data['user'] = self.link_data['user']
+
+        if self.link_data['secure']:
+            path_data['protocol'] = 'https'
+        else:
+            path_data['protocol'] = 'http'
+
+        if auth_details and self.link_data['user']:
+            if include_password:
+                #path_data['password'] = '*' * len(self.link_data['password'])
+                path_data['password'] = '*' * 4
+            else:
+                path_data['password'] = self.link_data['password']
+
+            path_data['auth'] = '%(user)s:%(password)s@' % path_data
+        else:
+            path_data['auth'] = ''
+
+        return path_template % path_data
+
+    def _send_data(self, request):
+        import urllib2
+        return urllib2.urlopen(request)
+
+    def _send_method_exchange(self, xml_data): # AzureusLink
+        from dopal.debug import ConnectionExchangeDebug, \
+            ErrorLinkDebug, OutgoingExchangeDebug
+
+        cgi_path = self.get_cgi_path(auth_details=False)
+        printable_path = self.get_cgi_path(auth_details=True, include_password=False)
+
+        if self.debug is not None:
+            debug_data = OutgoingExchangeDebug(printable_path, xml_data)
+            self.debug(debug_data)
+
+        import socket, urllib2
+        request = urllib2.Request(cgi_path, xml_data)
+
+        # Add User-Agent string.
+        from dopal import __user_agent__
+        request.add_header("User-agent", __user_agent__)
+
+        # Add authorisation data.
+        if self.link_data['user']:
+            auth_string = ("%(user)s:%(password)s" % self.link_data)
+            base64_string = auth_string.encode('base64').strip()
+            request.add_header("Authorization", "Basic " + base64_string)
+            del auth_string, base64_string
+
+        try:
+            data = self._send_data(request).read()
+        except (urllib2.URLError, socket.error, LinkError), error:
+
+            # Log the error, if enabled.
+            if self.debug is not None:
+                debug_data = ErrorLinkDebug(printable_path, error)
+                self.debug(debug_data)
+
+            # Error raised here.
+            raise_as(error, LinkError, obj=cgi_path)
+
+        # Log the exchange, if enabled.
+        if self.debug is not None:
+            debug_data = ConnectionExchangeDebug(printable_path, xml_data, data)
+            self.debug(debug_data)
+
+        return data
+
+    def send_method_exchange(self, xml_data): # AzureusLink
+        retry_count = 0
+        retry_namespace = None
+        while True:
+            try:
+                result = self._send_method_exchange(xml_data)
+            except LinkError, error:
+                if retry_namespace is not None:
+                    retry_namespace = {}
+                if self.handle_link_error(error, retry_count, retry_namespace):
+                    retry_count = 1
+                else:
+                    raise error
+            else:
+                if retry_count:
+                    self.handle_link_repair(error, retry_count, retry_namespace)
+                return result
+
+        # This won't happen, but it keeps PyChecker happy.
+        return None
+
+    def handle_link_error(self, error, retry_count, saved):
+        return False # Don't bother retrying.
+
+    def handle_link_repair(self, error, retry_count, saved):
+        pass
+
+    def set_link_details(self, **kwargs): # AzureusLink
+        """
+        Sets the details of where the Azureus server to connect to is located.
+
+        @rtype: None
+        @keyword host: Host name of the machine to connect to (default is
+            C{127.0.0.1}).
+        @keyword port: Server port that Azureus is accepting XML/HTTP
+            connections on (default is C{6884}).
+        @keyword secure: Set to a true value if the Azureus is only
+            accepting secure connections (default is C{False}).
+        @keyword user: For authenticated connections - the user name to
+            connect as (default is to use no authentication).
+        @keyword password: For authenticated connections - the password to
+            connect with (default is to use no authentication).
+        """
+
+        # Smart use of handle_kwargs, I think. :)
+        from dopal.utils import handle_kwargs
+        kwargs = handle_kwargs(kwargs, **self.link_data)
+
+        #for key, value in kwargs.items():
+        #    if key not in self.link_data:
+        #        raise TypeError, "invalid keyword argument: %s" % key
+        self.link_data.update(kwargs)
+
+    def __str__(self): # AzureusLink
+        return "%s for %s" % (self.__class__.__name__, self.link_data['host'])
+
+#
+# Method generation.
+#
def remote_method_call_to_xml(method_name, method_args, request_id,
    object_id=None, connection_id=None):

    '''
    Generates an XML block which can be sent to Azureus to invoke a remote
    method.

    An example of the output generated by this method::
      >>> remote_method_call_to_xml('getDownloads', [True], request_id=123, object_id=456, connection_id=789)

      <REQUEST>
        <OBJECT>
          <_object_id>456</_object_id>
        </OBJECT>
        <METHOD>getDownloads[boolean]</METHOD>
        <PARAMS>
          <ENTRY index="0">true</ENTRY>
        </PARAMS>
        <CONNECTION_ID>789</CONNECTION_ID>
        <REQUEST_ID>123</REQUEST_ID>
      </REQUEST>

    The I{method_args} parameter needs to be a sequence of items representing
    the method you want to invoke. Each argument needs to be one of the
    following types:
      - C{boolean} (represented in Java as a boolean)
      - C{int} (represented in Java as an int)
      - C{long} (represented in Java as a long)
      - C{str} or C{unicode} (represented in Java as a String)
      - C{float} (represented in Java as a float)
      - An object with a I{get_xml_type} method, which returns a string
        representing the name of the Java data type that it represents. It
        needs to also have one other method on it:
          - I{get_object_id} - needs to return the remote ID of the Azureus
            object it is representing; or
          - I{as_xml} - returns an object which can be converted into a string
            containing XML representing the value of this object. Several other
            types are supported using this method, defined in the L{aztypes}
            module (such as C{java.net.URL}, C{byte[]} etc.)

    @attention: B{Deprecated:} This method is not unicode-safe, nor does it
    define what happens when dealing with unicode data. Use
    L{remote_method_call_as_xml} instead.

    @param method_name: A string representing the name of the method you want
    to invoke (which must either be a method available on the object with the
    given object ID, or a special global method which Azureus has some special
    case behaviour for).

    @type method_name: str

    @param method_args: A sequence of items representing the arguments you want
    to pass to the method (definition of what types are accepted are explained
    above).

    @param request_id: The unique ID to be given to this invocation request
    (each invocation on a connection must be unique).
    @type request_id: str / int / long

    @param object_id: The object on which to invoke the method on. There are
    some methods which are special cased which don't require an object ID - in
    these cases, this can be left as I{None}.
    @type object_id: str / int / long / None

    @param connection_id: The ID of the connection you are using - this value
    is given to you once you have initiated a connection with Azureus - this
    can be left blank if you haven't yet initiated the connection.
    @type connection_id: str / int / long / None

    @return: A string representing the XML block to send.

    @raise InvalidWrapTypeError: Raised if one of the items in the
    method_args sequence does not match one of the accepted types.

    @see: L{aztypes}
    @see: L{InvalidWrapTypeError}

    @summary: B{Deprecated:} Use L{remote_method_call_as_xml} instead.
    '''

    import warnings
    warnings.warn("remote_method_call_to_xml is deprecated, use remote_method_call_as_xml instead", DopalPendingDeprecationWarning)

    import types
    from dopal.xmlutils import XMLObject, make_xml_ref_for_az_object

    # We check the argument types here,
    # building two parallel lists: the Java type names (used to build the
    # METHOD signature) and the XML-ready argument values.
    arg_types = []
    arg_values = []
    for method_arg in method_args:

        # The value has methods on it to tell us how we should handle it.
        if hasattr(method_arg, 'get_xml_type'):
            arg_type = method_arg.get_xml_type()

            # Either the value generates the XML itself (like below), or we
            # are able to determine how to generate the XML for it.
            if hasattr(method_arg, 'as_xml'):
                arg_value = method_arg.as_xml()

            # The value represents a remote object...
            elif hasattr(method_arg, 'get_object_id'):
                arg_value = make_xml_ref_for_az_object(method_arg.get_object_id())

            # If we get here, we don't know how to handle this object.
            else:
                raise InvalidWrapTypeError(obj=method_arg)

        # We must check boolean types before integers, as booleans are
        # a type of integer.
        #
        # The first check is just to ensure that booleans exist on the
        # system (to retain compatibility with Python 2.2)
        elif hasattr(types, 'BooleanType') and isinstance(method_arg, bool):
            arg_type = 'boolean'

            # lower - the Java booleans are lower case.
            arg_value = str(method_arg).lower()

        elif isinstance(method_arg, int):
            arg_type = 'int'
            arg_value = str(method_arg)

        elif isinstance(method_arg, types.StringTypes):
            arg_type = 'String'
            arg_value = method_arg

        elif isinstance(method_arg, long):
            arg_type = 'long'
            arg_value = str(method_arg)

        elif isinstance(method_arg, float):
            arg_type = 'float'
            arg_value = str(method_arg)

        else:
            raise InvalidWrapTypeError(obj=method_arg)

        arg_types.append(arg_type)
        arg_values.append(arg_value)
        del arg_type, arg_value, method_arg

    # We don't need to refer to method_args again, as we have arg_types and
    # arg_values. This prevents the code below accessing method_args
    # accidently.
    del method_args

    # Now we start to generate the XML.
    request_block = XMLObject('REQUEST')

    # Add the object ID (if we have one).
    if object_id:

        # We are just using this object to generate the XML block, the name
        # we give the type is not used, so does not matter.
        object_block = make_xml_ref_for_az_object(object_id)
        request_block.add_content(object_block)
        del object_block

    # Add the method identifier.
    # The wire format is "name[type1,type2,...]" when there are arguments.
    method_block = XMLObject('METHOD')
    method_content = method_name
    if arg_types:
        method_content += '[' + ','.join(arg_types) + ']'
    method_block.add_content(method_content)
    request_block.add_content(method_block)
    del method_block, method_content

    # Add method arguments.
    # Each argument becomes an ENTRY element carrying its positional index.
    if arg_values:
        params_block = XMLObject('PARAMS')
        for index_pos, xml_value in zip(range(len(arg_values)), arg_values):
            entry_block = XMLObject('ENTRY')
            entry_block.add_attribute('index', str(index_pos))
            entry_block.add_content(xml_value)
            params_block.add_content(entry_block)

        request_block.add_content(params_block)
        del index_pos, xml_value, entry_block, params_block

    # Add the connection ID (if we have one).
    if connection_id:
        connection_id_block = XMLObject('CONNECTION_ID')
        connection_id_block.add_content(str(connection_id))
        request_block.add_content(connection_id_block)
        del connection_id_block

    # Add a "unique" request ID.
    request_id_block = XMLObject('REQUEST_ID')
    request_id_block.add_content(str(request_id))
    request_block.add_content(request_id_block)

    return request_block.to_string()
+
def remote_method_call_as_xml(method_name, method_args, request_id,
    object_id=None, connection_id=None):

    '''
    Generates an XML block which can be sent to Azureus to invoke a remote
    method - this is returned as an object which can be turned into a unicode
    string.

    An example of the output generated by this method::
      >>> remote_method_call_as_xml('getDownloads', [True], request_id=123, object_id=456, connection_id=789).encode('UTF-8')

      <?xml version="1.0" encoding="UTF-8"?>
      <REQUEST>
        <OBJECT>
          <_object_id>456</_object_id>
        </OBJECT>
        <METHOD>getDownloads[boolean]</METHOD>
        <PARAMS>
          <ENTRY index="0">true</ENTRY>
        </PARAMS>
        <CONNECTION_ID>789</CONNECTION_ID>
        <REQUEST_ID>123</REQUEST_ID>
      </REQUEST>

    The I{method_args} parameter needs to be a sequence of items representing
    the method you want to invoke. Each argument needs to be one of the
    following types:
      - C{boolean} (represented in Java as a boolean)
      - C{int} (represented in Java as an int)
      - C{long} (represented in Java as a long)
      - C{str} or C{unicode} (represented in Java as a String)
      - C{float} (represented in Java as a float)
      - An object with a I{get_xml_type} method, which returns a string
        representing the name of the Java data type that it represents. It
        needs to also have one other method on it:
          - I{get_object_id} - needs to return the remote ID of the Azureus
            object it is representing; or
          - I{as_xml} - returns an object which can be converted into a string
            containing XML representing the value of this object. Several other
            types are supported using this method, defined in the L{aztypes}
            module (such as C{java.net.URL}, C{byte[]} etc.)

    @attention: Any byte strings passed to this function will be treated as if
    they are text strings, and they can be converted to unicode using the
    default system encoding. If the strings represented encoded content, you
    must decode them to unicode strings before passing to this function.

    @note: This function will return an object which has an C{encode} method
    (to convert the XML into the specified bytestring representation. The
    object can also be converted into a unicode string via the C{unicode}
    function. Currently, this object will be an instance of
    L{UXMLObject<dopal.xmlutils.UXMLObject>}, but this behaviour may change in
    future - the only guarantees this function makes is the fact that the
    resulting object can be converted into unicode,
    and that it will have an encode method on it.

    @param method_name: A string representing the name of the method you want
    to invoke (which must either be a method available on the object with the
    given object ID, or a special global method which Azureus has some special
    case behaviour for).

    @type method_name: str / unicode

    @param method_args: A sequence of items representing the arguments you want
    to pass to the method (definition of what types are accepted are explained
    above).

    @param request_id: The unique ID to be given to this invocation request
    (each invocation on a connection must be unique).
    @type request_id: str / unicode / int / long

    @param object_id: The object on which to invoke the method on. There are
    some methods which are special cased which don't require an object ID - in
    these cases, this can be left as I{None}.
    @type object_id: str / unicode / int / long / None

    @param connection_id: The ID of the connection you are using - this value
    is given to you once you have initiated a connection with Azureus - this
    can be left blank if you haven't yet initiated the connection.
    @type connection_id: str / unicode / int / long / None

    @return: An object which has an C{encode} method (to convert the XML into
    the specified bytestring representation. The object can also be converted
    into a unicode string via the C{unicode} function. Currently, this object
    will be an instance of L{UXMLObject<dopal.xmlutils.UXMLObject>}, but this
    behaviour may change in future - the only guarantees this function makes is
    the fact that the resulting object can be converted into unicode, and that
    it will have an encode method on it.

    @raise InvalidWrapTypeError: Raised if one of the items in the
    method_args sequence does not match one of the accepted types.

    @see: L{aztypes}
    @see: L{InvalidWrapTypeError}
    @see: L{UXMLObject<dopal.xmlutils.UXMLObject>}
    '''

    import types
    from dopal.xmlutils import UXMLObject, make_xml_ref_for_az_object

    # We check the argument types here,
    # building two parallel lists: the Java type names (used to build the
    # METHOD signature) and the XML-ready argument values.
    arg_types = []
    arg_values = []
    for method_arg in method_args:

        # The value has methods on it to tell us how we should handle it.
        if hasattr(method_arg, 'get_xml_type'):
            arg_type = method_arg.get_xml_type()

            # Either the value generates the XML itself (like below), or we
            # are able to determine how to generate the XML for it.
            if hasattr(method_arg, 'as_xml'):
                arg_value = method_arg.as_xml()

            # The value represents a remote object...
            elif hasattr(method_arg, 'get_object_id'):
                arg_value = make_xml_ref_for_az_object(method_arg.get_object_id())

            # If we get here, we don't know how to handle this object.
            else:
                raise InvalidWrapTypeError(obj=method_arg)

        # We must check boolean types before integers, as booleans are
        # a type of integer.
        #
        # The first check is just to ensure that booleans exist on the
        # system (to retain compatibility with Python 2.2)
        elif hasattr(types, 'BooleanType') and isinstance(method_arg, bool):
            arg_type = 'boolean'

            # lower - the Java booleans are lower case.
            arg_value = str(method_arg).lower()

        elif isinstance(method_arg, int):
            arg_type = 'int'
            arg_value = str(method_arg)

        elif isinstance(method_arg, types.StringTypes):
            arg_type = 'String'
            arg_value = method_arg

        elif isinstance(method_arg, long):
            arg_type = 'long'
            arg_value = str(method_arg)

        elif isinstance(method_arg, float):
            arg_type = 'float'
            arg_value = str(method_arg)

        else:
            raise InvalidWrapTypeError(obj=method_arg)

        arg_types.append(arg_type)
        arg_values.append(arg_value)
        del arg_type, arg_value, method_arg

    # We don't need to refer to method_args again, as we have arg_types and
    # arg_values. This prevents the code below accessing method_args
    # accidently.
    del method_args

    # Now we start to generate the XML.
    request_block = UXMLObject('REQUEST')

    # Add the object ID (if we have one).
    if object_id:

        # We are just using this object to generate the XML block, the name
        # we give the type is not used, so does not matter.
        object_block = make_xml_ref_for_az_object(object_id)
        request_block.add_content(object_block)
        del object_block

    # Add the method identifier.
    # The wire format is "name[type1,type2,...]" when there are arguments.
    method_block = UXMLObject('METHOD')
    method_content = method_name
    if arg_types:
        method_content += '[' + ','.join(arg_types) + ']'
    method_block.add_content(method_content)
    request_block.add_content(method_block)

    # Make this easily accessible for the debugger.
    request_block.request_method = method_content
    del method_block, method_content

    # Add method arguments.
    # Each argument becomes an ENTRY element carrying its positional index.
    if arg_values:
        params_block = UXMLObject('PARAMS')
        for index_pos, xml_value in zip(range(len(arg_values)), arg_values):
            entry_block = UXMLObject('ENTRY')
            entry_block.add_attribute('index', str(index_pos))
            entry_block.add_content(xml_value)
            params_block.add_content(entry_block)

        request_block.add_content(params_block)
        del index_pos, xml_value, entry_block, params_block

    # Add the connection ID (if we have one).
    if connection_id:
        connection_id_block = UXMLObject('CONNECTION_ID')
        connection_id_block.add_content(str(connection_id))
        request_block.add_content(connection_id_block)
        del connection_id_block

    # Add a "unique" request ID.
    request_id_block = UXMLObject('REQUEST_ID')
    request_id_block.add_content(str(request_id))
    request_block.add_content(request_id_block)

    return request_block
+
+
+#
+# Incoming method handling.
+#
+
+# Processes an XML response returned by Azureus, returning an AzureusResponse
+# instance.
+#
+# xml_node must be an instance of xml.dom.Node which has been normalised using
+# the normalise_xml_structure function.
+#
+# This function will raise a AzureusResponseXMLError if the XML is not in the
+# format expected.
def process_xml_response(xml_node):
    '''
    Turn a normalised XML document node into an AzureusResponse instance.

    xml_node must be an instance of xml.dom.Node which has been normalised
    using the normalise_xml_structure function.  Raises
    AzureusResponseXMLError when the XML does not have the expected shape.
    '''
    from xml.dom import Node
    from dopal.xmlutils import get_text_content

    child_count = len(xml_node.childNodes)
    if child_count != 1:
        raise AzureusResponseXMLError(
            "expected one main block inside document, had %s" % child_count)

    response_block = xml_node.firstChild
    if response_block.localName != 'RESPONSE':
        raise AzureusResponseXMLError(
            "expected a RESPONSE block, got %s block instead" %
            response_block.localName)

    az_dict = {}

    # An empty RESPONSE block covers void methods, null results, and
    # responses which are effectively empty (empty sequence / string).
    if not response_block.hasChildNodes():
        return NullResponse(az_dict)

    # Any ERROR element amongst the children wins - the rest of the
    # content (which can appear alongside it when something went wrong
    # mid-way through remote processing) is discarded.
    #
    # XXX: Perhaps this could occur anywhere in the tree structure, what
    # should we do?
    for node in response_block.childNodes:
        if node.nodeType == Node.ELEMENT_NODE and node.nodeName == 'ERROR':
            return ErrorResponse(az_dict, get_text_content(node))

    # A single text child is an atomic (scalar) result.
    if len(response_block.childNodes) == 1:
        only_child = response_block.firstChild
        if only_child.nodeType == Node.TEXT_NODE:
            return AtomicResponse(az_dict, get_text_content(only_child))

    # Otherwise we have a "complex" structure - possibly one remote object
    # definition, possibly several linked ones, possibly none.  The XML is
    # handed back as-is, but the pieces which are useful on their own
    # (information about the Azureus instance and the connection ID) are
    # extracted for convenient access first.
    connection_id = None
    az_dict = {}
    for node in response_block.childNodes:
        name = node.nodeName
        if name.startswith('azureus_'):
            # Strip the "azureus_" prefix for the dictionary key.
            az_dict[name[8:]] = get_text_content(node)
        elif name == '_connection_id':
            connection_id = long(get_text_content(node))

    return StructuredResponse(az_dict, response_block, connection_id)
+
+
+#
+# Base class of all types of response which can be returned by Azureus.
+#
+# It will have at least the following attributes:
+#
+#   azureus_data - dictionary containng information about the instance of
+#                  Azureus which is running.
+#
+#   response_data - The value of the response object. The type of this value
+#                   will differ between different Response implementations.
+#
+#   connection_id - ID of the connection given in the response. Will be None
+#                   if none was given.
+#
class AzureusResponse(object):

    '''
    Base class of all types of response which can be returned by Azureus.

    Attributes:
      azureus_data  - dictionary containing information about the running
                      Azureus instance.
      response_data - the value of the response; its type differs between
                      Response implementations.
      connection_id - ID of the connection given in the response, or None
                      when none was given.
    '''

    def __init__(self, azureus_data, response_data=None, connection_id=None):
        (self.azureus_data, self.response_data, self.connection_id) = \
            (azureus_data, response_data, connection_id)
+
class ErrorResponse(AzureusResponse):

    '''Response holding the text of an ERROR block returned by Azureus.'''

    def raise_error(self):
        # Translate the raw error text into the most specific
        # RemoteMethodError subclass that can be determined from it.
        error = generate_remote_error(self.response_data)
        raise error
+
class StructuredResponse(AzureusResponse):

    '''Response holding a structured XML result (remote object data).'''

    def get_object_id(self):
        from dopal.convert import XMLStructureReader
        # Doesn't matter that XMLStructureReader is an abstract class -
        # this particular method still works when applied to our stored
        # XML block. :)
        return XMLStructureReader.get_object_id(self.response_data)
+
class AtomicResponse(AzureusResponse):

    '''Response holding a single scalar value, stored as text.'''

    def get_value(self, value_type=None):
        '''Return the raw text, or unwrap it as the given Java type.'''
        if value_type is None:
            return self.response_data
        return unwrap_value(self.response_data, value_type)

    # Typed convenience accessors - each simply unwraps the stored text
    # as the corresponding Java type.
    def as_string(self):
        return self.get_value("String")

    def as_int(self):
        return self.get_value("int")

    def as_long(self):
        return self.get_value("long")

    def as_float(self):
        return self.get_value("float")

    def as_bool(self):
        return self.get_value("boolean")

    def as_bytes(self):
        return self.get_value("byte[]")
+
class NullResponse(AzureusResponse):

    '''
    A response class which is used when Azureus returns a response which
    contains no content at all.

    This is normally returned when:
      - C{null} is returned by the remote method.
      - An empty sequence.
      - An empty string.
      - The return type of the method is C{void}.
    '''

    def get_value(self, value_type=None):
        '''
        Return the "empty" value appropriate for value_type (None when no
        type is given, the empty string for String / byte[]).

        @raise InvalidUnwrapTypeError: if value_type has no sensible empty
        representation.
        '''
        if value_type is None:
            return None
        elif value_type in ['byte[]', 'String']:
            return ''
        else:
            # Bug fix: this error was previously *returned* rather than
            # raised, so callers received an exception instance as if it
            # were a legitimate value.
            raise InvalidUnwrapTypeError(obj=value_type)
+
+#
+# Error-handling.
+#
+
+#
+# This method takes a string returned in a response and generates an instance
+# of RemoteMethodError - it doesn't take into account of any reported class
+# type or any other data.
+#
def generate_remote_error(message):
    '''
    Map an error string returned in a response onto an instance of the
    most specific RemoteMethodError subclass available.  Only the message
    text is considered - no reported class type or other data.
    '''
    # Bad method?
    unknown_method = 'Unknown method: '
    if message.startswith(unknown_method):
        return NoSuchMethodError(message[len(unknown_method):])

    # Bad object ID?
    if message == 'Object no longer exists':
        return InvalidObjectIDError()

    # Missing object ID?
    if message == 'Object identifier missing from request':
        return MissingObjectIDError()

    # A Java exception raised remotely is not always easy to detect, but
    # it will mention the Java exception class.  For example, passing a
    # non-integer object ID produces:
    #
    # u'java.lang.RuntimeException: java.lang.NumberFormatException: For
    # input string: "3536f63"'
    #
    # So look for a Java-esque exception name before the first colon -
    # anything in a java. package ending with Error or Exception.
    parts = message.split(':', 1)
    if len(parts) == 2:
        java_name = parts[0]
        if java_name.startswith('java.') and \
            (java_name.endswith('Error') or java_name.endswith('Exception')):
            return RemoteInternalError(message)

    # Something went wrong - don't know what...
    return RemoteMethodError(message)
+
+#
+# Higher-level version of AzureusLink - this class maintains an active
+# connection with the remote server - it also utilises other components
+# defined by this module.
+#
+
+class AzureusConnection(AzureusLink):
+
    def __init__(self): # AzureusConnection
        # Set up the underlying link; the connection and request IDs are
        # only established once a connection handshake has taken place.
        AzureusLink.__init__(self)
        self.connection_id = None
        self.request_id = None # Will be initialised later.
+
    def update_connection_details(self, connection_id=None, connection_data={}): # AzureusConnection
        # Record the connection ID reported by the server (if any).
        #
        # NOTE(review): connection_data is accepted but ignored here -
        # presumably a hook for subclasses; confirm before removing.  Also
        # beware that its {} default is a shared mutable object.
        if connection_id is not None:
            self.connection_id = connection_id
+
+    # Return true if the specified method can be called without passing an
+    # object ID or connection ID.
+    #
+    # XXX: Would it be safe to have ExtendedAzureusConnection handle this?
+    # I would say yes, but look at the invoke_remote_method method, I don't
+    # think it would behave well if we either return True or False all the
+    # time...
+    def _is_global_method_call(self, method_name, method_args): # AzureusConnection
+        return method_name in ['getDownloads', 'getSingleton'] and not method_args
+
+    def invoke_remote_method(self, object_id, method_name, method_args, raise_errors=True): # AzureusConnection
+        # We require a connection ID and an object ID, unless we are calling
+        # a "global" method - methods which don't need either, and which will
+        # actually return that data to you in the result.
+        if self._is_global_method_call(method_name, method_args):
+            connection_id = None
+        else:
+            connection_id = self.connection_id
+            if object_id is None:
+                raise NoObjectIDGivenError
+            if connection_id is None:
+                raise NoEstablishedConnectionError
+
+        from xml.dom.minidom import parseString
+        from dopal.xmlutils import normalise_xml_structure, get_text_content
+
+        # First step, convert the method data to XML.
+        xml_data = remote_method_call_as_xml(method_name, method_args,
+            self.get_new_request_id(), object_id, connection_id)
+
+        xml_data_as_string = xml_data.encode('UTF-8')
+
+        from dopal.debug import MethodRequestDebug, MethodResponseDebug
+
+        # Log a debug message, if appropriate.
+        if self.debug is not None:
+            self.debug(MethodRequestDebug(object_id, xml_data.request_method))
+
+        # Second step, send this to Azureus and get a response back.
+        xml_response_string = self.send_method_exchange(xml_data_as_string)
+
+        # Third step, convert the string into a xml.dom.Node structure.
+        xml_structure = parseString(xml_response_string)
+
+        # Fourth step, sanitise the XML structure for easier parsing.
+        normalise_xml_structure(xml_structure)
+
+        # Fifth step, calculate the Azureus response instance represented by
+        # this XML structure.
+        response = process_xml_response(xml_structure)
+
+        # Send another debug message with the response.
+        if self.debug is not None:
+            self.debug(MethodResponseDebug(object_id, xml_data.request_method, response))
+
+        # Sixth step - update our own connection data given in this response.
+        connection_id = response.connection_id
+        azureus_data = response.azureus_data
+        self.update_connection_details(connection_id, azureus_data)
+
+        # Seventh step - return the response (or raise an error, if it's an
+        # error response).
+        if raise_errors and isinstance(response, ErrorResponse):
+            response.raise_error()
+
+        return response
+
+    def get_new_request_id(self): # AzureusConnection
+        if self.request_id is None:
+
+            # We use long to force it to be an integer.
+            import time
+            self.request_id = long(time.time())
+
+        self.request_id += 1
+        return self.request_id
+
class ExtendedAzureusConnection(AzureusConnection):
    '''
    AzureusConnection subclass which additionally tracks connection metadata
    (client name / version) and the object ID of the remote PluginInterface,
    allowing connections to be (re)established and validated.
    '''

    def __init__(self): # ExtendedAzureusConnection
        AzureusConnection.__init__(self)
        # Metadata (e.g. 'name', 'version') merged from server responses.
        self.connection_data = {}
        # Object ID of the remote PluginInterface singleton - captured when
        # a getSingleton call succeeds.
        self._plugin_interface_id = None

    def invoke_remote_method(self, object_id, method_name, method_args, raise_errors=True): # ExtendedAzureusConnection
        '''
        Invoke a remote method, translating InvalidObjectIDError into the
        more precise InvalidRemoteObjectError or InvalidConnectionIDError
        (determined by checking whether the connection is still valid).
        '''
        try:
            response = AzureusConnection.invoke_remote_method(self, object_id, method_name, method_args, raise_errors)
        except InvalidObjectIDError:
            if self.is_connection_valid():
                raise InvalidRemoteObjectError
            else:
                raise InvalidConnectionIDError

        # Capture the plugin interface ID whenever a global getSingleton
        # call returns an object structure.
        if object_id is None and method_name == 'getSingleton' and not method_args and isinstance(response, StructuredResponse):
            self._plugin_interface_id = response.get_object_id()

        return response

    def establish_connection(self, force=True): # ExtendedAzureusConnection
        '''
        Establishes a connection with the Azureus server.

        By invoking this method, this will ensure that other methods defined
        by this class work correctly, as it will have references both to a
        connection ID, and the ID for the plugin interface.

        The C{force} argument determines whether server communication must
        take place or not. If C{True}, then this object will communicate with
        the Azureus server - if C{False}, then this object will only
        communicate with the Azureus server if it has no recorded information
        about the plugin interface.

        This method has two uses, depending on the value of the C{force}
        argument - it can be used to ensure that there is a valid recorded
        connection in place (if force is C{True}), or it can be used just
        to ensure that other methods on this class will behave properly (if
        force is C{False}).

        If a new connection is established, then the L{_on_reconnect} method
        will be invoked.

        @param force: Boolean value indicating if communication with the server
           I{must} take place or not (default is C{True}).
        @return: None
        '''

        # If 'force' is not true - then we only make a call if we don't have
        # any stored reference to a plugin interface ID.
        if (not force) and (self._plugin_interface_id is not None):
            return

        # Our overridden implementation of invoke_remote_method will set
        # what we need (see the getSingleton handling there).
        old_interface_id = self._plugin_interface_id
        response = self.invoke_remote_method(None, 'getSingleton', ())

        # If we had the ID for an old PluginInterface object, then
        # that probably signals a reconnection. So let's signal that.
        if old_interface_id is not None and \
            old_interface_id != self._plugin_interface_id:
            self._on_reconnect()

        return

    def _on_reconnect(self): # ExtendedAzureusConnection
        '''
        Hook for subclasses to be notified whenever a new connection has been
        made.
        '''
        pass

    def is_connection_valid(self): # ExtendedAzureusConnection

        '''
        Returns a boolean value indicating if the current connection is
        still valid.

        @invariant: This connection to have already been I{established}.
        @raise NoEstablishedConnectionError: If this connection has not been
           established.
        @return: C{True} if the current established connection is still valid,
           C{False} otherwise.
        '''

        if self._plugin_interface_id is None:
            raise NoEstablishedConnectionError

        # Try to invoke this method on the remote PluginInterface object.
        try:
            AzureusConnection.invoke_remote_method(self, self._plugin_interface_id, '_refresh', ())
        except InvalidObjectIDError:
            return False
        else:
            return True

    def __str__(self): # ExtendedAzureusConnection
        result = super(ExtendedAzureusConnection, self).__str__()
        # 'in' replaces the deprecated dict.has_key - identical behaviour.
        if 'name' in self.connection_data and \
            'version' in self.connection_data:
            result += " [%(name)s %(version)s]" % self.connection_data
        return result

    def update_connection_details(self, connection_id=None, connection_data=None): # ExtendedAzureusConnection
        '''Merge server-reported metadata into connection_data.'''
        super(ExtendedAzureusConnection, self).update_connection_details(connection_id)
        # None replaces the original mutable default ({}); an absent or
        # empty payload is simply ignored.
        if connection_data:
            self.connection_data.update(connection_data)

    def get_azureus_version(self): # ExtendedAzureusConnection
        '''
        Return the remote Azureus version as parsed by
        dopal.utils.parse_azureus_version_string.

        @raise NoEstablishedConnectionError: If no version metadata has been
           received from the server yet.
        @since: DOPAL 0.56
        '''
        try:
            az_version = self.connection_data['version']
        except KeyError:
            raise NoEstablishedConnectionError
        else:
            import dopal.utils
            return dopal.utils.parse_azureus_version_string(az_version)


# Use of this name is deprecated, and this alias will be removed in later
# versions of DOPAL.
ReusableAzureusConnection = ExtendedAzureusConnection

+ 167 - 0
html/bin/clients/fluazu/dopal/debug.py

@@ -0,0 +1,167 @@
+# File: debug.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Contains functions and objects useful for debugging DOPAL.
+'''
+
class DebugObject(object):
    '''Root type for all DOPAL debug event objects.'''
    pass

class LinkDebugObject(DebugObject):
    '''Debug events emitted by the low-level link layer.'''
    pass

class ErrorLinkDebug(LinkDebugObject):
    '''Reports an error raised while communicating with a CGI endpoint.'''

    def __init__(self, cgi_path, error):
        self.cgi_path, self.error = cgi_path, error

class OutgoingExchangeDebug(LinkDebugObject):
    '''Captures data which is about to be sent to a CGI endpoint.'''

    def __init__(self, cgi_path, data_to_send):
        self.cgi_path, self.data_to_send = cgi_path, data_to_send

class ConnectionExchangeDebug(LinkDebugObject):
    '''Captures a completed request/response exchange with an endpoint.'''

    def __init__(self, cgi_path, data_sent, data_received):
        self.cgi_path = cgi_path
        self.data_sent, self.data_received = data_sent, data_received

class ConnectionDebugObject(DebugObject):
    '''Debug events emitted by the connection (method call) layer.'''
    pass

class MethodRequestDebug(ConnectionDebugObject):
    '''Records an outgoing remote method invocation request.'''

    def __init__(self, object_id, request_method):
        self.object_id, self.request_method = object_id, request_method

class MethodResponseDebug(ConnectionDebugObject):
    '''Records the response received for a remote method invocation.'''

    def __init__(self, object_id, request_method, response):
        self.object_id, self.request_method = object_id, request_method
        self.response = response
+
+def print_everything(debug_object):
+
+    if not isinstance(debug_object, LinkDebugObject):
+        return
+
+    print
+    print '---------------'
+    print
+
+    if isinstance(debug_object, OutgoingExchangeDebug):
+        print 'Sending to "%s"' % debug_object.cgi_path
+        print
+        print debug_object.data_to_send
+
+    elif isinstance(debug_object, ConnectionExchangeDebug):
+        print 'Recieved from "%s"' % debug_object.cgi_path
+        print
+        print debug_object.data_received
+
+    elif isinstance(debug_object, ErrorLinkDebug):
+        error = debug_object.error
+        print 'Error from "%s"' % debug_object.cgi_path
+        print
+        print '%s: %s' % (error.__class__.__name__, error)
+
+    print
+    print '---------------'
+    print
+
+def print_everything_with_stack(debug_object):
+    # Like print_everything, but when an outgoing exchange is logged, first
+    # print the Python stack trace which led to the request being sent.
+    if isinstance(debug_object, OutgoingExchangeDebug):
+        import traceback
+        print
+        print '---------------'
+        print
+        print 'Stack trace of request:'
+        traceback.print_stack()
+        print
+        print '---------------'
+        print
+    print_everything(debug_object)
+
+def print_method(debug_object):
+    # Pretty-printer for method-level debug events. A request prints the
+    # opening separator and a response prints the matching closing one, so
+    # a request/response pair renders as a single bracketed section.
+    from dopal.utils import make_short_object_id as _sid
+    if isinstance(debug_object, MethodRequestDebug):
+        print
+        print '---------------'
+        # Trailing commas suppress the newline (Python 2 print statement),
+        # keeping the object ID and its short ID on the same output line.
+        print '  Object:', debug_object.object_id,
+        if debug_object.object_id is not None:
+            print "[sid=%s]" % _sid(debug_object.object_id),
+        print
+        print '  Method:', debug_object.request_method
+        print
+    elif isinstance(debug_object, MethodResponseDebug):
+        import dopal.core
+        # Classify the response by its dopal.core type and print a concise
+        # summary of the payload.
+        if isinstance(debug_object.response, dopal.core.ErrorResponse):
+            print '  Response Type: ERROR'
+            print '  Response Data:', debug_object.response.response_data
+        elif isinstance(debug_object.response, dopal.core.AtomicResponse):
+            print '  Response Type: VALUE'
+            print '  Response Data:', debug_object.response.response_data
+        elif isinstance(debug_object.response, dopal.core.NullResponse):
+            print '  Response Type: NULL / EMPTY'
+            print '  Response Data: None'
+        elif isinstance(debug_object.response, dopal.core.StructuredResponse):
+            print '  Response Type: STRUCTURE'
+            print '  Response Data:',
+
+            # Structured responses may or may not carry a remote object ID.
+            obj_id = debug_object.response.get_object_id()
+            if obj_id is not None:
+                print 'Object [id=%s, sid=%s]' % (obj_id, _sid(obj_id))
+            else:
+                print 'Non-object value'
+        print '---------------'
+        print
+
+def print_method_with_stack(debug_object):
+    # Like print_method, but also prints a stack trace.
+    # NOTE(review): the trace fires on MethodResponseDebug (when a response
+    # arrives), unlike print_everything_with_stack which fires on the
+    # outgoing request - confirm this asymmetry is intentional.
+    if isinstance(debug_object, MethodResponseDebug):
+        import traceback
+        print
+        print '---------------'
+        print
+        print 'Stack trace of request:'
+        traceback.print_stack()
+        print
+        print '---------------'
+        print
+    print_method(debug_object)
+
class DebugGrabber(object):
    '''
    Callable debug hook which remembers the most recent
    ConnectionExchangeDebug event, exposing the raw data sent to and
    received from the server.
    '''

    # Last captured exchange - None until a ConnectionExchangeDebug is seen.
    debug_object = None

    def get_in(self):
        '''Return the data sent in the last captured exchange.'''
        if self.debug_object is None:
            # Exception call syntax replaces the legacy
            # "raise Exception, msg" form (valid on Python 2 and 3).
            raise Exception("not captured any data yet")
        return self.debug_object.data_sent

    def get_out(self):
        '''Return the data received in the last captured exchange.'''
        if self.debug_object is None:
            raise Exception("not captured any data yet")
        return self.debug_object.data_received

    def __call__(self, debug_object):
        # Only connection exchanges carry both directions of data.
        if isinstance(debug_object, ConnectionExchangeDebug):
            self.debug_object = debug_object

+ 466 - 0
html/bin/clients/fluazu/dopal/errors.py

@@ -0,0 +1,466 @@
+# File: errors.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Module containing all errors defined in DOPAL.
+'''
+
def as_error(error, error_class, **kwargs):
    '''
    Coerce ``error`` into an instance of ``error_class``.

    If the given exception is already of the requested class, it is returned
    untouched; otherwise a new ``error_class`` instance is built with the
    original exception passed as its ``error`` keyword argument (alongside
    any extra keyword arguments).
    '''
    if isinstance(error, error_class):
        return error
    return error_class(error=error, **kwargs)
+
+def raise_as(error, error_class, **kwargs):
+    # Re-raise 'error' coerced to 'error_class' (via as_error), preserving
+    # the traceback of the exception currently being handled - uses the
+    # Python 2 three-argument raise form.
+    import sys
+    raise as_error(error, error_class, **kwargs), None, sys.exc_info()[2]
+
class DopalError(Exception):
    "Subclass of all errors in the Dopal library"

    def __init__(self, *args, **kwargs):
        '''
        Accepts up to three values, as positional or keyword arguments: an
        inner exception ('error'), an arbitrary object ('obj') and a message
        string ('text'). Positional arguments are classified by type -
        Exception instances become 'error', strings become 'text', anything
        else becomes 'obj'.

        @raise ValueError: If more than three values are given in total.
        @raise TypeError: If a value cannot be classified (e.g. a second
           non-string, non-exception positional argument).
        '''

        if len(args) + len(kwargs) > 3:
            # Exception call syntax replaces the legacy "raise Class, value"
            # statement form (valid on Python 2 and 3).
            raise ValueError("DopalError.__init__ takes at most 3 arguments - %s positional argument(s) given, %s keyword argument(s) given" % (len(args), len(kwargs)))

        # Filters out invalid keywords.
        from dopal.utils import handle_kwargs
        handle_kwargs(kwargs, error=None, obj=None, text=None)

        error = obj = text = None
        has_error = has_object = has_text = False

        import types
        for kwname, kwvalue in kwargs.items():

            if kwname == 'error':
                if not isinstance(kwvalue, Exception):
                    msg = "'error' keyword argument is not Exception: %r"
                    raise TypeError(msg % (kwvalue,))
                has_error = True
                error = kwvalue

            elif kwname == 'text':
                if not isinstance(kwvalue, types.StringTypes):
                    msg = "'text' keyword argument is not a String type: %r"
                    raise TypeError(msg % (kwvalue,))
                has_text = True
                text = kwvalue

            else: # kwname == 'obj'
                has_object = True
                obj = kwvalue

        # Classify the positional arguments by type. (The duplicate
        # "import types" present in the original has been removed - the
        # module is already imported above.)
        for arg in args:
            if isinstance(arg, Exception) and not has_error:
                has_error = True
                error = arg
            elif isinstance(arg, types.StringTypes) and not has_text:
                has_text = True
                text = arg
            else:
                if has_object:
                    msg = "could not determine Dopal argument type for %r"
                    raise TypeError(msg % (arg,))
                has_object = True
                obj = arg

        dopal_arg_tuple = args
        if kwargs:
            dopal_arg_tuple += tuple(kwargs.values())

        dopal_arg_dict = {}
        if has_error:
            dopal_arg_dict['error'] = error
        if has_object:
            dopal_arg_dict['object'] = obj
        if has_text:
            dopal_arg_dict['text'] = text

        self.dopal_arg_tuple = dopal_arg_tuple
        self.dopal_arg_dict = dopal_arg_dict
        self.error = error
        self.obj = obj
        self.text = text
        self.has_object = has_object
        self.has_error = has_error
        self.has_text = has_text

        #super(DopalError, self).__init__(dopal_arg_tuple)
        Exception.__init__(self, *dopal_arg_tuple)

    def __str__(self):

        # Allow the subclass to render the string if:
        #   1) self.args is not the tuple that this class passed to the super
        #      constructor; or
        #   2) We have 2 or more values given to us - the default behaviour for
        #      rendering the string by the superclass for one or no arguments
        #      is fine.
        if self.args != self.dopal_arg_tuple or \
            len(self.args) < 2:
            #return super(DopalError, self).__str__()
            return Exception.__str__(self)

        # Pick a template based on which of the three values are present.
        if not self.has_error:
            tmpl = "%(text)s (%(object)r)"

        elif not self.has_object:
            tmpl = "%(text)s - %(error)s"

        elif not self.has_text:
            tmpl = "%(error)s (%(object)r)"

        else:
            tmpl = "%(text)s - %(error)s (%(object)r)"

        return tmpl % self.dopal_arg_dict

    def __repr__(self):
        arg_parts = ["%s=%r" % item_tpl for item_tpl in self.dopal_arg_dict.items()]
        return "%s(%s)" % (self.__class__.__name__, ', '.join(arg_parts))

    # An alternative to str() of the object - this will make a string of the
    # form:
    #    ErrorClassName: <error output>
    #
    # (This is similar to the last line you see of a traceback). The main
    # difference here is that we will use the class name of the internal error
    # if there is one, otherwise we will use the class name of the object
    # itself.
    #
    # The error output will be the same string as you get when you apply str()
    # to this object.
    #
    # Setting use_error to False will force it to always ignore the internal
    # error.
    def to_error_string(self, use_error=True):
        if use_error and self.has_error:
            error_to_use = self.error
        else:
            error_to_use = self

        error_output = str(self)

        result = error_to_use.__class__.__name__
        if error_output:
            result += ": " + error_output

        return result
+
+#---- core module ----#
+
+# Raised when low-level communication with the Azureus server fails
+# (transport/HTTP layer), before any remote method result is available.
+class LinkError(DopalError):
+    "Error communicating with Azureus (low-level)"
+
+# Base class for errors reported by the Azureus server itself.
+class RemoteError(DopalError): # Base class.
+    "Error reported by Azureus"
+
+# Base class for errors which prevented a remote method being invoked.
+class RemoteInvocationError(RemoteError): # Base class.
+    "Unable to invoke remote method"
+
+class NoSuchMethodError(RemoteInvocationError):
+    """
+    This error is thrown when Azureus reports that the requested method does
+    not exist, or is not allowed.
+
+    A NoSuchMethodError is a representation of the response Azureus returns
+    when trying to invoke a method, but is unable to map the requested method
+    to a method it can actually invoke.
+
+    Causes
+    ======
+
+      There are various reasons why this error may occur, but here are the
+      most likely.
+
+      Wrong method requested
+      ----------------------
+      The wrong method signature was used - this is possible for a variety
+      of reasons (though it isn't likely). Check that the method you want to
+      use is the same one being reported in the NoSuchMethodError instance.
+
+      Method not available in this version of Azureus
+      -----------------------------------------------
+      This method may not be available in the version of Azureus you are
+      using - although DOPAL normally supports all methods made available in
+      the latest beta release, this error will occur if the version of Azureus
+      does not support that method.
+
+      XML/HTTP request processor may not support this method
+      ------------------------------------------------------
+      The request processor used by Azureus may not be able to resolve that
+      method. Versions 2.3.0.6 and older only allow a small subset of methods
+      defined in the plugin API to be called. Version 2.4.0.0 (as well as some
+      later beta versions of 2.3.0.7) have been changed to allow any method to
+      be called. To enable this, go to the XML/HTTP plugin configuration page,
+      and tick the I{"Advanced Settings -> Use generic classes"} setting.
+
+      Non read-only method requested, but XML/HTTP in view mode
+      ---------------------------------------------------------
+      The XML/HTTP plugin in Azureus is set up to be in "view" mode, so only
+      certain methods are allowed. Note - if you are unable to call a method
+      which you think should be allowed in read only mode, contact the
+      developers of the XML/HTTP plugin.
+
+    @ivar obj: This will be a string which describes the method signature
+      which was requested - for example::
+         getDownloads
+         setPosition[int]
+         setTorrentAttribute[TorrentAttribute,String]
+    """
+
+    def __init__(self, method_sig):
+        """
+        Creates a new NoSuchMethodError instance.
+
+        @param method_sig: String describing the requested method signature
+           (recorded as this error's C{obj} value - see the ivar above).
+        """
+        RemoteInvocationError.__init__(self, obj=method_sig)
+
+class NoObjectIDGivenError(DopalError, ValueError):
+    "No object ID given when needed"
+
+class NoEstablishedConnectionError(DopalError, TypeError):
+    "Connection object has no valid connection established"
+
+# Raised by generate_remote_error (which means it is indirectly raised in
+# AzureusConnection.invoke_remote_method).
+#
+# These errors are masked by ExtendedAzureusConnection.invoke_remote_method,
+# which throws one of the subclass errors (InvalidRemoteObjectError and
+# InvalidConnectionIDError) instead.
+#
+# This error shouldn't arise if you are using a ExtendedAzureusConnection or
+# higher-level connection object.
+class InvalidObjectIDError(RemoteInvocationError):
+    "Invalid remote object ID given (bad object or bad connection)"
+
+class InvalidRemoteObjectError(InvalidObjectIDError):
+    "Invalid remote object ID used"
+
+class InvalidConnectionIDError(InvalidObjectIDError):
+    "Invalid connection ID used"
+
+class MissingObjectIDError(RemoteInvocationError):
+    "Missing object ID"
+
+# Raised by generate_remote_error (which means it is indirectly raised in
+# AzureusConnection.invoke_remote_method).
+#
+# Higher-level connections (like AzureusObjectConnection) may raise subclasses
+# of this error, if a more precise cause can be determined.
+class RemoteMethodError(RemoteError):
+    "Error thrown by remote method"
+
+# Reported when the server-side error message looks like a Java exception
+# (see generate_remote_error) rather than a normal method failure.
+class RemoteInternalError(RemoteError):
+    "Internal error occurred during remote method invocation"
+
+class AzureusResponseXMLError(DopalError):
+    "Error while parsing XML returned by Azureus"
+
+#---- core module ----#
+
+#---- types module ----#
+
+# Base class for failures converting values between their Azureus (remote)
+# and Python (local) representations.
+class ConversionError(DopalError): # Base class.
+    "Error converting value (Azureus <--> Python)"
+
+class WrapError(ConversionError):
+    "Error converting value to remote method argument"
+
+class UnwrapError(ConversionError):
+    "Error converting remote method result to Python value"
+
+class InvalidWrapTypeError(WrapError, TypeError):
+    '''
+    Invalid wrap type given.
+
+    This error is raised when a value is passed which cannot be converted into
+    something that can be represented in Azureus. This either means that the
+    value doesn't meet the criteria as something which can be represented, or
+    the value doesn't fit the type that it is being wrapped as (e.g. a
+    non-integer string as a integer).
+
+    @see: L{wrap_value<dopal.aztypes.wrap_value>}
+    @see: L{remote_method_call_to_xml<dopal.core.remote_method_call_to_xml>}
+    '''
+
+class InvalidUnwrapTypeError(UnwrapError, TypeError):
+    "Invalid unwrap type given."
+
+class InconsistentWrapTypeError(WrapError, TypeError):
+    "Object has wrap type different to requested type"
+
+#---- types (AzMethod) module ----#
+
+class AzMethodError(DopalError): # Base class.
+    "Error selecting matching AzMethod"
+
+    def __init__(self, obj, *args, **kwargs):
+        kwargs['obj'] = obj
+        self.method_name = obj
+        DopalError.__init__(self, *args, **kwargs)
+
+class IncorrectArgumentCountError(AzMethodError, TypeError):
+    "Wrong number of arguments given for AzMethod"
+
+    def __init__(self, obj, given_arg_count, required_arg_count):
+        # Initialise the TypeError side of the hierarchy explicitly, since
+        # AzMethodError.__init__ only chains to DopalError.
+        TypeError.__init__(self)
+
+        # self.required_arg_count is a list
+        # required_count is used for the 'text' variable
+
+        self.given_arg_count = given_arg_count
+
+        # NOTE: the 'long' builtin makes this Python 2 only.
+        if isinstance(required_arg_count, (int, long)):
+            self.required_arg_count = [required_arg_count]
+            required_count = required_arg_count
+
+        elif len(required_arg_count) == 1:
+            # Single-element sequence - unwrap it for display purposes.
+            self.required_arg_count = required_arg_count
+            required_count = required_arg_count[0]
+
+        else:
+            # Multiple possible counts - display them as a sorted list.
+            self.required_arg_count = required_arg_count
+            required_count = required_arg_count
+            required_count = list(required_count)
+            required_count.sort()
+
+        text = "wrong number of arguments given (wanted %(required_count)s, given %(given_arg_count)s)" % locals()
+
+        AzMethodError.__init__(self, obj, text=text)
+
class ArgumentWrapError(AzMethodError):
    "Error wrapping argument for AzMethod"

    def __init__(self, arg_index, value, arg_type, error):
        '''
        arg_index - position of the argument which failed to wrap.
        value - the offending value (recorded as the error's object).
        arg_type - the Azureus type the value was being converted to.
        error - the underlying conversion error.
        '''
        message = "error converting arg %s to %s" % (arg_index, arg_type)
        AzMethodError.__init__(self, obj=value, error=error, text=message)
        self.arg_index = arg_index
        self.arg_type = arg_type
+
+class NoSuchAzMethodError(AzMethodError, AttributeError):
+    "No method of that name available"
+    def __init__(self, *args, **kwargs):
+        # Initialise both sides of the multiple-inheritance hierarchy.
+        AttributeError.__init__(self)
+        AzMethodError.__init__(self, *args, **kwargs)
+
class MethodArgumentWrapError(AzMethodError):
    "Error wrapping argument for multiple AzMethods"

    def __init__(self, name, invocation_errors):
        '''
        name - the method name being invoked.
        invocation_errors - sequence of (method_data, error) pairs, one per
        candidate method signature which failed argument wrapping.
        '''
        AzMethodError.__init__(self, name)
        self.invocation_errors = invocation_errors

    def __str__(self):
        # Render one line per failed candidate signature, sorted for a
        # stable, readable report.
        rows = sorted(
            (str(method_data), str(error.__class__.__name__), str(error))
            for method_data, error in self.invocation_errors)

        lines = ["Error wrapping arguments:"]
        for method_data, err_class, error in rows:
            lines.append("  %s - %s: %s" % (method_data, err_class, error))
        return "\n".join(lines)
+
+
+
+#---- types  (AzMethod) module ----#
+
+#---- objects module ----#
+
+class ConnectionlessObjectError(DopalError):
+    "Object has no remote connection"
+
+class NonRefreshableObjectError(DopalError): # Base class
+    "Object cannot be refreshed - refresh not implemented"
+
+class NonRefreshableConnectionlessObjectError(NonRefreshableObjectError, ConnectionlessObjectError):
+    "Object cannot be refreshed - no connection attached"
+
+    def __init__(self, *args, **kwargs):
+        # Both parents are initialised explicitly; the cooperative super()
+        # alternative is left commented out below, as in the original.
+        NonRefreshableObjectError.__init__(self)
+        ConnectionlessObjectError.__init__(self, *args, **kwargs)
+        #_superclass = super(NonRefreshableConnectionlessObjectError, self)
+        #_superclass.__init__(*args, **kwargs)
+
+class NonRefreshableObjectTypeError(NonRefreshableObjectError):
+    "Object cannot be refreshed - not implemented for this type"
+
+class NonRefreshableIncompleteObjectError(NonRefreshableObjectError):
+    "Object cannot be refreshed - insufficient information on object"
+
+class StaleObjectReferenceError(NonRefreshableObjectError):
+    "Object used belongs to old connection, which doesn't have persistency enabled"
+
+class MissingRemoteAttributeError(DopalError, AttributeError):
+    "Object does not have remote attribute available"
+
+#---- objects module ----#
+
+#---- convert module ----#
+
+class InvalidRemoteClassTypeError(DopalError, TypeError):
+    "Invalid remote class type given"
+
+# Base exception class - used when something cannot be converted.
+class StructureConversionError(ConversionError): # Base class.
+    "Error converting response structure"
+
+# Base class for flow control exceptions.
+class ConversionControl(StructureConversionError): # Base class.
+    "Base class for structured conversion control"
+
+# Use this class if you want to skip converting the object which
+# is being handled.
+class SkipConversion(ConversionControl):
+    "Structured conversion of object skipped"
+
+# Use this class if you want to stop converting the object which
+# is being handled (essentially signalling a somewhat "fatal" error).
+class AbortConversion(ConversionControl):
+    "Structured conversion of object aborted"
+
+# Use this class if you want to signal that you need more information
+# before you can proceed with the conversion - either that you need
+# the items lower down to be converted first, or you need the items
+# higher up converted first.
+class DelayConversion(ConversionControl):
+    "Structured conversion of object delayed"
+
+# Use this class if you want to halt conversion completely - this is a more
+# severe form of AbortConversion, where it won't be passed to
+# Converter.handle_errors.
+class HaltConversion(ConversionControl):
+    "Structured conversion of object halted"
+
+# Warning used for deprecated DOPAL features - also a DopalError so that
+# it fits in the library's exception hierarchy.
+class DopalDeprecationWarning(DeprecationWarning, DopalError):
+    pass
+
+# PendingDeprecationWarning class doesn't exist in Python 2.2.
+try:
+    class DopalPendingDeprecationWarning(DopalDeprecationWarning, PendingDeprecationWarning):
+        pass
+except NameError:
+    # Fallback for Python 2.2 - behaves like a plain deprecation warning.
+    class DopalPendingDeprecationWarning(DopalDeprecationWarning):
+        pass
+
+class NoDefaultScriptConnectionError(DopalError):
+    pass
+
+class ScriptFunctionError(DopalError):
+    "Error occurred inside script function."

+ 92 - 0
html/bin/clients/fluazu/dopal/interact.py

@@ -0,0 +1,92 @@
+# File: interact.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Interactive Python application which initialises DOPAL to connect with a chosen
+Azureus server.
+'''
+
+
+def main():
+    '''Function to invoke this application.
+
+    Prompts on stdin for host, port and optional credentials, opens a
+    persistent L{DopalConnection}, then starts an interactive Python
+    shell (via the C{code} module) with C{connection} - and, if the
+    initial connection succeeded, C{interface} - available as locals.
+    '''
+    # Get host and port.
+    connection_details = {}
+    connection_details['host'] = raw_input('Enter host: ')
+    port_text = raw_input('Enter port (default is 6884): ')
+    if port_text:
+        connection_details['port'] = int(port_text)
+
+    # Username and password.
+    username = raw_input('Enter user name (leave blank if not applicable): ')
+    # NOTE(review): this local is never used - the password is stored
+    # directly in connection_details below.
+    password = None
+    if username:
+        import getpass
+        connection_details['user'] = username
+        connection_details['password'] = getpass.getpass('Enter password: ')
+
+    my_locals = {}
+    from dopal.main import make_connection
+    connection = make_connection(**connection_details)
+    # Keep the connection alive across the many remote calls that will
+    # be made from the interactive session.
+    connection.is_persistent_connection = True
+
+    # Fetch the plugin interface up front, so the session starts with a
+    # usable 'interface' object or a clear connection error message.
+    from dopal.errors import LinkError
+    try:
+        interface = connection.get_plugin_interface()
+    except LinkError, error:
+        interface = None
+        connection_error = error
+    else:
+        connection_error = None
+
+    from dopal import __version_str__
+    banner = "DOPAL %s - interact module\n\n" % __version_str__
+    banner += "Connection object stored in 'connection' variable.\n"
+
+    if connection_error is None:
+        banner += "Plugin interface stored in 'interface' variable.\n"
+    else:
+        banner += "\nError getting plugin interface object - could not connect to Azureus, error:\n  %s" % connection_error.to_error_string()
+
+    import dopal
+    if dopal.__dopal_mode__ == 1:
+        banner += "\nRunning in DEBUG mode.\n"
+    elif dopal.__dopal_mode__ == 2:
+        banner += '\nWARNING: Running in "epydoc" mode.\n'
+
+    my_locals['connection'] = connection
+    if interface is not None:
+        my_locals['interface'] = interface
+    # Presumably exposed so that 'import' statements typed by the user
+    # work inside the interpreter namespace - TODO confirm.
+    my_locals['__import__'] = __import__
+
+    print
+    print '------------------------'
+    print
+
+    import code
+    code.interact(banner, local=my_locals)
+
+if __name__ == '__main__':
+    # Wrapper matching the callable signature expected by ext_run (it
+    # passes an environment argument which main() does not need).
+    def _main(env):
+       return main()
+
+    import dopal.scripting
+    dopal.scripting.ext_run(
+        'dopal.interact', _main,
+        make_connection=False,  # main() builds its own connection.
+        setup_logging=False,
+        timeout=8,
+        pause_on_exit=2,
+    )

+ 34 - 0
html/bin/clients/fluazu/dopal/logutils.py

@@ -0,0 +1,34 @@
+# File: logutils.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Module containing various logging-related utilities and classes.
+'''
+# From:
+#   http://news.hping.org/comp.lang.python.archive/19937.html
+def noConfig():
+    '''
+    This function should be called to indicate that you are explicitly
+    notifying the logging module that you do not intend to add any
+    handlers to the root logger.
+
+    This suppresses the warning C{No handlers could be found for logger
+    "root"} from being emitted. This function performs the following
+    call to disable the warning::
+        logging.root.manager.emittedNoHandlerWarning = True
+    '''
+    import logging
+    # NOTE: emittedNoHandlerWarning is an undocumented internal flag of
+    # the logging module - setting it makes logging believe the warning
+    # has already been emitted.
+    logging.root.manager.emittedNoHandlerWarning = True

+ 79 - 0
html/bin/clients/fluazu/dopal/main.py

@@ -0,0 +1,79 @@
+# File: main.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Main module for using DOPAL with little effort.
+'''
+
+def make_connection(persistent=False, **kwargs):
+
+    '''
+    Generate a L{DopalConnection} to an Azureus server, whose location is
+    specified by keyword arguments.
+
+    To see what keywords are accepted, see the
+    L{set_link_details<dopal.core.AzureusLink.set_link_details>} method. This
+    method also takes an additional keyword - C{persistent}, which determines
+    whether the connection should be persistent or not (by default, it is not
+    persistent).
+
+    This function will return a DopalConnection instance.
+
+    @rtype: L{DopalConnection}
+    @see: L{set_link_details<dopal.core.AzureusLink.set_link_details>}
+    '''
+    connection = DopalConnection()
+    connection.set_link_details(**kwargs)
+
+    # DopalConnection instances default to persistent, so the flag only
+    # needs switching off when a non-persistent connection was asked for.
+    if not persistent:
+        connection.is_persistent_connection = False
+    return connection
+
+from dopal.objects import AzureusObjectConnection
+class DopalConnection(AzureusObjectConnection):
+
+    '''
+    A subclass of
+    L{AzureusObjectConnection<dopal.objects.AzureusObjectConnection>} which
+    contains an extended API.
+
+    This class defines an extended API, similar to the way that C{Dopal}
+    classes contain additional methods compared to their C{Azureus}
+    counterparts. It also sets up some different default behaviours (compared
+    to L{AzureusObjectConnection<dopal.objects.AzureusObjectConnection>}):
+      - All instances are I{persistent} connections by default.
+      - A L{RemoteObjectConverter<dopal.convert.RemoteObjectConverter>}
+        instance is installed as the default handler for converting XML to
+        its appropriate object representation.
+      - The L{DOPAL class map<dopal.obj_impl.DOPAL_CLASS_MAP>} is used as the
+        standard class mapping.
+
+    @see: The L{obj_impl<dopal.obj_impl>} module documentation.
+    '''
+
+    def __init__(self):
+        super(DopalConnection, self).__init__()
+
+        # Install the standard converter, bound to this connection, so
+        # response data is turned into remote objects automatically.
+        from dopal.convert import RemoteObjectConverter
+        converter = RemoteObjectConverter(self)
+
+        # Use the richer Dopal* classes rather than the plain Azureus*
+        # ones when instantiating remote objects.
+        from dopal.obj_impl import DOPAL_CLASS_MAP
+        converter.class_map = DOPAL_CLASS_MAP
+        self.converter = converter
+
+        # Unlike the base class, connections are persistent by default.
+        self.is_persistent_connection = True
+
+# Only needed while defining the class above - remove it from the
+# module namespace.
+del AzureusObjectConnection

+ 326 - 0
html/bin/clients/fluazu/dopal/obj_impl.py

@@ -0,0 +1,326 @@
+# File: obj_impl.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Implementation of classes defined by Azureus's plugin API.
+
+Not all classes are defined here are mentioned in the API documentation (simply
+because too much documentation will be generated).
+
+For each class that DOPAL has explicit support for, there will be two classes
+defined in this module. The list of classes supported in this version is
+described in L{classes}.
+
+For each class supported, there will be a class named
+I{AzureusE{<}classnameE{>}}, and another class named I{DopalE{<}classnameE{>}}.
+
+The I{Azureus*} class is a subclass of L{AzureusObject} mixed in with the
+I{*DataType} class in the L{class_defs} module - the API closely resembles
+that of the actual object in Azureus.
+
+The I{Dopal*} class is a subclass of the I{Azureus*} class, mixed in with the
+L{DopalObjectMixin} class. These classes exist to define an extended API of
+convenience functions beyond the API supplied by Azureus itself. Although all
+plugin classes have a I{Dopal*} representation, only those classes mentioned
+in the API documentation have any extended behaviour defined for them.
+
+@group Standard class implementations: %(standard_classes)s
+@group DOPAL class implementation: %(dopal_classes)s
+'''
+
+import dopal.class_defs as _cdefs
+from dopal.objects import AzureusObject, AzureusObjectMetaclass, TypelessRemoteObject
+from dopal.errors import MissingRemoteAttributeError
+
+# Temporary value - should be removed once the import has finished.
+import dopal
+__epydoc_mode = dopal.__dopal_mode__ == 2
+del dopal
+
+# Imported just for the __str__ method of DopalObjectMixin.
+import sys
+
+# The code here is used to create representive classes of each of the remote
+# Azureus class types that we support.
+import new
+def _make_class(common_cls, data_type_cls, name_prefix, class_map_dict=None):
+    # Dynamically build (or reuse) a class named <name_prefix> plus the
+    # remote XML type name, subclassing the common behaviour class and
+    # the generated data-type class. The result is published into this
+    # module's global namespace and, if class_map_dict is given, also
+    # registered there keyed by the remote type name.
+    az_class_name = data_type_cls.get_xml_type()
+    new_class_name = name_prefix + az_class_name
+
+    # Is the class already defined in the global namespace? If so - we
+    # avoid defining it again.
+    if globals().has_key(new_class_name):
+        classobj = globals()[new_class_name]
+    else:
+        base_classes = (common_cls, data_type_cls)
+        classobj = new.classobj(new_class_name, base_classes, {})
+        del base_classes
+        globals()[new_class_name] = classobj
+
+    # In epydoc mode, tag the class so documentation generation can
+    # identify the dynamically-created plugin classes.
+    if __epydoc_mode:
+        classobj.__plugin_class__ = True
+
+    if class_map_dict is not None:
+        class_map_dict[az_class_name] = classobj
+    return classobj
+
+# The two class maps we provide by default.
+STANDARD_CLASS_MAP = {}
+DOPAL_CLASS_MAP = {}
+
+# These methods are common on all DOPAL variants of classes we create.
+class DopalObjectMixin:
+
+    # Used by repr.
+    def short_description(self):
+        try:
+            result = self._short_description()
+        except MissingRemoteAttributeError:
+            result = None
+
+        if result is None:
+            return ''
+        return result
+
+    # Used by str.
+    def full_description(self):
+        try:
+            result = self._full_description()
+        except MissingRemoteAttributeError:
+            result = None
+
+        if result is None:
+            return ''
+        return result
+
+    def _short_description(self):
+        return None
+
+    def _full_description(self):
+        return self.short_description()
+
+    def __str__(self): # DopalObjectMixin
+        '''
+        Generates a string representation of this object - the value of this
+        string will be the result returned by the L{__unicode__} method.
+
+        Note - this method should return a string which is appropriate for
+        the system's encoding (so C{UnicodeEncodeError}s should not occur), but
+        it makes no guarantee I{how} it will do this.
+
+        As of DOPAL 0.60, it encodes the string using the default system
+        encoding, using 'replace' as the default way to handle encoding
+        problems.
+        '''
+        # What should be the default behaviour?
+        #
+        # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/466341
+        #
+        #   1) Use encoding - "raw_unicode_escape".
+        #   2) Use error handler - "replace" (current).
+        #   3) Use error handler - "ignore".
+        return unicode(self).encode(sys.getdefaultencoding(), 'replace')
+
+    def __unicode__(self):
+        '''
+        Generates a text representation of this object. If the
+        L{full_description} returns a useful representation, then the string
+        will have this format::
+           RemoteTypeName: FullDescriptionString
+
+        Otherwise, it will resort to the superclass string representation.
+
+        Example::
+           Download: The Subways - Staring at the Sun.mp3 [Stopped, 100.0%]
+        '''
+
+        nice_name = self.full_description()
+
+        if nice_name:
+            result = "%s: %s" % (self.get_remote_type(), nice_name)
+        else:
+            result = AzureusObject.__str__(self)
+
+        try:
+            return unicode(result)
+
+        # Python 2.2 doesn't define UnicodeDecodeError, we have to use
+        # UnicodeError.
+        except UnicodeError, error:
+            # string_escape only defined in Python 2.3.
+            if sys.version_info >= (2, 3):
+                return unicode(result.encode('string_escape'))
+            else:
+                return unicode(AzureusObject.__str__(self))
+        #
+
+    def __repr__(self):
+        nice_name = self.short_description()
+
+        repr_string = AzureusObject.__repr__(self)
+        if nice_name:
+            if repr_string[-1:] == ">":
+                repr_string = repr_string[:-1] + \
+                    ', for "%s">' % nice_name
+        return repr_string
+
+class DopalObjectStatsMixin(DopalObjectMixin):
+
+    # seed_count / non_seed_count are presumably remote attributes
+    # supplied by the generated data-type class - see class_defs.
+    def _short_description(self):
+        return "S:%s P:%s" % (self.seed_count, self.non_seed_count)
+
+    def _full_description(self):
+        return "Seeds: %s, Peers: %s" % (self.seed_count, self.non_seed_count)
+
+# Some classes which are basically stat counts share these methods.
+
+# Now we create the classes - the standard variants first, then the DOPAL
+# enhanced variants afterwards.
+#
+# The DOPAL variants are only automatically generated if we haven't defined
+# them manually. We only define them manually if we have methods we want
+# to define on them.
+for az_class in _cdefs._class_map.values():
+    _make_class(AzureusObject, az_class, 'Azureus', STANDARD_CLASS_MAP)
+# Don't leak the loop variable into the module namespace.
+del az_class
+
+#
+#
+# We've now created all the classes we wanted. We now define extra methods
+# on particular classes we care about.
+#
+#
+
+# Now we declare DOPAL variants of these classes - these classes will end up
+# providing a richer API than just the standard plugin classes.
+# Convenience accessors for the global up/down transfer speed limits
+# (the underlying parameters are expressed in KB/s, per their names).
+class DopalPluginConfig(DopalObjectMixin, AzureusPluginConfig):
+
+    def get_upload_speed_limit(self):
+        return self.getIntParameter(self.CORE_PARAM_INT_MAX_UPLOAD_SPEED_KBYTES_PER_SEC, 0)
+
+    def get_download_speed_limit(self):
+        return self.getIntParameter(self.CORE_PARAM_INT_MAX_DOWNLOAD_SPEED_KBYTES_PER_SEC, 0)
+
+    def set_upload_speed_limit(self, limit):
+        # None is accepted as an alias for 0.
+        if limit is None:
+            limit = 0
+        self.setIntParameter(self.CORE_PARAM_INT_MAX_UPLOAD_SPEED_KBYTES_PER_SEC, limit)
+
+    def set_download_speed_limit(self, limit):
+        # None is accepted as an alias for 0.
+        if limit is None:
+            limit = 0
+        self.setIntParameter(self.CORE_PARAM_INT_MAX_DOWNLOAD_SPEED_KBYTES_PER_SEC, limit)
+
+class DopalDownload(DopalObjectMixin, AzureusDownload):
+
+    # Short form delegates to the torrent's description (its name).
+    def _short_description(self):
+        return self.torrent.short_description()
+
+    # Full form appends the download stats description, e.g.
+    # "name [Stopped, 100.0%]".
+    def _full_description(self):
+        result = self.short_description()
+        if not result:
+            return result
+
+        result += " " + self.stats.full_description()
+        return result
+
+class DopalDownloadStats(DopalObjectMixin, AzureusDownloadStats):
+
+    # 'completed' appears to be stored in tenths of a percent (hence
+    # the division by 10 to display a percentage) - TODO confirm.
+    def _full_description(self):
+        return "[%s, %.1f%%]" % (self.status, float(self.completed) / 10)
+
+class DopalDiskManagerFileInfo(DopalObjectMixin, AzureusDiskManagerFileInfo):
+
+    # Full form is "basename [skipped|high|normal]" depending on the
+    # file's skip/priority flags.
+    def _full_description(self):
+        filename = self.short_description()
+        if not filename:
+            return None
+
+        if self.is_skipped:
+            return filename + " [skipped]"
+        elif self.is_priority:
+            return filename + " [high]"
+        else:
+            return filename + " [normal]"
+
+    # Short form is just the base name of the file path.
+    def _short_description(self):
+        import os.path
+        return os.path.basename(self.file)
+
+class DopalLoggerChannel(DopalObjectMixin, AzureusLoggerChannel):
+
+    # Channel name, tagged with "[disabled]" when not enabled.
+    def _full_description(self):
+        result = self.name
+        if not self.enabled:
+            result += " [disabled]"
+        return result
+
+    def _short_description(self):
+        return self.name
+
+class DopalPeer(DopalObjectMixin, AzureusPeer):
+
+    # Full form "ip:port"; short form just the IP address.
+    def _full_description(self):
+        return "%s:%s" % (self.ip, self.port)
+
+    def _short_description(self):
+        return self.ip
+
+class DopalPluginInterface(DopalObjectMixin, AzureusPluginInterface):
+
+    # Full form is the plugin's display name; short form is its ID.
+    def _full_description(self):
+        return self.plugin_name
+
+    def _short_description(self):
+        return self.plugin_id
+
+class DopalTorrent(DopalObjectMixin, AzureusTorrent):
+
+    # Described by the torrent's name.
+    def _short_description(self):
+        return self.name
+
+# Let's define the rest of the DOPAL classes.
+# The stats-flavoured mixin is applied to the announce/scrape result
+# classes first; the generic loop below then reuses (rather than
+# redefines) any Dopal* class already present in the module namespace,
+# including the hand-written ones above - see _make_class.
+for az_class in [AzureusDownloadAnnounceResult, AzureusDownloadScrapeResult]:
+    _make_class(DopalObjectStatsMixin, az_class, 'Dopal', DOPAL_CLASS_MAP)
+for az_class in STANDARD_CLASS_MAP.values():
+    _make_class(DopalObjectMixin, az_class, 'Dopal', DOPAL_CLASS_MAP)
+del az_class
+
+
+# Bugfix for tf-b4rt: don't try to use/change __doc__ if it's
+# empty, which is the case if Python was invoked with -OO
+# (except for early Python 2.5 releases where -OO is broken:
+# http://mail.python.org/pipermail/python-bugs-list/2007-June/038590.html).
+if __doc__ is not None:
+
+    # Amend the docstring to contain all the object types defined.
+    doc_string_sub_dict = {}
+    for class_map_dict, dict_entry in [
+        (STANDARD_CLASS_MAP, 'standard_classes'),
+        (DOPAL_CLASS_MAP, 'dopal_classes'),
+    ]:
+        # Pre-binding cls guarantees the "del cls" below succeeds even
+        # if the map is empty (Python 2 list comprehensions leak their
+        # loop variable into the enclosing scope).
+        cls = None
+        classes_in_map = [cls.__name__ for cls in class_map_dict.values()]
+        classes_in_map.sort()
+        doc_string_sub_dict[dict_entry] = ', '.join(classes_in_map)
+        del classes_in_map, cls
+
+    __doc__ = __doc__ % doc_string_sub_dict
+    del doc_string_sub_dict
+
+del __epydoc_mode
+
+# Objects with no declared remote type fall back to a typeless wrapper.
+STANDARD_CLASS_MAP[None] = DOPAL_CLASS_MAP[None] = TypelessRemoteObject

+ 565 - 0
html/bin/clients/fluazu/dopal/objects.py

@@ -0,0 +1,565 @@
+# File: objects.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+Defines the object layer framework.
+'''
+
+from dopal.core import ExtendedAzureusConnection
+from dopal.errors import AzMethodError, InvalidObjectIDError, \
+    RemoteMethodError, StaleObjectReferenceError, ConnectionlessObjectError, \
+    NonRefreshableConnectionlessObjectError, MissingRemoteAttributeError, \
+    NonRefreshableIncompleteObjectError, NonRefreshableObjectError
+import dopal.utils
+
+class AzureusObjectConnection(ExtendedAzureusConnection):
+    '''
+    This connection class generates remote representations of each object available in Azureus.
+
+    @ivar is_persistent_connection: Boolean indicating whether the connection should be persistent. Default is C{False}.
+
+    @ivar converter: Callable object which will be used to convert response data into objects.
+
+    This will usually be a L{RemoteObjectConverter<dopal.convert.RemoteObjectConverter>} instance which will convert the results of method invocations into objects. A value must be assigned for this object to work - no suitable default is provided automatically.
+    '''
+
+    def __init__(self): # AzureusObjectConnection
+        ExtendedAzureusConnection.__init__(self)
+        self.is_persistent_connection = False
+        self.converter = None
+        # Cache for the root PluginInterface object - see
+        # get_plugin_interface.
+        self.cached_plugin_interface_object = None
+
+    def _on_reconnect(self): # AzureusObjectConnection
+        # On a non-persistent connection a reconnect invalidates remote
+        # object IDs, so the cached root object must be dropped.
+        if not self.is_persistent_connection:
+            self.cached_plugin_interface_object = None
+
+    def get_plugin_interface(self): # AzureusObjectConnection
+        '''
+        Return the root PluginInterface object, reusing (and verifying)
+        a cached instance where possible; a fresh object is fetched when
+        there is no cache or the cached one cannot be repaired.
+        '''
+
+        obj = self.cached_plugin_interface_object
+        if self.cached_plugin_interface_object is not None:
+
+            # Try to verify that it exists.
+            #
+            # Why do we verify the object? Well, we just want to ensure that
+            # the object we return here is valid. It would be valid if we
+            # returned getPluginInterface. If the object needs repairing,
+            # then it is better to do it immediately.
+            #
+            # Why do we not rely on the object just sorting itself out?
+            # Well, the default definitions for extracting the object from the
+            # root will use the plugin interface object as the root.
+            #
+            try:
+                self.verify_objects([self.cached_plugin_interface_object])
+            except NonRefreshableObjectError:
+                # Subclasses of this error will occur if there's a problem
+                # refreshing the object (for whatever reason). Refreshing
+                # it will only occur if the object is not valid.
+                #
+                # If, for whatever reason, our cached plugin interface hasn't
+                # repaired itself, we'll just lose the cached version and get
+                # a new object.
+                #
+                # Why do we not just get a plugin interface object and update
+                # the cached plugin interface object? If the object is
+                # 'broken', or object persistency is not enabled, there's no
+                # reason to repair it - we wouldn't normally do that for any
+                # other object.
+                #
+                # But it is important we return a valid object.
+                self.cached_plugin_interface_object = None
+
+        if self.cached_plugin_interface_object is None:
+            self.cached_plugin_interface_object = self.getPluginInterface()
+
+        return self.cached_plugin_interface_object
+
+    def getPluginInterface(self): # AzureusObjectConnection
+        # Raw fetch of the root object - no caching or verification.
+        return self.invoke_object_method(None, 'getSingleton', (), 'PluginInterface')
+
+    # Invoke the remote method - nothing regarding object persistency is
+    # handled here.
+    def _invoke_object_method(self, az_object, method_name, method_args, result_type=None): # AzureusObjectConnection
+        # az_object may be None for "static" invocations such as
+        # getSingleton above.
+        if az_object is None:
+            az_object_id = None
+        else:
+            az_object_id = az_object.get_object_id()
+
+        # We don't need to extract the object ID's for objects which are in
+        # method_args - they will be an instance of one of the wrapper type
+        # classes, which will be appropriate enough to pull out the correct
+        # type to use.
+        response = self.invoke_remote_method(az_object_id, method_name, method_args)
+
+        # Convert the raw response data into a (remote) object.
+        result = self.converter(response.response_data, result_type=result_type)
+        return result
+
+    def invoke_object_method(self, az_object, method_name, method_args, result_type=None): # AzureusObjectConnection
+
+        # Every object involved in the call - the target plus any
+        # RemoteObject arguments - must be valid for this connection.
+        objects = [obj for obj in method_args if isinstance(obj, RemoteObject)]
+        if az_object is not None:
+            objects.insert(0, az_object)
+
+        self.verify_objects(objects)
+
+        try:
+            return self._invoke_object_method(az_object, method_name, method_args, result_type)
+        except InvalidObjectIDError, error:
+            # XXX: TODO, this exception is likely to be one of two subclasses
+            # You don't need to call is_connection_valid, since you'll know
+            # from the subclasses error. It's an unnecessary method call - fix
+            # it!
+            if not self.is_persistent_connection:
+                raise
+            if self.is_connection_valid():
+                raise
+            # Persistent connection went stale mid-call - reconnect,
+            # re-verify (which refreshes the objects) and retry once.
+            self.establish_connection()
+            self.verify_objects(objects)
+
+            # Two very quick failures of this type is unlikely to happen -
+            # it is more likely to be a logic error in this case, so we
+            # don't retry if it fails again.
+            return self._invoke_object_method(az_object, method_name, method_args, result_type)
+
+    def verify_objects(self, objects): # AzureusObjectConnection
+        # I did write this as a list expression initially, but I guess we
+        # should keep it readable.
+        #
+        # Returns True if any object had to be refreshed against the
+        # current connection. On a non-persistent connection, a stale
+        # object raises StaleObjectReferenceError instead.
+        has_refreshed_objects = False
+        for obj in objects:
+            if obj.__connection_id__ != self.connection_id:
+                if self.is_persistent_connection:
+                    obj._refresh_object(self)
+                    has_refreshed_objects = True
+                else:
+                    raise StaleObjectReferenceError, obj
+        return has_refreshed_objects
+
+class RemoteObject(object):
+
+    def __init__(self, connection, object_id, attributes=None): # RemoteObject
+
+        if connection is None:
+            self.__connection_id__ = None
+        elif isinstance(connection, AzureusObjectConnection):
+            self.__connection_id__ = connection.connection_id
+        else:
+            err = "connection must be instance of AzureusObjectConnection: %s"
+            raise ValueError, err % connection
+
+        self.__connection__ = connection
+        self.__object_id__ = object_id
+
+        if attributes is not None:
+            self.update_remote_data(attributes)
+
+    def __repr__(self): # RemoteObject
+        txt = "<%s object at 0x%08X" % (self.__class__.__name__, id(self))
+
+        # "sid" stands for short ID.
+        sid = self.get_short_object_id()
+        if sid is not None:
+            txt += ", sid=%s" % sid
+        return txt + ">"
+
+    def __str__(self): # RemoteObject
+        sid = self.get_short_object_id()
+        if sid is None:
+            return RemoteObject.__repr__(self)
+        else:
+            return "<%s, sid=%s>" % (self.__class__.__name__, sid)
+
+    def get_short_object_id(self):
+        if self.__object_id__ is None:
+            return None
+
+        return dopal.utils.make_short_object_id(self.__object_id__)
+
+    def get_object_id(self): # RemoteObject
+        return self.__object_id__
+
+    def get_remote_type(self): # RemoteObject
+        if not hasattr(self, 'get_xml_type'):
+            return None
+        return self.get_xml_type()
+
+    def get_remote_attributes(self): # RemoteObject
+        result = {}
+        result['__connection__'] = self.__connection__
+        result['__connection_id__'] = self.__connection_id__
+        result['__object_id__'] = self.__object_id__
+        return result
+
+    # set_remote_attribute and update_remote_data are very closely
+    # linked - the former sets one attribute at a time while the
+    # other sets multiple attributes together. It is recommended
+    # that set_remote_attribute is not overridden, but
+    # update_remote_data is instead. If you choose to override
+    # set_remote_attribute, you should override update_remote_data
+    # to use set_remote_attribute.
+    def set_remote_attribute(self, name, value): # RemoteObject
+        return self.update_remote_data({name: value})
+
+    def update_remote_data(self, attribute_data): # RemoteObject
+        for key, value in attribute_data.items():
+            setattr(self, key, value)
+
+    def get_connection(self): # RemoteObject
+        if self.__connection__ is None:
+            raise ConnectionlessObjectError, self
+        return self.__connection__
+
+    # Exits quietly if the current connection is valid.
+    #
+    # If it is invalid, then this object's _refresh_object method will be
+    # called instead to retrieve a new object ID (if applicable), but only
+    # if this object's connection is a persistent one. If not, it will raise
+    # a StaleObjectReferenceError.
+    def verify_connection(self): # RemoteObject
+        return self.get_connection().verify_objects([self])
+
+    def refresh_object(self): # RemoteObject
+        '''
+        Updates the remote attributes on this object.
+
+        @raise NonRefreshableConnectionlessObjectError: If the object is not
+               attached to a connection.
+        @return: None
+        '''
+        try:
+            if not self.verify_connection():
+                self._refresh_object(self.__connection__)
+        except ConnectionlessObjectError:
+            raise NonRefreshableConnectionlessObjectError, self
+
    def _refresh_object(self, connection_to_use): # RemoteObject
        '''
        Internal method which refreshes the attributes on the object.

        This method actually performs two different functionalities.

        If the connection to use is the same as the one already attached,
        with the same connection ID, then a refresh will take place.

        If the connection is either a different connection, or the connection
        ID is different, then an attempt will be made to retrieve the
        equivalent object to update the attributes.

        @param connection_to_use: The connection object to update with.
        @type connection_to_use: L{AzureusObjectConnection}
        @raise NonRefreshableObjectTypeError: Raised when the object type is
        not one which can be refreshed on broken connections.
        @raise NonRefreshableIncompleteObjectError: Raised when the object is
        missing certain attributes which prevents it being refreshed on broken
        connections.
        @return: None
        '''

        # If the object is still valid, let's use the refresh method.
        # (Same connection object AND same connection ID - a matching
        # connection with a different ID means the link was re-established,
        # which invalidates previously issued remote object IDs.)
        if (self.__connection__ == connection_to_use) and \
            self.__connection_id__ == connection_to_use.connection_id:
            new_object = connection_to_use.invoke_object_method(
                self, '_refresh', (), result_type=self.get_xml_type())

        # The object is no longer valid. Let's grab the equivalent object.
        else:

            # Special case - if the object is the cached plugin interface
            # object, then we need to avoid calling get_plugin_interface.
            #
            # Why? Because that'll pick up that the object is invalid, and
            # then attempt to refresh it. Recursive infinite loop.
            #
            # So in that case, we just get a plugin interface object
            # directly.
            #
            # NOTE(review): getPluginInterface (camel-case) is presumably the
            # raw remote method, bypassing the caching get_plugin_interface
            # wrapper used below - confirm against the connection class.
            if connection_to_use.cached_plugin_interface_object is self:
                new_object = connection_to_use.getPluginInterface()
            else:
                root = connection_to_use.get_plugin_interface()
                new_object = self._get_self_from_root_object(root)
                del root

        # Get the attributes...
        new_data = new_object.get_remote_attributes()

        # (Make sure that the important attributes are there...)
        # Debug-only sanity check: the replacement object must carry the
        # identity attributes we are about to copy onto ourselves.
        if __debug__:
            attrs = ['__connection__', '__connection_id__', '__object_id__']
            for key in attrs:
                if key not in new_data:
                    err = "%r.get_remote_attributes() is missing values!"
                    raise AssertionError, err % self
            del attrs, key

        # Update the values.
        self.update_remote_data(new_data)
+
    # This method is used to locate the remote equivalent object from the
    # plugin interface object. If the object cannot be retrieved from the
    # PluginInterface, you should raise a NonRefreshableObjectTypeError
    # instead (this is the default behaviour).
    #
    # NOTE(review): the comment above names NonRefreshableObjectTypeError,
    # but the code raises NonRefreshableObjectError - confirm against
    # dopal.errors which spelling is correct.
    def _get_self_from_root_object(self, plugin_interface): # RemoteObject
        raise NonRefreshableObjectError, self
+
    def invoke_object_method(self, method, method_args, result_type=None): # RemoteObject
        # Invoke the named remote method on this object via the attached
        # connection, giving _handle_invocation_error a chance to translate
        # any RemoteMethodError that results.
        try:
            return self.get_connection().invoke_object_method(self, method, method_args, result_type=result_type)
        except RemoteMethodError, error:

            # There's three different ways an error can be generated here:
            #   1) _handle_invocation_error raises an error - this will have
            #      the traceback of where it was raised.
            #   2) _handle_invocation_error returns an error object - this
            #      will have the traceback of the original exception.
            #   3) _handle_invocation_error returns None - this will just
            #      reraise the original error.

            error = self._handle_invocation_error(error, method, method_args)
            if error is not None:
                import sys
                # Python 2 three-argument raise: re-raise the translated
                # error while preserving the original traceback.
                raise error, None, sys.exc_info()[2]
            raise
+
+    def _handle_invocation_error(self, error, method_name, method_args): # RemoteObject
+        # Default behaviour - just reraise the old error.
+        return None
+
+    # Called by the converter classes to determine the type of a remote
+    # attribute.
+    def _get_type_for_attribute(self, attrib_name, mapping_key=None): # RemoteObject
+        return None
+
class RemoteConstantsMetaclass(type):

    '''
    Metaclass which copies every entry of a class's __az_constants__
    mapping onto the class as a plain attribute, so that remote constants
    can be referenced as ordinary class attributes.
    '''

    def __init__(cls, name, bases, cls_dict):
        super(RemoteConstantsMetaclass, cls).__init__(name, bases, cls_dict)
        constants = getattr(cls, '__az_constants__', None)
        if constants is not None:
            for const_name in constants:
                setattr(cls, const_name, constants[const_name])
+
# This just used for interrogation purposes - the guts of this function will
# be used to build other functions (see below).
#
# Poor little function - ends up being consumed and tossed aside, like a
# Hollow devouring a human soul.
#
# NOTE(review): __funcname__ is not defined in this module - presumably
# MethodFactory rebuilds the code object and substitutes the real remote
# method name for it.  Confirm in dopal.utils.
def _invoke_remote_method(self, *args, **kwargs):
    from dopal.utils import handle_kwargs
    kwargs = handle_kwargs(kwargs, result_type=None)
    return self.invoke_object_method(__funcname__, args, **kwargs)

# Build the two method factories used by the metaclasses / mixins below,
# then remove the intermediates from the module namespace.
from dopal.utils import MethodFactory
_methodobj = MethodFactory(_invoke_remote_method)
make_instance_remote_method = _methodobj.make_instance_method
make_class_remote_method = _methodobj.make_class_method
del _methodobj, MethodFactory
+
+from dopal.aztypes import AzureusMethods
+
class RemoteMethodMetaclass(type):

    '''
    Metaclass which materialises remote method stubs.

    If the class being created does not define __az_methods__ directly,
    one is synthesised by merging the definitions of all base classes.
    A stub method (built via make_class_remote_method) is then attached
    for every declared method name the class does not already provide.
    '''

    def __init__(cls, name, bases, cls_dict):
        super(RemoteMethodMetaclass, cls).__init__(name, bases, cls_dict)

        az_key = '__az_methods__'
        if az_key in cls_dict:
            method_defs = getattr(cls, az_key)
        else:
            # Not declared on this class itself - merge the definitions
            # inherited from each base into a fresh collection.
            method_defs = AzureusMethods()
            for base_class in bases:
                inherited = getattr(base_class, az_key, None)
                if inherited is not None:
                    method_defs.update(inherited)
            setattr(cls, az_key, method_defs)

        # Generate the real (stub) methods for anything not hand-written.
        for remote_name in method_defs.get_method_names():
            if not hasattr(cls, remote_name):
                setattr(cls, remote_name,
                        make_class_remote_method(remote_name, cls))
+
class RemoteMethodMixin(object):

    '''
    Mixin providing remote method invocation, either from declared method
    definitions (__az_methods__) or generated dynamically on attribute
    access.
    '''

    # When true, unknown (non-underscore) attribute lookups return a
    # dynamically built remote method object.
    __use_dynamic_methods__ = False
    # When true, arguments are wrapped / checked against __az_methods__.
    __use_type_checking__ = True

    def __getattr__(self, name):
        # Anything which starts with an underscore is unlikely to be a public
        # method.
        if (not name.startswith('_')) and self.__use_dynamic_methods__:
            return self._get_remote_method_on_demand(name)
        # NOTE(review): _superclass is assigned but never used - kept as-is.
        _superclass = super(RemoteMethodMixin, self)

        # Influenced by code here:
        #   http://aspn.activestate.com/ASPN/Mail/Message/python-list/1620146
        #
        # The problem is that we can't use the super object to get a
        # __getattr__ method for the appropriate class.
        #
        # Walk the MRO *after* this mixin and delegate to the first class
        # defining its own __getattr__; the for/else falls through to a
        # plain AttributeError when none does.
        self_mro = list(self.__class__.__mro__)
        for cls in self_mro[self_mro.index(RemoteMethodMixin)+1:]:
            if hasattr(cls, '__getattr__'):
                return cls.__getattr__(self, name)
        else:
            # Isn't there something I can call to fall back on default
            # behaviour?
            text = "'%s' object has no attribute '%s'"
            raise AttributeError, text % (type(self).__name__, name)

    # Used to create a remote method object on demand.
    def _get_remote_method_on_demand(self, name):
        return make_instance_remote_method(name, self)

    def invoke_object_method(self, method, method_args, result_type=None):
        # Optionally type-check / wrap the arguments before delegating to
        # the next invoke_object_method in the MRO (RemoteObject's).
        if self.__use_type_checking__:
            try:
                az_methods = self.__az_methods__
            except AttributeError:
                # No declared methods: only tolerable when dynamic methods
                # are enabled (type checking is then silently skipped).
                if not self.__use_dynamic_methods__:
                    raise RuntimeError, "%s uses type checking, but has no methods to check against" % type(self).__name__
            else:
                try:
                    method_args, result_type = \
                        az_methods.wrap_args(method, method_args)
                except AzMethodError:
                    # Undeclared / badly-typed method - fatal unless dynamic
                    # methods are allowed as a fallback.
                    if not self.__use_dynamic_methods__:
                        raise

        return super(RemoteMethodMixin, self).invoke_object_method(method, method_args, result_type=result_type)
+
+
class RemoteAttributeMetaclass(type):

    # XXX: What the hell is this meant to do!?
    #
    # What it *does* do: merge the class's own __az_attributes__ and any
    # inherited __default_remote_attribute_names__ / __az_attributes__
    # mappings into a single __default_remote_attribute_names__ mapping
    # stored on the class.
    #
    # NOTE(review): base-class entries are applied *after* this class's
    # own, so a base can override a subclass definition.  That ordering
    # looks backwards (and may relate to the XXX above); behaviour is
    # preserved here - confirm intent before changing.
    def __init__(cls, name, bases, cls_dict):
        deft_names = '__default_remote_attribute_names__'
        az_attrs = '__az_attributes__'

        attr_dict = cls_dict.setdefault(deft_names, {})
        attr_dict.update(cls_dict.get(az_attrs, {}))

        for base in bases:
            attr_dict.update(getattr(base, deft_names, {}))
            attr_dict.update(getattr(base, az_attrs, {}))

        setattr(cls, deft_names, attr_dict)
        super(RemoteAttributeMetaclass, cls).__init__(name, bases, cls_dict)
+
class RemoteAttributesMixin(object):

    '''
    Mixin managing "remote" attributes - attributes whose values mirror
    state held on the remote Azureus side.  Remote attributes are tracked
    by name in __remote_attribute_names__ (mapping name -> optional type)
    and are normally only settable through set_remote_attribute or
    update_remote_data.
    '''

    __default_remote_attribute_names__ = {}
    # When true, all remote attribute values are deleted before a refresh.
    __reset_attributes_on_refresh__ = False
    # When true, direct assignment to a remote attribute raises.
    __protect_remote_attributes__ = True

    def __init__(self, *args, **kwargs):
        # Class attribute becomes instance attribute.
        super(RemoteAttributesMixin, self).__init__(*args, **kwargs)
        self.__remote_attribute_names__ = self.__default_remote_attribute_names__.copy()

    def __getattr__(self, name):
        # A known remote attribute with no local value gets a specific
        # error (it may simply not have been fetched yet).
        #
        # NOTE(review): if __init__ has not yet run, the lookup of
        # __remote_attribute_names__ here would re-enter __getattr__ and
        # recurse - confirm construction order makes that impossible.
        if name in self.__remote_attribute_names__:
            raise MissingRemoteAttributeError, name

        # Influenced by code here:
        #   http://aspn.activestate.com/ASPN/Mail/Message/python-list/1620146
        # (delegate to the first __getattr__ after this mixin in the MRO)
        self_mro = list(self.__class__.__mro__)
        for cls in self_mro[self_mro.index(RemoteAttributesMixin)+1:]:
            if hasattr(cls, '__getattr__'):
                return cls.__getattr__(self, name)
        else:
            # Isn't there something I can call to fall back on default
            # behaviour?
            text = "'%s' object has no attribute '%s'"
            raise AttributeError, text % (type(self).__name__, name)


    def __setattr__(self, name, value):
        # Block direct writes to remote attributes (dunder-prefixed names
        # are exempt, so internal flags remain assignable).
        if self.__protect_remote_attributes__ and not name.startswith('__'):
            if name in self.__remote_attribute_names__:
                err = "cannot set remote attribute directly: %s"
                raise AttributeError, err % name
        return super(RemoteAttributesMixin, self).__setattr__(name, value)

    def set_remote_attribute(self, name, value):
        # Register the name (with no type information) if new, then set
        # the value, bypassing the protection in __setattr__ above.
        if name not in self.__remote_attribute_names__:
            self.__remote_attribute_names__[name] = None
        return super(RemoteAttributesMixin, self).__setattr__(name, value)

    def get_remote_attributes(self):
        # Superclass attributes plus every locally-present remote value.
        result = super(RemoteAttributesMixin, self).get_remote_attributes()
        for attribute in self.__remote_attribute_names__:
            if hasattr(self, attribute):
                result[attribute] = getattr(self, attribute)
        return result

    def is_remote_attribute(self, name):
        # True if the name is a tracked remote attribute on this instance.
        return name in self.__remote_attribute_names__

    def update_remote_data(self, remote_attribute_dict):
        # Optionally wipe existing values first, then apply the new data
        # with write protection temporarily disabled.
        if self.__reset_attributes_on_refresh__:
            for attrib in self.__remote_attribute_names__:
                try:
                    delattr(self, attrib)
                except AttributeError:
                    pass

        _super = super(RemoteAttributesMixin, self)

        # XXX: Do a better fix than this!
        pra_value = self.__protect_remote_attributes__
        self.__protect_remote_attributes__ = False
        try:
            return _super.update_remote_data(remote_attribute_dict)
        finally:
            self.__protect_remote_attributes__ = pra_value

    def _get_type_for_attribute(self, name, mapping_key=None):
        # Look up the declared type, preferring a "name,mapping_key" entry
        # when a mapping key is supplied; fall back to the superclass when
        # nothing (non-None) is declared.
        if mapping_key is not None:
            key_to_use = name + ',' + mapping_key
        else:
            key_to_use = name
        result = self.__remote_attribute_names__.get(key_to_use)
        if result is not None:
            return result
        else:
            import dopal
            # In strict mode (dopal.__dopal_mode__ == 1), an undeclared
            # attribute type is treated as a programming error.
            if dopal.__dopal_mode__ == 1:
                raise RuntimeError, (self, key_to_use)
        _superfunc = super(RemoteAttributesMixin, self)._get_type_for_attribute
        return _superfunc(name, mapping_key)
+
# Combined metaclass: applies constant injection, remote method stub
# generation and remote attribute merging in one type (needed because the
# mixins used by AzureusObject each rely on their own metaclass).
class AzureusObjectMetaclass(RemoteConstantsMetaclass, RemoteMethodMetaclass, RemoteAttributeMetaclass):
    pass
+
class AzureusObject(RemoteAttributesMixin, RemoteMethodMixin, RemoteObject):

    '''
    Standard base class for concrete remote Azureus object types.
    '''

    __metaclass__ = AzureusObjectMetaclass

    def _get_self_from_root_object(self, plugin_interface):
        # XXX: Err, this is a bit incorrect - it should be get_remote_type.
        # But it will do for now. Need to think more carefully about
        # the responsibilities of the two methods.
        if not hasattr(self, 'get_xml_type'):
            return super(AzureusObject, self)._get_self_from_root_object(plugin_interface)
        from dopal.persistency import get_equivalent_object_from_root
        return get_equivalent_object_from_root(self, plugin_interface)
+
class TypelessRemoteObject(RemoteAttributesMixin, RemoteMethodMixin, RemoteObject):
    # Dynamic methods enabled: any non-underscore attribute access is
    # treated as a remote method, since no type information is available.
    __use_dynamic_methods__ = True

# Default class map used when no type-specific class is known.
TYPELESS_CLASS_MAP = {None: TypelessRemoteObject}
+
+# XXX: Define converter here?
+# Add type checking code (though this proably should be core)
+# Add some code to read data from statistics file (what level should this be at?)
+## Allow some code to make link_error_handler assignable
+# Converter - needs to have some default behaviours (easily changeable):
+#   a) Atoms - what to do if no type is suggested. (not so important this one)
+#   b) Objects - what to do if no class is given (if no id is given?)

+ 60 - 0
html/bin/clients/fluazu/dopal/persistency.py

@@ -0,0 +1,60 @@
+# File: persistency.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+'''
+Support module containing code which supports the persistency functionality offered by DOPAL.
+'''
+_refresh_methods = {
+
+    # Simple ones.
+    'PluginConfig': lambda pi, obj: \
+        pi.getPluginconfig(),
+    'DownloadManager': lambda pi, obj: \
+        pi.getDownloadManager(),
+    'IPFilter': lambda pi, obj: \
+        pi.getIPFilter(),
+    'ShortCuts': lambda pi, obj: \
+        pi.getShortCuts(),
+    'TorrentManager': lambda pi, obj: \
+        pi.getTorrentManager(),
+    'PluginInterface': lambda pi, obj: \
+        pi,
+
+    # Not so simple ones.
+    'Download': lambda pi, obj: \
+        pi.getShortCuts().getDownload(obj.torrent.hash),
+    'Torrent': lambda pi, obj: \
+        pi.getShortCuts().getDownload(obj.hash).torrent,
+}
+
+# XXX: Test and document.
+def get_equivalent_object_from_root(original_object, plugin_interface):
+
+    import dopal.objects
+    if not isinstance(original_object, dopal.objects.RemoteObject):
+        raise ValueError, "%s is not a RemoteObject" % (original_object,)
+
+    from dopal.errors import NonRefreshableObjectTypeError, \
+        MissingRemoteAttributeError, NonRefreshableIncompleteObjectError
+    remote_type = original_object.get_remote_type()
+    try:
+        refresh_function = _refresh_methods[remote_type]
+    except KeyError:
+        raise NonRefreshableObjectTypeError(obj=original_object)
+
+    try:
+        return refresh_function(plugin_interface, original_object)
+    except MissingRemoteAttributeError, error:
+        raise NonRefreshableIncompleteObjectError(obj=original_object, error=error)

+ 1410 - 0
html/bin/clients/fluazu/dopal/scripting.py

@@ -0,0 +1,1410 @@
+# File: scripting.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+This module is designed to provide an 'environment' that allows small scripts
+to be written without having to deal with the setting up and exception handling
+that you would normally have to deal with.
+
It also tries to make it straight-forward to write and distribute scripts
without requiring any modification by another user to get them working on
their system (the most common change would be to personalise the script to
work with a user's particular connection setup).
+
+This module provides simple functionality for scripts - data persistency, error
+handling and logging - it even provides a mechanism for sending alerts to the
+user to be displayed in Azureus (via "Mr Slidey").
+
+There are two main functions provided here:
+  - C{L{ext_run}} - which provides all the main functionality; and
+  - C{L{run}} - which calls ext_run with the default settings, but allows these
+          arguments to be modified through command line arguments.
+
+The following features are provided by this module:
+
+  - B{Automatic connection setup} - Default connection settings can be set by
+        running this module (or any script which uses the run method) with the
+        C{--setup-connection} command line argument. This will provide the user
+        with an input prompt to enter connection values, and store it an
+        appropriate directory (see L{determine_configuration_directory}). That
+        data is then used for all scripts using that module.
+
+  - B{Data Persistency} - You are provided with access to methods to save and
+        load a pickleable object - the module keeps the data stored in a unique
+        data directory based on the script's name.
+
+  - B{Logging (local)} - A logging system is initialised for the script to log
+        any messages to - by default, logging to a file in the data directory.
+
+  - B{Logging (remote)} - A LoggerChannel is set up to provide the ability to
+        send log messages to Azureus through it's own logging mechanism. It
+        also provides the ability to send alerts to the user (via Mr Slidey).
+
+  - B{Pause on exit} - The module provides behaviour to pause whenever a script
+        has finished execution, either in all cases, or only if an error has
+        occurred. This makes it quite useful if you have the script setup to
+        run in a window, which closes as soon as the script has terminated.
+
+When writing a script, it should look like this::
+
+    def script_function(env):
+       ... # Do something here.
+
+    if __name__ == '__main__':
+        import dopal.scripting
+        dopal.scripting.run("functionname", script_function)
+
+where "script_function" is the main body of the script (which takes one
+argument, a C{L{ScriptEnvironment}} instance) and "functionname" which is used
+to define the script (in terms of where persistent data is sent), and what the
+script is called when sending alerts to Azureus.
+'''
+
+# Python 2.2 compatibility.
+from __future__ import generators
+
+import os, os.path
# Cached default configuration directory.
# NOTE(review): not referenced anywhere in the visible portion of this
# module - possibly vestigial; confirm before removing.
_default_config_dir = None
+
def determine_configuration_directory(mainname='DOPAL Scripts', subname=None,
    create_dir=True, preserve_case=False):

    '''
    Determines an appropriate directory to store application data into.

    This function will look at environmental settings and registry settings to
    determine an appropriate directory.

    The locations considered are in order:
      - The user's home, as defined by the C{home} environment setting.
      - The user's application directory, as determined by the C{win32com} library.
      - The user's application directory, as determine by the C{_winreg} library.
      - The user's application directory, as defined by the C{appdata} environment setting.
      - The user's home, as defined by the C{homepath} environment setting (and if it exists, the C{homedrive} environment setting).
      - The user's home, as defined by the C{os.path.expanduser} function.
      - The current working directory.

    (Note: this order may change between releases.)

    If an existing directory can be found, that will be returned. If no
    existing directory is found, then this function will try to create the
    directory in the most preferred location (based on the order of
    preference). If that fails - no existing directory was found and no
    directory could be created, then an OSError will be raised. If create_dir
    is False and no existing directory can be found, then the most preferred
    candidate directory will be returned.

    The main argument taken by this function is mainname. This should be a
    directory name which is suitable for a Windows application directory
    (e.g. "DOPAL Scripts"), as opposed to something which
    resembles more Unix-based conventions (e.g. ".dopal_scripts"). This
    function will convert the C{mainname} argument into a Unix-style filename
    automatically in some cases (read below). You can set the C{preserve_case}
    argument to C{True} if you want to prevent automatic name conversation of
    this argument to take place.

    The C{subname} argument is the subdirectory which gets created in the
    main directory. This name will be used literally - no translation of the
    directory name will occur.

    When this function is considering creating or locating a directory inside
    a 'home' location, it will use a Unix-style directory name (e.g.
    ".dopal_scripts"). If it is considering an 'application' directory, it will
    use a Windows-style directory name (e.g. "DOPAL Scripts"). If it considers
    a directory it is unable to categorise (like the current working
    directory), it will use a Windows-style name on Windows systems, or a
    Unix-style name on all other systems.

    @param mainname: The main directory name to store data in - the default is
      C{"DOPAL Scripts"}. This value cannot be None.
    @param subname: The subdirectory to create in the main directory - this may
      be C{None}.
    @param create_dir: Boolean value indicating whether we should create the
      directory if it doesn't already exist (default is C{True}).
    @param preserve_case: Indicates whether the value given in C{mainname}
      should be taken literally, or whether name translation can be performed.
      Default is C{False}.
    @return: A directory which matches the specification given. This directory
      is guaranteed to exist, unless this function was called with
      C{create_dir} being False.
    @raise OSError: If C{create_dir} is C{True}, and no appropriate directory
      could be created.
    '''

    # If we have an application data directory, then we will prefer to use
    # that. We will actually iterate over all directories that we consider, and
    # return the first directory we find. If we don't manage that, we'll create
    # one in the most appropriate directory. We'll also try to stick to some
    # naming conventions - using a dot-prefix for home directories, using
    # normal looking names in application data directories.
    #
    # Code is based on a mixture of user.py and homedirectory.py from the
    # pyopengl library.

    # Our preferred behaviour - existance of a home directory, and creating a
    # .dopal_scripts directory there.
    if not preserve_case:
        app_data_name = mainname
        home_data_name = '.' + mainname.lower().replace(' ', '_')

        import sys
        if sys.platform == 'win32':
            unknown_loc_name = app_data_name
        else:
            unknown_loc_name = home_data_name
    else:
        app_data_name = home_data_name = unknown_loc_name = mainname

    if subname:
        app_data_name = os.path.join(app_data_name, subname)
        home_data_name = os.path.join(home_data_name, subname)
        unknown_loc_name = os.path.join(unknown_loc_name, subname)

    # Generator yielding (parent_directory, directory_name) candidate pairs
    # in order of preference.
    def suggested_location():

        # 1) Test for the home directory.
        # ('in' is used instead of the deprecated has_key throughout.)
        if 'home' in os.environ:
            yield os.environ['home'], home_data_name

        # 2) Test for application data - using win32com library.
        try:
            from win32com.shell import shell, shellcon
            yield shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0), app_data_name
        except Exception:
            pass

        # 3) Test for application data - using _winreg.
        try:
            import _winreg
            key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
            path = _winreg.QueryValueEx(key, 'AppData')[0]
            _winreg.CloseKey(key)
            yield path, app_data_name
        except Exception:
            pass

        # 4) Test for application data - using environment settings.
        if 'appdata' in os.environ:
            yield os.environ['appdata'], app_data_name

        # 5) Test for home directory, using other environment settings.
        if 'homepath' in os.environ:
            if 'homedrive' in os.environ:
                yield os.path.join(os.environ['homedrive'], os.environ['homepath']), home_data_name
            else:
                yield os.environ['homepath'], home_data_name

        # 6) Test for home directory, using expanduser.
        expanded_path = os.path.expanduser('~')
        if expanded_path != '~':
            yield expanded_path, home_data_name

        # 7) Try the current directory then.
        yield os.getcwd(), unknown_loc_name

    # Return the first candidate which already exists; remember the rest
    # (in preference order) in case we have to create one.
    suggested_unmade_paths = []
    for suggested_path, suggested_name in suggested_location():
        full_suggested_path = os.path.join(suggested_path, suggested_name)
        if os.path.isdir(full_suggested_path):
            return full_suggested_path
        suggested_unmade_paths.append(full_suggested_path)

    # BUG FIX: previously the create_dir argument was accepted but ignored
    # (directories were always created).  Honour the documented contract:
    # with create_dir False, just return the most preferred candidate.
    if not create_dir:
        return suggested_unmade_paths[0]

    # Return the first path we're able to create.
    for path in suggested_unmade_paths:
        try:
            os.makedirs(path)
        except OSError:
            pass
        else:
            # Success!
            if os.path.isdir(path):
                return path

    # If we get here, then there's nothing we can do. We gave it our best shot.
    raise OSError("unable to create an appropriate directory")
+
+# Lazily-generated attribute stuff for ScriptEnvironment, taken from here:
+#   http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/363602
+class _lazyattr(object):
+    def __init__(self, calculate_function):
+        self._calculate = calculate_function
+
+    def __get__(self, obj, typeobj=None):
+        if obj is None:
+            return self
+        value = self._calculate(obj)
+        setattr(obj, self._calculate.func_name, value)
+        return value
+
+#
+# Methods used for saving and loading data.
+#
+
class ScriptEnvironment(object):

    '''
    The ScriptEnvironment class contains values and methods useful for a script
    to work with.

    @ivar name: The name of the script.

    @ivar filename: The filename (no directory information included) of where
        persistent data for this object should be stored - default is
        C{data.dpl}. If you want to set a different filename, this value should
        be set before any saving or loading of persistent data takes place in
        the script.

    @ivar connection: The AzureusObjectConnection to work with. The connection
        should already be in an established state. connection may be None if
        ext_run is configured that way.

    @ivar logger: The logger instance to log data to. May be C{None}.

    @ivar log_channel: The logger channel object to send messages to. May be
        C{None}. For convenience the L{alert} method is available on
        ScriptEnvironment messages.

    @ivar default_repeatable_alerts: Indicates whether alerts are repeatable
        or not by default. This can be set explicitly on the object, but it
        can also be overridden when calling the L{alert} method. In most cases,
        this value will be C{None} when instantiated, and will be automatically
        determined the first time the L{alert} method is called.
    '''

    def __init__(self, name, data_file='data.dpl'):

        '''
        Note - this is a module-B{private} constructor. The method signature
        for this class may change without notice.
        '''
        self.name = name
        self.filename = data_file
        self.connection = None
        self.logger = None
        self.default_repeatable_alerts = None

    def get_data_dir(self, create_dir=True):
        '''
        Return (and cache) this script's data directory.
        '''
        try:
            return self.config_dir
        except AttributeError:
            config_dir = determine_configuration_directory(subname=self.name, create_dir=create_dir)
            # Only cache the directory once we know it has been created.
            if create_dir:
                self.config_dir = config_dir
            return config_dir

    def get_data_file_path(self, create_dir=True):
        '''
        Return the full path of the persistent data file.
        '''
        return os.path.join(self.get_data_dir(create_dir), self.filename)

    def load_data(self):
        '''
        Load and return the persisted object, or None if no data file exists.
        '''
        data_file_path = self.get_data_file_path()
        if not os.path.exists(data_file_path):
            return None
        # open() is used rather than the Python-2-only file() builtin.
        data_file = open(data_file_path, 'rb')
        data = data_file.read()
        data_file.close()
        return _zunpickle(data)

    def save_data(self, data):
        '''
        Persist the given (pickleable) object to the data file.
        '''
        data_file_path = self.get_data_file_path()
        data_file = open(data_file_path, 'wb')
        data_file.write(_zpickle(data))
        data_file.close()

    def get_log_file_path(self, create_dir=True):
        '''
        Return the full path of the script's log file.
        '''
        return os.path.join(self.get_data_dir(create_dir), 'log.txt')

    def get_log_config_path(self, create_dir=True):
        '''
        Return the full path of the script's logging configuration file.
        '''
        return os.path.join(self.get_data_dir(create_dir), 'logconfig.ini')

    def alert(self, message, alert_type='info', repeatable=None):
        '''
        Send an alert to Azureus via the log channel (a no-op when there
        is no channel).  alert_type is 'info', 'warn' or 'error'; any
        DopalError raised while sending is deliberately swallowed
        (alerts are best-effort).
        '''
        if self.log_channel is None:
            return

        # Azureus 2.4.0.0 and onwards have a Hide All button, therefore we
        # don't mind having the same message popping up.
        if repeatable is None:
            if self.default_repeatable_alerts is None:
                if self.connection is None:
                    self.default_repeatable_alerts = False
                else:
                    self.default_repeatable_alerts = \
                        self.connection.get_azureus_version() >= (2, 4, 0, 0)

            repeatable = self.default_repeatable_alerts

        alert_code = {
            'warn': self.log_channel.LT_WARNING,
            'error': self.log_channel.LT_ERROR,
        }.get(alert_type, self.log_channel.LT_INFORMATION)

        if repeatable:
            _log = self.log_channel.logAlertRepeatable
        else:
            _log = self.log_channel.logAlert

        import dopal.errors
        try:
            _log(alert_code, message)
        except dopal.errors.DopalError:
            pass

    # Lazily computed (via _lazyattr below): built on first access from
    # the channel factory, if one has been installed on this instance.
    def log_channel(self):
        if hasattr(self, '_log_channel_factory'):
            return self._log_channel_factory()
        return None

    log_channel = _lazyattr(log_channel)
+
+def _zunpickle(byte_data):
+    import pickle, zlib
+    return pickle.loads(zlib.decompress(byte_data))
+
+def _zpickle(data_object):
+    import pickle, zlib
+    return zlib.compress(pickle.dumps(data_object))
+
+#
+# Methods for manipulating the default connection data.
+#
+
def input_connection_data():
    # Interactive console routine: prompts the user for connection
    # settings and stores them as the shared default used by all scripts
    # (this is what the --setup-connection command line option runs).
    print
    print 'Enter the default connection data to be used for scripts.'
    print
    save_file = save_connection_data(ask_for_connection_data())
    print
    print 'Data saved to', save_file
+
def ask_for_connection_data():
    '''
    Interactively prompt (on the console) for connection settings.

    @return: A (connection_details, additional_details) tuple of dicts -
      the first holding host / port / user / password, the second holding
      advanced options (connection persistency, socket timeout).
    @raise ValueError: If the two password entries do not match.
    '''
    connection_details = {}
    connection_details['host'] = raw_input('Enter host: ')
    port_text = raw_input('Enter port (default is 6884): ')
    if port_text:
        connection_details['port'] = int(port_text)

    # Username and password.
    username = raw_input('Enter user name (leave blank if not applicable): ')
    # NOTE(review): this local is never used - kept as-is.
    password = None
    if username:
        import getpass
        connection_details['user'] = username
        password1 = getpass.getpass('Enter password: ')
        password2 = getpass.getpass('Confirm password: ')
        if password1 != password2:
            raise ValueError, "Password mismatch!"
        connection_details['password'] = password1

    # Additional information related to the connection.
    print
    print 'The following settings are for advanced connection configuration.'
    print 'Just leave these values blank if you are unsure what to set them to.'
    print
    additional_details = {}
    # Anything other than the literal answer 'no' enables persistency.
    additional_details['persistent'] = raw_input(
        "Enable connection persistency [type 'no' to disable]: ") != 'no'

    timeout_value = raw_input('Set socket timeout (0 to disable, blank to use script default): ')
    if timeout_value.strip():
        additional_details['timeout'] = int(timeout_value.strip())

    return connection_details, additional_details
+
def save_connection_data(data_dict):
    '''
    Store I{data_dict} as the global default connection data.

    @return: The path of the file the data was written to.
    '''
    env = ScriptEnvironment(None, 'connection.dpl')
    env.save_data(data_dict)
    return env.get_data_file_path()
+
+def load_connection_data(error=True):
+    ss = ScriptEnvironment(None, 'connection.dpl')
+    data = ss.load_data()
+    if data is None and error:
+        from dopal.errors import NoDefaultScriptConnectionError
+        raise NoDefaultScriptConnectionError, "No default connection data found - you must run dopal.scripting.input_connection_data(), or if you are running as a script, use the --setup-connection parameter."
+    return data
+
def get_stored_connection():
    '''
    Return a connection object built from the stored default connection data.

    The connection is I{not} established - no script environment, timeout
    override or connection attempt is involved.
    '''
    return _get_connection_from_config(None, None, None, False, False)
+
def _sys_exit(exitcode, message=''):
    '''
    Terminate the process with I{exitcode}, first printing I{message} to
    stderr when one is given.
    '''
    import sys
    if message:
        sys.stderr.write('%s\n' % message)
    sys.exit(exitcode)
+
def _press_any_key_to_exit():
    '''Block until the user presses a key, just before the process exits.'''
    import getpass

    # A blank line before the prompt, then swallow whatever gets typed via
    # getpass - we don't want to echo any nonsense the user enters.
    print
    getpass.getpass("Press any key to exit...")
+
+def _configure_logging(script_env, setup_logging):
+    try:
+        import logging
+    except ImportError:
+        return False
+
+    if setup_logging is False:
+        import dopal.logutils
+        dopal.logutils.noConfig()
+    elif setup_logging is True:
+        logging.basicConfig()
+    else:
+        log_ini = script_env.get_log_config_path(create_dir=False)
+        if not os.path.exists(log_ini):
+            log_ini = ScriptEnvironment(None).get_log_config_path(create_dir=False)
+        if os.path.exists(log_ini):
+            import logging.config
+            logging.config.fileConfig(log_ini)
+        else:
+            import dopal.logutils
+            dopal.logutils.noConfig()
+
+    return True
+
+def _create_handlers(script_env, log_to_file, log_file, log_to_azureus):
+    try:
+        import logging.handlers
+    except ImportError:
+        return []
+
+    created_handlers = []
+
+    if log_to_file:
+        if log_file is None:
+            log_file = script_env.get_log_path()
+        handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=2000000)
+        created_handlers.append(handler)
+
+    return created_handlers
+
def _get_remote_logger(script_env, use_own_log_channel):
    '''
    Return a remote Azureus log channel for the script to use.

    @param script_env: The L{ScriptEnvironment} - its connection is used to
       reach the remote logger, and its name is used when the script wants
       its own channel.
    @param use_own_log_channel: A string names the channel explicitly; any
       other true value uses the script's own name; false uses the shared
       "DOPAL Scripts" channel.
    @return: A log channel object, or C{None} on failure (unless DOPAL is
       in strict mode, in which case the error propagates).
    '''
    import dopal.errors, types

    try:
        logger = script_env.connection.getPluginInterface().getLogger()
        channel_by_name = dict([(channel.getName(), channel) for channel in logger.getChannels()])

        if isinstance(use_own_log_channel, types.StringTypes):
            log_channel_name = use_own_log_channel
        elif use_own_log_channel:
            # Bug fix: this previously referenced an undefined variable
            # "name" (a NameError at runtime) - the script's name lives on
            # the environment object.
            log_channel_name = script_env.name
        else:
            log_channel_name = 'DOPAL Scripts'

        # Reuse an existing channel, or create a new one.
        if log_channel_name in channel_by_name:
            return channel_by_name[log_channel_name]
        else:
            return logger.getChannel(log_channel_name)
    except dopal.errors.DopalError:

        # Not too sure about this at the moment. It's probably better to
        # provide some way to let errors escape.
        import dopal
        if dopal.__dopal_mode__ == 1:
            raise
        return None
+
def _get_connection_from_config(script_env, connection, timeout, establish_connection, silent_on_connection_error):
    '''
    Resolve and (optionally) establish an Azureus connection.

    @param script_env: L{ScriptEnvironment} whose logger (if any) receives
       debug output - may be C{None}.
    @param connection: An existing connection object, or C{None} to build
       one from the stored default connection data.
    @param timeout: Socket timeout in seconds; C{None} defers to the stored
       settings, C{0} disables timeouts entirely.
    @param establish_connection: If false, return the (unestablished)
       connection immediately.
    @param silent_on_connection_error: If true, return C{None} instead of
       raising when the connection cannot be established.
    '''
    import dopal.errors

    if script_env is None:
        logger = None
    else:
        logger = script_env.logger

    extended_settings = {}
    if connection is None:
        if logger:
            logger.debug("No connection explicitly defined, attempting to load DOPAL scripting default settings.")
        connection_details, extended_settings = load_connection_data()
        if logger:
            logger.debug("Connection settings loaded, about to create connection.")

        import dopal.main
        connection = dopal.main.make_connection(**connection_details)
        if logger:
            logger.debug("Connection created. Processing advanced settings...")

        # Bug fix: the persistency preference used to be applied inside the
        # timeout branch below, so it was silently ignored whenever no
        # timeout was configured. Apply it here, to the connection we have
        # just built from the stored settings (an explicitly passed
        # connection is left untouched).
        connection.is_persistent_connection = extended_settings.get('persistent', True)

    if timeout is not None:
        timeout_to_use = timeout
    elif 'timeout' in extended_settings:
        timeout_to_use = extended_settings['timeout']
    else:
        timeout_to_use = None

    if timeout_to_use is not None:

        # This is how we distinguish between not giving a value, and turning
        # timeouts off - 0 means don't use timeouts, and None means "don't do
        # anything".
        if timeout_to_use == 0:
            timeout_to_use = None

        if logger:
            logger.debug("Setting timeout to %s." % timeout_to_use)
        import socket
        try:
            socket.setdefaulttimeout(timeout_to_use)
        except AttributeError: # Not Python 2.2
            pass

    if not establish_connection:
        return connection

    if logger:
        logger.debug("About to establish connection to %s." % connection.get_cgi_path(auth_details=True))

    try:
        connection.establish_connection()
    except dopal.errors.LinkError:
        if silent_on_connection_error:
            if logger:
                logger.info("Failed to establish connection.", exc_info=1)
            return None
        else:
            if logger:
                logger.exception("Failed to establish connection.")
            raise
    else:
        if logger:
            logger.debug("Connection established.")
        return connection
+
+
def ext_run(name, function,

    # Connection related.
    connection=None, make_connection=True,

    # Connection setup.
    timeout=15,

    # Remote logging related.
    use_repeatable_remote_notification=None, use_own_log_channel=False,
    remote_notify_on_run=False, remote_notify_on_error=True,

    # Local logging related.
    logger=None, setup_logging=None, log_to_file=False, log_level=None,
    log_file=None,

    # Exit behaviour.
    silent_on_connection_error=False, pause_on_exit=0, print_error_on_pause=1):

    '''
    Prepares a L{ScriptEnvironment} object based on the settings here, and
    executes the passed function.

    You may alternatively want to use the L{run} function if you don't wish
    to determine the environment settings to run in, and would prefer the
    settings to be controlled through arguments on the command line.

    @note: If passing additional arguments, you must use named arguments,
    and not rely on the position of the arguments, as these arguments may
    be moved or even completely removed in later releases.

    @param name: The I{name} of the script - used for storing data, log files
       and so on.

    @param function: The callable object to invoke. Must take one argument,
       which will be the L{ScriptEnvironment} instance.

    @param connection: The
       L{AzureusObjectConnection<dopal.objects.AzureusObjectConnection>} object
       to use - if C{None} is provided, one will be automatically determined
       for you.

    @param make_connection: Determines whether the C{scripting} module
       should attempt to create a connection based on the default connection
       details or not. Only has an effect if the C{connection} parameter is
       C{None}.

    @param timeout: Defines how long socket operations should wait before
       timing out for (in seconds). Specify C{0} to disable timeouts, the
       default is C{15}. Specifying C{None} will resort to using the default
       timeout value specified in the connection details.

    @param use_repeatable_remote_notification: Determines whether the
       L{alert<ScriptEnvironment.alert>} method should use repeatable
       notification by default or not (see L{ScriptEnvironment.alert}).

    @param use_own_log_channel: Determines what log channel to use. The default
       behaviour is to use a log channel called "C{DOPAL Scripts}". Passing a
       string value will result in logging output being sent to a channel with
       the given name. Passing C{True} will result in a channel being used
       which has the same name as the script.

    @param remote_notify_on_run: Determines whether to send
       L{alert<ScriptEnvironment.alert>} calls when the script starts and ends.
       Normally, this is only desired when testing that the script is working.

    @param remote_notify_on_error: Determines whether to send an alert to the
      Azureus connection if an error has occurred during the script's
      execution.

    @param logger: The C{logging.Logger} instance to log to - the root logger
      will be used by default. Will be C{None} if the C{logging} module is not
      available on the system.

    @param setup_logging: Determines whether automatically set up logging with
      the C{logging.Logger} module. If C{True}, C{logging.basicConfig} will be
      called. If C{False}, L{dopal.logutils.noConfig} will be called. If
      C{None} (default), then this module will look for file named C{log.ini},
      firstly in the script's data directory and then in the global DOPAL
      scripts directory. If such a file can be found, then
      C{logging.fileConfig} will be invoked, otherwise
      L{dopal.logutils.noConfig} will be called instead.

    @param log_to_file: If C{True}, then a C{RotatingFileHandler} will log to a
      file in the script's data directory.

    @param log_level: The logging level assigned to any logger or handlers
      I{created} by this function.

    @param log_file: If C{log_to_file} is C{True}, this parameter
      determines which file to log to (default is that the script
      will determine a path automatically).

    @param silent_on_connection_error: If C{True}, this function will silently
      exit if a connection cannot be established with the stored connection
      object. Otherwise, the original error will be raised.

    @param pause_on_exit: If set to C{0} (default), then after execution of the
      script has occurred, the function will immediately return. If C{1}, the
      script will wait for keyboard input before terminating. If C{2}, the
      script will wait for keyboard input only if an error has occurred.

    @param print_error_on_pause: If C{pause_on_exit} is enabled, this flag
      determines whether any traceback should be printed. If C{0}, no
      traceback will be printed. If C{1} (default), any error which occurs
      inside this function will be printed. If C{2}, only tracebacks which have
      occurred in the script will be printed. If C{3}, only tracebacks which
      have occurred outside of the script's invocation will be printed.

    @raises ScriptFunctionError: Any exception which occurs in the
      function passed in will be wrapped in this exception.
    '''

    from dopal.errors import raise_as, ScriptFunctionError

    try:

        # This will be eventually become a parameter on this method in a later
        # version of DOPAL, so I'll declare the variable here and program the
        # code with it in mind.
        log_to_azureus = False

        # All data for the script will be stored here.
        script_env = ScriptEnvironment(name)

        # First step, initialise the logging environment.
        #
        # We do this if we have not been passed a logger object.
        if logger is None:

            # We don't call this method if we have been specifically
            # asked to construct handlers from these function arguments.
            #
            # (Currently, that's just "log_to_file" that we want to check.)
            if log_to_file:
                logging_configured_by_us = False

            # We want to log to Azureus, but we can't set that up yet, because
            # we don't have a connection set up (probably). Adding a logging
            # handler is the last thing we do before invoking the script, because
            # we don't want to log any scripting initialisation messages here
            # remotely (we only want to log what the script wants to log).
            elif log_to_azureus:
                logging_configured_by_us = _configure_logging(script_env, False)

            # Configure using the setup_logging flag.
            else:
                logging_configured_by_us = _configure_logging(script_env, setup_logging)

            if logging_configured_by_us:
                import logging
                logger = logging.getLogger()

            # NOTE(review): if the logging module is unavailable (or
            # log_to_file suppressed configuration above), logger is still
            # None here, so setting log_level would raise AttributeError -
            # confirm whether that combination can occur in practice.
            if log_level is not None:
                logger.setLevel(log_level)

        else:
            logging_configured_by_us = False

        script_env.logger = logger

        # Only force levels onto individual handlers when we did not get the
        # chance to configure the logger itself.
        set_levels_on_handlers = \
            (log_level is not None) and (not logging_configured_by_us)

        del logging_configured_by_us

        # Setup all handlers, apart from any remote handlers...
        #
        # NOTE(review): the handlers are only level-adjusted here - nothing
        # in this function attaches them to a logger. Confirm that
        # _create_handlers registers them itself.
        for handler in _create_handlers(script_env, log_to_file, log_file, None):
            if set_levels_on_handlers:
                handler.setLevel(log_level)

        # Next step, sort out a connection (if we need to).
        if connection is None and make_connection:
            connection = _get_connection_from_config(script_env, None, timeout, True, silent_on_connection_error)

            # If connection is None, that means that we failed to establish a
            # connection, but we don't mind, so just return silently.
            if connection is None:
                return

        # Assign connection if we've got one.
        if connection is not None:
            script_env.connection = connection

        # Next step, setup a remote channel for us to communicate with Azureus.
        if connection is not None:

            def make_log_channel():
                return _get_remote_logger(script_env, use_own_log_channel)

            script_env._log_channel_factory = make_log_channel

        script_env.default_repeatable_alerts = use_repeatable_remote_notification

        # Configure remote handlers at this point.
        #
        # NOTE(review): log_to_azureus is hard-wired to False above, so this
        # call currently creates no handlers.
        for handler in _create_handlers(script_env, False, None, log_to_azureus):
            if set_levels_on_handlers:
                handler.setLevel(log_level)

        if remote_notify_on_run:
            script_env.alert('About to start script "%s"...' % name, repeatable=True)

        try:
            function(script_env)
        except Exception, e:
            if logger:
                logger.exception("Error occurred inside script.")

            # Do we want to notify Azureus?
            if remote_notify_on_error:
                script_env.alert('An error has occurred while running the script "%s".\nPlease check any related logs - the script\'s data directory is located at:\n  %s'  % (script_env.name, script_env.get_data_dir(create_dir=False)), alert_type='error')

            raise_as(e, ScriptFunctionError)

        if remote_notify_on_run:
            script_env.alert('Finished running script "%s".' % name, repeatable=True)

    # Error during execution.
    except:

        if pause_on_exit:

            # Do we want to log the exception?
            import sys
            _exc_type, _exc_value, _exc_tb = sys.exc_info()
            if isinstance(_exc_value, ScriptFunctionError):
                _print_tb = print_error_on_pause in [1, 2]

                # If we are printing the traceback, we do need to print the
                # underlying traceback if we have a ScriptFunctionError.
                _exc_value = _exc_value.error
                _exc_type  = _exc_value.__class__
            else:
                _print_tb = print_error_on_pause in [1, 3]

            if _print_tb:
                import traceback
                traceback.print_exception(_exc_type, _exc_value, _exc_tb)
            _press_any_key_to_exit()

        # Reraise the original error.
        raise

    # Script finished cleanly, just exit normally.
    else:
        if pause_on_exit == 1:
            _press_any_key_to_exit()
+
+def run(name, function):
+
+    '''
+    Main entry point for script functions to be executed in a preconfigured
+    environment.
+
+    This function wraps up the majority of the functionality offered by
+    L{ext_run}, except it allows it to be configured through command line
+    arguments.
+
+    This function requires the C{logging} and C{optparse} (or C{optik}) modules
+    to be present - if they are not (which is the case for a standard Python
+    2.2 distribution), then a lot of the configurability which is normally
+    provided will not be available.
+
+    You can find all the configuration options that are available by running
+    this function and passing the C{--help} command line option.
+
+    There are several options available which will affect how the script is
+    executed, as well as other options which will do something different other
+    than executing the script (such as configuring the default connection).
+
+    This script can be passed C{None} as the function value - this will force
+    all the command line handling and so on to take place, without requiring
+    a script to be executed. This is useful if you want to know whether
+    calling this function will actually result in your script being executed -
+    for example, you might want to print the text C{"Running script..."}, but
+    only if your script is actually going to executed.
+
+    This function does not return a value - if this method returns cleanly,
+    then it means the script has been executed (without any problems). This
+    function will raise C{SystemExit} instances if it thinks it is appropriate
+    to do so - this is always done if the script actually fails to be executed.
+
+    The exit codes are::
+        0 - Exit generated by optparse (normally when running with C{--help}).
+        2 - Required module is missing.
+        3 - No default connection stored.
+        4 - Error parsing command line arguments.
+        5 - Connection not established.
+       16 - Script not executed (command line options resulted in some other behaviour to occur).
+
+    If an exception occurs inside the script, it will be passed back to the
+    caller of this function, but it will be wrapped in a
+    L{ScriptFunctionError<dopal.errors.ScriptFunctionError>} instance.
+
+    If any exception occurs inside the script, in this function, or in
+    L{ext_run}, it will be passed back to the caller of this function (rather
+    than being suppressed).
+
+    @note: C{sys.excepthook} may be modified by this function to ensure that
+      an exception is only printed once to the user with the most appopriate
+      information.
+    '''
+
+    EXIT_TRACEBACK = 1
+    EXIT_MISSING_MODULE = 2
+    EXIT_NO_CONNECTION_STORED = 3
+    EXIT_OPTION_PARSING = 4
+    EXIT_COULDNT_ESTABLISH_CONNECTION = 5
+    EXIT_SCRIPT_NOT_EXECUTED = 16
+
+    def abort_if_no_connection():
+        if load_connection_data(error=False) is None:
+            _sys_exit(EXIT_NO_CONNECTION_STORED,
+                "No connection data stored, please re-run with --setup-connection.")
+
+    try:
+        from optik import OptionGroup, OptionParser, OptionValueError, TitledHelpFormatter
+    except ImportError:
+        try:
+            from optparse import OptionGroup, OptionParser, OptionValueError, TitledHelpFormatter
+        except ImportError:
+            import sys
+            if len(sys.argv) == 1:
+                abort_if_no_connection()
+                if function is not None:
+                    ext_run(name, function)
+                return
+
+            _module_msg = "Cannot run - you either need to:\n" + \
+            "  - Install Python 2.3 or greater\n" + \
+            "  - the 'optik' module from http://optik.sf.net\n" + \
+            "  - Run with no command line arguments."
+            _sys_exit(EXIT_MISSING_MODULE, _module_msg)
+
+    # Customised help formatter.
+    #
+    # Why do we need one? We don't.
+    # Why do *I* want one? Here's why:
+    #
+    class DOPALCustomHelpFormatter(TitledHelpFormatter):
+
+        #
+        # 1) Choice options which I create will have a metavar containing
+        #    a long string of all the options that can be used. If it's
+        #    bunched together with other options, it doesn't read well, so
+        #    I want an extra space.
+        #
+        def format_option(self, option):
+            if option.choices is not None:
+                prefix = '\n'
+            else:
+                prefix = ''
+
+            return prefix + TitledHelpFormatter.format_option(self, option)
+
+        #
+        # 2) I don't like the all-lower-case "options" header, so we
+        #    capitalise it.
+        #
+        def format_heading(self, heading):
+            if heading == 'options':
+                heading = 'Options'
+            return TitledHelpFormatter.format_heading(self, heading)
+
+        #
+        # 3) I don't like descriptions not being separated out from option
+        #    strings, hence the extra space.
+        #
+        def format_description (self, description):
+            result = TitledHelpFormatter.format_description(self, description)
+            if description[-1] == '\n':
+                result += '\n'
+            return result
+
+    parser = OptionParser(formatter=DOPALCustomHelpFormatter(), usage='%prog [options] [--help]')
+
+    def parser_error(msg):
+        import sys
+        parser.print_usage(sys.stderr)
+        _sys_exit(EXIT_OPTION_PARSING, msg)
+
+    parser.error = parser_error
+
+    # We want to raise a different error code on exit.
+
+    def add_option(optname, options, help_text, group=None):
+
+        options_processing = [opt.lower() for opt in options]
+
+        # This is the rest of the help text we will generate.
+        help_text_additional = ': one of ' + \
+            ', '.join(['"%s"' % option for option in options]) + '.'
+
+        if group is not None:
+            parent = group
+        else:
+            parent = parser
+
+        parent.add_option(
+            '--' + optname,
+            type="choice",
+            metavar='[' + ', '.join(options) + ']',
+            choices=options_processing,
+            dest=optname.replace('-', '_'),
+            help=help_text,# + help_text_additional,
+        )
+
+    logging_group = OptionGroup(parser, "Logging setup options",
+        "These options will configure how logging is setup for the script.")
+    parser.add_option_group(logging_group)
+
+    add_option(
+        'run-mode',
+        ['background', 'command', 'app'],
+        'profile to run script in'
+    )
+
+    add_option(
+        'logging',
+        ['none', 'LOCAL'], # , 'remote', 'FULL'],
+        'details where the script can send log messages to',
+        logging_group,
+    )
+
+    add_option(
+        'loglevel',
+        ['debug', 'info', 'WARN', 'error', 'fatal'],
+        'set the threshold level for logging',
+        logging_group,
+    )
+
+    add_option(
+        'logdest',
+        ['FILE', 'stderr'],
+        'set the destination for local logging output',
+        logging_group,
+    )
+
+    logging_group.add_option('--logfile', type='string', help='log file to write out to')
+
+    add_option(
+        'needs-connection',
+        ['YES', 'no'],
+        'indicates whether the ability to connect is required, if not, then it causes the script to terminate cleanly',
+    )
+
+    add_option(
+        'announce',
+        ['yes', 'ERROR', 'no'],
+        'indicates whether the user should be alerted via Azureus when the script starts and stops (or just when errors occur)'
+    )
+
+    add_option(
+        'pause-on-exit',
+        ['yes', 'error', 'NO'],
+        'indicates whether the script should pause and wait for keyboard input before terminating'
+    )
+
+    connection_group = OptionGroup(parser, "Connection setup options",
+        "These options are used to set up and test your own personal "
+        "connection settings. Running with any of these options will cause "
+        "the script not to be executed.\n")
+
+    connection_group.add_option('--setup-connection', action="store_true",
+        help="Setup up the default connection data for scripts.")
+
+    connection_group.add_option('--test-connection', action="store_true",
+        help="Test that DOPAL can connect to the connection configured.")
+
+    connection_group.add_option('--delete-connection', action="store_true",
+        help="Removes the stored connection details.")
+
+    script_env_group = OptionGroup(parser, "Script setup options",
+        "These options are used to extract and set information related to "
+        "the environment set up for the script. Running with any of these "
+        "options will cause the script not to be executed.\n")
+
+    script_env_group.add_option('--data-dir-info', action="store_true",
+        help="Prints out where the data directory is for this script.")
+
+    parser.add_option_group(connection_group)
+    parser.add_option_group(script_env_group)
+
+    options, args = parser.parse_args()
+
+    # We don't permit an explicit filename AND a conflicting log destination.
+    if options.logdest not in [None, 'file'] and options.logfile:
+        parser.error("cannot set conflicting --logdest and --logfile values")
+
+    # We don't allow any command line argument which will make us log to file
+    # if local logging isn't enabled.
+    if options.logging not in [None, 'local', 'full'] and \
+        (options.logdest or options.logfile or options.loglevel):
+        parser.error("--logging setting conflicts with other parameters")
+
+    # Want to know where data is kept?
+    if options.data_dir_info:
+        def _process_senv(senv):
+            def _process_senv_file(fpath_func, descr):
+                fpath = fpath_func(create_dir=False)
+                print descr + ':',
+                if not os.path.exists(fpath):
+                   print '(does not exist)',
+                print
+                print '  "%s"' % fpath
+                print
+
+            if senv.name is None:
+                names = [
+                    'Global data directory',
+                    'Global default connection details',
+                    'Global logging configuration file',
+                ]
+            else:
+                names = [
+                    'Script data directory',
+                    'Script data file',
+                    'Script logging configuration file',
+                ]
+
+            _process_senv_file(senv.get_data_dir, names[0])
+            _process_senv_file(senv.get_data_file_path, names[1])
+            _process_senv_file(senv.get_log_config_path, names[2])
+
+        _process_senv(ScriptEnvironment(None, 'connection.dpl'))
+        _process_senv(ScriptEnvironment(name))
+        _sys_exit(EXIT_SCRIPT_NOT_EXECUTED)
+
+    # Delete connection details?
+    if options.delete_connection:
+        conn_path = ScriptEnvironment(None, 'connection.dpl').get_data_file_path(create_dir=False)
+        if not os.path.exists(conn_path):
+            print 'No stored connection data file found.'
+        else:
+            try:
+                os.remove(conn_path)
+            except OSError, error:
+                print 'Unable to delete "%s"...' % conn_path
+                print ' ', error
+            else:
+                print 'Deleted "%s"...' % conn_path
+        _sys_exit(EXIT_SCRIPT_NOT_EXECUTED)
+
+    # Do we need to setup a connection.
+    if options.setup_connection:
+        input_connection_data()
+        _sys_exit(EXIT_SCRIPT_NOT_EXECUTED)
+
+    # Want to test the connection?
+    if options.test_connection:
+        abort_if_no_connection()
+        connection = get_stored_connection()
+
+        print 'Testing connection to', connection.link_data['host'], '...'
+        import dopal.errors
+        try:
+            connection.establish_connection(force=False)
+        except dopal.errors.LinkError, error:
+            print "Unable to establish a connection..."
+            print "   Destination:", connection.get_cgi_path(auth_details=True)
+            print "   Error:", error.to_error_string()
+            _sys_exit(EXIT_SCRIPT_NOT_EXECUTED)
+        else:
+            print "Connection established, examining XML/HTTP plugin settings..."
+
+            # While we're at it, let the user know whether their settings are
+            # too restrictive.
+            #
+            # XXX: We need a subclass of RemoteMethodError representing
+            # Access Denied messages.
+            from dopal.errors import NoSuchMethodError, RemoteMethodError
+
+            # Read-only methods?
+            try:
+                connection.get_plugin_interface().getTorrentManager()
+            except RemoteMethodError:
+                read_only = True
+            else:
+                read_only = False
+
+            # XXX: Some sort of plugin utility module?
+            if read_only:
+                print
+                print 'NOTE: The XML/HTTP plugin appears to be set to read-only - this may restrict'
+                print '      scripts from working properly.'
+                _sys_exit(EXIT_SCRIPT_NOT_EXECUTED)
+
+            # Generic classes became the default immediately after 2.4.0.2.
+            if connection.get_azureus_version() > (2, 4, 0, 2):
+                generic_classes = True
+                generic_classes_capable = True
+            elif connection.get_azureus_version() < (2, 4, 0, 0):
+                generic_classes = False
+                generic_classes_capable = False
+            else:
+                generic_classes_capable = True
+                try:
+                    connection.get_plugin_interface().getLogger()
+                except NoSuchMethodError:
+                    generic_classes = False
+                else:
+                    generic_classes = True
+
+            if not generic_classes:
+                print
+                if generic_classes_capable:
+                    print 'NOTE: The XML/HTTP plugin appears to have the "Use generic classes"'
+                    print '      setting disabled. This may prevent some scripts from running'
+                    print '      properly - please consider enabling this setting.'
+                else:
+                    print 'NOTE: This version of Azureus appears to be older than 2.4.0.0.'
+                    print '      This may prevent some scripts from running properly.'
+                    print '      Please consider upgrading an updated version of Azureus.'
+            else:
+                print 'No problems found with XML/HTTP plugin settings.'
+
+            _sys_exit(EXIT_SCRIPT_NOT_EXECUTED)
+
+    # Is the logging module available?
+    try:
+        import logging
+    except ImportError:
+        logging_available = False
+    else:
+        logging_available = True
+
+    # Now we need to figure out what settings have been defined.
+    #
+    # In level of importance:
+    #   - Option on command line.
+    #   - Default options for chosen profile.
+    #   - Default global settings.
+
+    # Global default settings.
+    settings = {
+        'logging': 'none',
+        'needs_connection': 'yes',
+        'announce': 'error',
+        'pause_on_exit': 'no',
+    }
+
+    # Profile default settings.
+    #
+    # I'll only define those settings which differ from the global defaults.
+    settings.update({
+        'background': {
+            'needs_connection': 'no'
+        },
+        'command': {
+            'logging': 'none',
+            'announce': 'no',
+        },
+        'app': {
+            'logging': 'none',
+            'pause_on_exit': 'error',
+            'announce': 'no',
+        },
+        None: {},
+    }[options.run_mode])
+
+    # Explicitly given settings.
+    for setting_name in settings.keys():
+        if getattr(options, setting_name) is not None:
+            settings[setting_name] = getattr(options, setting_name)
+
+    # Ensure that the user doesn't request logging settings which we can't
+    # support.
+    #
+    # logdest = file or stderr
+    # logfile = blah
+    # logging -> if local, then log to (default) file.
+    if not logging_available and \
+        (options.loglevel is not None or \
+         settings['logging'] != 'none' or \
+         options.logfile or  options.logdest):
+
+        _module_msg = "Cannot run - you either need to:\n" + \
+            "  - Install Python 2.3 or greater\n" + \
+            "  - the 'logging' module from http://www.red-dove.com/python_logging.html\n" + \
+            "  - Run the command again without --loglevel or --logging parameters"
+        _sys_exit(EXIT_MISSING_MODULE, _module_msg)
+
+    # What log level to use?
+    loglevel = None
+    if options.loglevel is not None:
+        loglevel = getattr(logging, options.loglevel.upper())
+
+    # Now we interpret the arguments given and execute ext_run.
+    kwargs = {}
+    kwargs['silent_on_connection_error'] = settings['needs_connection'] == 'no'
+    kwargs['pause_on_exit'] = {'yes': 1, 'no': 0, 'error': 2}[settings['pause_on_exit']]
+
+    kwargs['remote_notify_on_run'] = settings['announce'] == 'yes'
+    kwargs['remote_notify_on_error'] = settings['announce'] in ['yes', 'error']
+
+    # Logging settings.
+    if options.logdest == 'stderr':
+        setup_logging = True
+        logging_to_stderr = True
+    else:
+        setup_logging = None
+        logging_to_stderr = False
+
+    kwargs['setup_logging'] = setup_logging
+    kwargs['log_level'] = loglevel
+    kwargs['log_to_file'] = options.logdest == 'file' or \
+        options.logfile is not None
+    kwargs['log_file'] = options.logfile
+
+    # print_error_on_pause:
+    #   Do we want to print the error? That's a bit tough...
+    #
+    # If we know that we are logging to stderr, then any internal script
+    # error will already be printed, so we won't want to do it in that case.
+    #
+    # If an error has occurred while setting up, we will let it be printed
+    # if we pause on errors, but then we have to suppress it from being
+    # reprinted (through sys.excepthook). Otherwise, we can let sys.excepthook
+    # handle it.
+    #
+    # If we aren't logging to stderr, and an internal script error occurs,
+    # we can do the same thing as we currently do for setting up errors.
+    #
+    # However, if we are logging to stderr, we need to remember that setting
+    # up errors aren't fed through to the logger, so we should print setting
+    # up errors.
+    if logging_to_stderr:
+        # Print only initialisation errors.
+        kwargs['print_error_on_pause'] = 3
+    else:
+        # Print all errors.
+        kwargs['print_error_on_pause'] = 1
+
+    print_traceback_in_ext_run = kwargs['pause_on_exit'] and kwargs['print_error_on_pause']
+    abort_if_no_connection()
+
+    from dopal.errors import LinkError, ScriptFunctionError
+
+    # Execute script.
+    if function is not None:
+        try:
+            ext_run(name, function, **kwargs)
+        except LinkError, error:
+            print "Unable to establish a connection..."
+            print "   Connection:", error.obj
+            print "   Error:", error.to_error_string()
+            _sys_exit(EXIT_SCRIPT_NOT_EXECUTED)
+        except:
+            # Override sys.excepthook here.
+            #
+            # It does two things - firstly, if we know that the traceback
+            # has already been printed to stderr, then we suppress it
+            # being printed again. Secondly, if the exception is a
+            # ScriptFunctionError, it will print the original exception
+            # instead.
+            import sys
+
+            previous_except_hook = sys.excepthook
+            def scripting_except_hook(exc_type, exc_value, exc_tb):
+
+                is_script_function_error = False
+                if isinstance(exc_value, ScriptFunctionError):
+                    exc_value = exc_value.error
+                    exc_type = exc_value.__class__
+                    is_script_function_error = True
+
+                if logging_to_stderr and is_script_function_error:
+                    # Only script function errors will be logged to the
+                    # logger, so we'll only suppress the printing of this
+                    # exception if the exception is a scripting function
+                    # error.
+                    return
+                if print_traceback_in_ext_run:
+                    return
+                previous_except_hook(exc_type, exc_value, exc_tb)
+
+            sys.excepthook = scripting_except_hook
+            raise
+
+    return
+
if __name__ == '__main__':

    # Name passed to run() so logs/announcements identify this script.
    SCRIPT_NAME = 'scripting_main'

    # Verify that the command line arguments are accepted.
    run(SCRIPT_NAME, None)

    # Set up two scripts, one which should work, and the other which will fail.
    # We add in some delays, just so things don't happen too quickly.
    print 'The following code will do 2 things - it will run a script which'
    print 'will work, and then run a script which will fail. This is for'
    print 'testing purposes.'
    print

    # Valid call - exercises the normal success path of run().
    def do_something_good(script_env):
        print "DownloadManager:", script_env.connection.get_plugin_interface().getDownloadManager()

    # NOTE(review): presumably getUploadManager is not a real remote method,
    # so this call exercises run()'s error path - confirm against the plugin
    # interface definition.
    def do_something_bad(script_env):
        print "UploadManager:", script_env.connection.get_plugin_interface().getUploadManager()

    print 'Running good script...'
    run(SCRIPT_NAME, do_something_good)
    print

    print 'Finished running good script, waiting for 4 seconds...'
    import time
    time.sleep(4)
    print

    print 'Running bad script...'
    run(SCRIPT_NAME, do_something_bad)
    print

+ 186 - 0
html/bin/clients/fluazu/dopal/utils.py

@@ -0,0 +1,186 @@
+# File: utils.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+General utility functions.
+'''
+
+__pychecker__ = 'unusednames=metaclass_,attr_,no-unreachable'
+
# Converts a binary string character into a hexidecimal equivalent.
#
# >>> byte_to_hex_form('N')
# u'4E'
def byte_to_hex_form(byte):
    # '%02X' renders the byte's ordinal value as exactly two uppercase,
    # zero-padded hex digits; unicode() keeps the historical return type.
    return unicode('%02X' % ord(byte))
+
# Converts a binary string into a hexidecimal equivalent.
#
# >>> string_to_hex_form('xK3-')
# u'784B332D'
def string_to_hex_form(chars):
    if not chars:
        raise ValueError("cannot convert empty string")
    # Two hex digits per input byte, concatenated in order.
    return ''.join(map(byte_to_hex_form, chars))
+
# Converts a 2 character hexidecimal string into a binary character.
#
# (The doctest previously named a non-existent 'hex_form_to_byte' and
# showed 'L' as the result - chr(0x4E) is actually 'N'.)
#
# >>> hex_pair_to_byte(u'4E')
# 'N'
def hex_pair_to_byte(hex_pair):
    # Encode first so unicode input is accepted; int(..., 16) parses the
    # pair as a base-16 number and chr() maps it back to a raw byte.
    return chr(int(hex_pair.encode('utf-8'), 16))
+
# Converts a hexidecimal string (a string containing only characters valid
# in use for displaying a hexidecimal number, i.e. 0123456789ABCDEF) into
# a binary string.
#
# >>> hex_string_to_binary(u'784B332D')
# 'xK3-'
def hex_string_to_binary(hex_string):
    if len(hex_string) % 2:
        raise ValueError("string given has odd-number of characters, must be even")
    if not hex_string:
        raise ValueError("cannot convert empty string")
    # Walk the string two characters at a time, decoding each pair.
    pairs = [hex_string[offset:offset + 2] for offset in range(0, len(hex_string), 2)]
    return ''.join(map(hex_pair_to_byte, pairs))
+
def make_short_object_id(object_id):
    # Azureus object IDs tend to be long incrementing integers (up to
    # around 20 digits), so rendering the whole ID is unwieldy.  Instead
    # we render the ID in hex and keep only the last 6 digits - a range
    # of 16**6 (16.7 million), so Azureus would have to generate more
    # than 16 million objects before two short forms collide.
    #
    # The short ID is shown as a way of easily seeing whether two
    # objects represent the same remote object or not.
    rendered = hex(object_id)

    # Python longs render with a trailing 'L' - drop it before slicing.
    if rendered.endswith('L'):
        rendered = rendered[:-1]

    return rendered[-6:]
+
def parse_azureus_version_string(ver_string):
    # Parse an Azureus version string into a comparable tuple.
    #
    #   '2.5.0.0'    -> (2, 5, 0, 0)
    #   '2.5.0.0_B1' -> (2, 5, 0, 0, 'b', 1)
    #   '2.5.0.0_B'  -> (2, 5, 0, 0, 'b')
    #
    # Split on the first underscore only - the previous maxsplit of 2
    # could produce three parts for a string with two underscores, which
    # broke the two-way unpacking below with a ValueError.
    ver_bits = ver_string.split('_', 1)
    if len(ver_bits) == 1:
        major_ver, minor_ver = ver_string, None
    else:
        major_ver, minor_ver = ver_bits

    ver_segments = [int(bit) for bit in major_ver.split('.')]
    if minor_ver:
        if minor_ver[0].lower() == 'b':
            ver_segments.append('b')
            # The beta number is optional - a bare 'B' suffix is accepted.
            try:
                beta_ver = int(minor_ver[1:])
            except ValueError:
                pass
            else:
                ver_segments.append(beta_ver)

    return tuple(ver_segments)
+
+#
+# I love this code. :)
+#
+# I might turn it into something more generic, and use it elsewhere..
+#
+# Would be nicer if there was a better API for doing this, but given the amount
+# of hackery that I'm doing right now, I won't complain. :)
+#
+# What a lot of effort just to act as if these methods were defined in the
+# class itself.
# 'new' provides constructors for code/function/method objects (Python 2
# only; removed in Python 3).
import new
class MethodFactory(object):
    '''
    Builds clones of a template function under different names.

    The factory captures the code object of ``method_object`` once; each
    built function has its co_name (and a ``__funcname__`` global) set to
    the requested name, so tracebacks read as if the method were defined
    in the target class itself.
    '''

    def __init__(self, method_object):
        # Snapshot every constructor argument of the template's code
        # object, in the positional order new.code() requires.
        _codeobj = method_object.func_code
        code_arguments = [
            _codeobj.co_argcount, _codeobj.co_nlocals, _codeobj.co_stacksize,
            _codeobj.co_flags, _codeobj.co_code, _codeobj.co_consts,
            _codeobj.co_names, _codeobj.co_varnames, _codeobj.co_filename,
            _codeobj.co_name, _codeobj.co_firstlineno, _codeobj.co_lnotab,
        ]
        self.code_arguments = code_arguments

    def _build_function(self, name):
        # Copy the captured arguments and overwrite co_name (index 9)
        # before rebuilding the code object.
        code_args = self.code_arguments[:]
        code_args[9] = name
        # code_args[8] = <modulename>
        codeobj = new.code(*code_args)
        # The function's globals expose __funcname__ so the template body
        # can refer to the name it was built under.
        return new.function(codeobj, {'__funcname__': name, '__builtins__': __builtins__}, name)

    def make_instance_method(self, name, instanceobj):
        # Bind a freshly built function to a single instance.
        method = self._build_function(name)
        return new.instancemethod(method, instanceobj, type(instanceobj))

    def make_class_method(self, name, classobj):
        # Build an unbound method attached to the given class.
        method = self._build_function(name)
        return new.instancemethod(method, None, classobj)
+
def _not_implemented(self, *args, **kwargs):
    # Template method body: raises NotImplementedError naming the class
    # and method.  __funcname__ is not defined in this module's scope -
    # it is injected into the function's globals by
    # MethodFactory._build_function, so this template is only usable via
    # the factory.
    class_name = self.__class__.__name__
    funcname = __funcname__
    raise NotImplementedError, "%(class_name)s.%(funcname)s" % locals()

# Expose only the class-method builder; the template function and the
# factory instance are scaffolding, deleted once the helper is bound.
not_implemented_factory = MethodFactory(_not_implemented)
make_not_implemented_class_method = not_implemented_factory.make_class_method
del _not_implemented, not_implemented_factory
+
def handle_kwargs(kwargs, *required, **optional):
    '''
    Validate a keyword-argument dict against required and optional names.

    Returns the optional defaults overridden by the supplied values.
    Raises TypeError for an unknown keyword, or for a required keyword
    which was not supplied.
    '''
    result = dict(optional)

    # Track which required names have not been seen yet.
    outstanding = dict([(name, None) for name in required])

    for key, value in kwargs.items():
        if key not in optional:
            # Not an optional name - it must be one of the (remaining)
            # required names.
            try:
                outstanding.pop(key)
            except KeyError:
                raise TypeError("unexpected keyword argument: %r" % key)

        result[key] = value

    if outstanding:
        missing_key = outstanding.popitem()[0]
        raise TypeError("missing keyword argument: %r" % missing_key)

    return result
+
class Sentinel(object):
    '''
    A marker object wrapping a descriptive value.

    str() yields the wrapped value's string form; repr() additionally
    shows the object's identity, so distinct sentinels wrapping equal
    values can still be told apart.
    '''

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        details = (self.value, id(self))
        return '<sentinel object (%r) at 0x%08X>' % details

+ 207 - 0
html/bin/clients/fluazu/dopal/xmlutils.py

@@ -0,0 +1,207 @@
+# File: xmlutils.py
+# Library: DOPAL - DO Python Azureus Library
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details ( see the COPYING file ).
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+'''
+XML utility functions.
+'''
+
+# Given an object which has the same interface as xml.dom.Node:
+#   a) Join all concurrent text nodes together.
+#   b) Strip all trailing and leading whitespace from each text node.
+#
+# This function will recursively process the tree structure given in the node
+# object. No value will be returned by this function, instead the given object
+# will be modified.
def normalise_xml_structure(xml_node):
    '''
    Recursively tidy the text nodes of a DOM tree, in place.

    Adjacent text nodes are merged, leading/trailing whitespace is
    stripped from each text node, and text nodes left with no content
    (whitespace is not content) are removed.  The given node is
    modified; nothing is returned.
    '''
    from xml.dom import Node

    # Merge runs of adjacent text nodes first.
    xml_node.normalize()

    # Iterate over a snapshot of the children so removal is safe.
    for child in list(xml_node.childNodes):
        if child.nodeType != Node.TEXT_NODE:
            # Non-text node - recurse into its subtree.
            normalise_xml_structure(child)
            continue
        stripped = child.nodeValue.strip()
        if stripped:
            child.nodeValue = stripped
        else:
            xml_node.removeChild(child)
            child.unlink()
+
def get_text_content(node):
    '''
    Return the character data held by a DOM node.

    Accepts either a text node itself, or a node whose value is stored
    in a single child text node.  A node with no children yields the
    empty string.  Raises ValueError when the node's value cannot be
    determined as a single character string.
    '''
    from xml.dom import Node

    # Text content is stored directly in this node.
    if node.nodeType == Node.TEXT_NODE:
        return node.nodeValue

    # Otherwise, must be in a child node.
    #elif len(node.childNodes) == 1 and \
    #    node.firstChild.nodeType == Node.TEXT_NODE:
    #    return node.firstChild.nodeValue

    # Sometimes happens for attributes with no real value.
    elif len(node.childNodes) == 0:
        return ''

    # Look for exactly one text node among the children, recording why
    # the content is ambiguous if that fails.
    text_node = None
    err_text = None
    for child in node.childNodes:
        if child.nodeType == Node.TEXT_NODE:
            if text_node is None:
                text_node = child
            else:
                # A second text node - ambiguous, stop looking.
                err_text = "contained multiple text nodes"
                break
    else:
        # for/else: the loop completed without break, so at most one
        # text node was found.
        if text_node is None:
            if len(node.childNodes) != 1:
                err_text = "contained multiple nodes, but none were text"
            else:
                err_text = "did not contain a character string as its value"
        else:
            return text_node.nodeValue

    raise ValueError, ("the node %s " % node.nodeName) + err_text
+
+from xml.sax.saxutils import quoteattr, escape
+
# This base class will be removed when XMLObject is removed.
class _XMLObjectBase(object):
    '''
    Shared implementation for XMLObject / UXMLObject: an element with a
    tag name, a dict of attributes and an ordered list of contents
    (plain strings or nested blocks), serialised via L{to_string}.
    '''

    def __init__(self, tag_name):
        # Name of the XML element this object renders as.
        self.tag_name = tag_name
        # attribute name -> attribute value.
        self.attributes = {}
        # Child content in document order: plain strings, or nested
        # objects exposing a to_string(out, indent) method.
        self.contents = []

    def add_attribute(self, attribute_name, attribute_value):
        # A later call with the same name overwrites the earlier value.
        self.attributes[attribute_name] = attribute_value

    def add_content(self, content):
        self.contents.append(content)

    def to_string(self, out=None, indent=0):
        '''
        Serialise this block as XML text.

        If out is None, the rendered string is returned; otherwise the
        text is written to the given file-like object and None is
        returned.  indent is the number of leading spaces to emit for
        this element.
        '''
        if out is None:
            # We use StringIO instead of cStringIO not to lose unicode strings.
            import StringIO
            out = StringIO.StringIO()
            return_as_string = True
        else:
            return_as_string = False

        indent_string = ' ' * indent
        out.write(indent_string)
        out.write('<')
        out.write(self.tag_name)
        for attr_name, attr_value in self.attributes.items():
            out.write(' ')
            out.write(attr_name)
            out.write('=')
            # quoteattr supplies the surrounding quotes and escaping.
            out.write(quoteattr(attr_value))

        # If we have no contents, we'll close the tag here.
        if not self.contents:
            out.write(' />\n')

        else:
            out.write('>')

        # If we have one piece of content, which is just a string, then
        # we'll put it on the same line as the opening tag is on.
        if len(self.contents) == 1 and not hasattr(self.contents[0], 'to_string'):
            out.write(escape(self.contents[0]))

        # Otherwise, we assume we have some more XML blocks to write out,
        # so we'll indent them and put them on newlines.
        elif self.contents:
            out.write('\n')
            for content in self.contents:
                content.to_string(out, indent+2)
            out.write(indent_string)

        # Write out the closing tag (if we haven't written it already).
        if self.contents:
            out.write('</')
            out.write(self.tag_name)
            out.write('>\n')

        # If the invocation of this method was not passed a buffer to write
        # into, then we return the string representation.
        if return_as_string:
            return out.getvalue()

        return None
+
class XMLObject(_XMLObjectBase):
    '''
    B{Deprecated:} An object representing a block of XML.

    @attention: B{Deprecated:} This class does not provide any guarantees in
       the way that byte strings are handled. Use L{UXMLObject} instead.
    '''
    def __init__(self, tag_name):
        import warnings
        from dopal.errors import DopalPendingDeprecationWarning

        # Warn at construction time - this class is retained only for
        # backwards compatibility.
        warnings.warn("XMLObject is deprecated - use UXMLObject instead", DopalPendingDeprecationWarning)

        _XMLObjectBase.__init__(self, tag_name)
+
class UXMLObject(_XMLObjectBase):
    '''
    An object representing a block of XML.

    Any string which is added to this block (either through the L{add_content}
    or L{add_attribute} methods should be a unicode string, rather than a byte
    string. If it is a byte string, then it must be a string which contains
    text in the system's default encoding - attempting to add text encoding in
    other formats is not allowed.
    '''

    def to_string(self, out=None, indent=0):
        # Delegate to the base implementation, but guarantee that any
        # returned text is a unicode string.
        text = _XMLObjectBase.to_string(self, out, indent)
        if text is None:
            return None
        return unicode(text)

    def encode(self, encoding='UTF-8'):
        # Prepend an XML declaration and serialise the whole block in the
        # requested character encoding.
        declaration = '<?xml version="1.0" encoding="%s"?>\n' % encoding
        return (declaration + self.to_string()).encode(encoding)

    def __unicode__(self):
        return self.to_string()
+
def make_xml_ref_for_az_object(object_id):
    '''
    Creates an XML block which represents a remote object in Azureus with the given object ID.

    @param object_id: The object ID to reference.
    @type object_id: int / long
    @return: A L{UXMLObject} instance.
    '''
    # <OBJECT><_object_id>NNN</_object_id></OBJECT>
    ref_block = UXMLObject('OBJECT')

    id_block = UXMLObject('_object_id')
    id_block.add_content(str(object_id))

    ref_block.add_content(id_block)
    return ref_block

+ 77 - 0
html/bin/clients/fluazu/fluazu.py

@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+################################################################################
+# $Id: fluazu.py 2548 2007-02-08 14:18:17Z b4rt $
+# $Date: 2007-02-08 08:18:17 -0600 (Thu, 08 Feb 2007) $
+# $Revision: 2548 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+#                                                                              #
+#  Requirements :                                                              #
+#   * DOPAL                                                                    #
+#     http://dopal.sourceforge.net                                             #
+#   * Azureus with XML/HTTP Plugin                                             #
+#     http://azureus.sourceforge.net                                           #
+#     http://azureus.sourceforge.net/plugin_details.php?plugin=xml_http_if)    #
+#                                                                              #
+################################################################################
+# standard-imports
+import sys
+# fluazu
+from fluazu.FluAzuD import FluAzuD
+################################################################################
+
+""" ------------------------------------------------------------------------ """
+""" main                                                                     """
+""" ------------------------------------------------------------------------ """
+if __name__ == '__main__':
+
+    # version
+    if sys.argv[1:] == ['--version']:
+        from fluazu import __version_str__
+        print __version_str__
+        sys.exit(0)
+
+    # check argv-length
+    if len(sys.argv) < 7:
+        from fluazu import __version_str__
+        print "fluazu %s" % __version_str__
+        print "\nError: missing arguments.\n"
+        print "Usage:"
+        print "fluazu.py path host port secure username password\n"
+        print " path     : flux-path"
+        print " host     : host of azureus-server"
+        print " port     : port of azureus-server (xml/http, default: 6884)"
+        print " secure   : use secure connection to azureus (0/1)"
+        print " username : username to use when connecting to azureus-server"
+        print " password : password to use when connecting to azureus-server\n"
+        sys.exit(0)
+
+    # run daemon
+    daemon = FluAzuD()
+    exitVal = 0
+    try:
+        exitVal = daemon.run(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6])
+    except KeyboardInterrupt:
+        daemon.shutdown()
+        exitVal = 0
+    except Exception, e:
+        print e
+
+    # exit
+    sys.exit(exitVal)

+ 792 - 0
html/bin/clients/fluazu/fluazu/FluAzuD.py

@@ -0,0 +1,792 @@
+################################################################################
+# $Id: FluAzuD.py 2891 2007-04-13 19:04:50Z b4rt $
+# $Date: 2007-04-13 14:04:50 -0500 (Fri, 13 Apr 2007) $
+# $Revision: 2891 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+# standard-imports
+import sys
+import os
+import time
+# fluazu
+from fluazu.output import printMessage, printError, printException
+from fluazu.Transfer import Transfer
+# dopal
+from dopal.main import make_connection
+from dopal.errors import LinkError
+import dopal.aztypes
+################################################################################
+
+""" ------------------------------------------------------------------------ """
+""" FluAzuD                                                                  """
+""" ------------------------------------------------------------------------ """
class FluAzuD(object):
    """Daemon bridging torrentflux ("flu") and a remote Azureus server.

    Watches a filesystem command/transfer protocol under <tf_path>/.fluazu/
    (command-file, run/ and del/ request dirs) and drives the Azureus
    download-manager over a DOPAL XML/HTTP connection.  run() is the entry
    point; main() is the polling loop.
    """

    """ class-fields """
    # max attempts to re-establish a lost Azureus connection before giving up
    MAX_RECONNECT_TRIES = 5

    """ -------------------------------------------------------------------- """
    """ __init__                                                             """
    """ -------------------------------------------------------------------- """
    def __init__(self):
        """Initialize all fields with defaults; real values are set in run()."""
        self.running = 1
        self.transfers = []
        self.downloads = {}
        self.pid = '0'

        # tf-settings (torrentflux paths)
        self.tf_path = ''
        self.tf_pathTransfers = ''

        # flu-settings (fluazu working dir and protocol files, derived in run())
        self.flu_path = ''
        self.flu_pathTransfers = ''
        self.flu_pathTransfersRun = ''
        self.flu_pathTransfersDel = ''
        self.flu_fileCommand = ''
        self.flu_filePid = ''
        self.flu_fileStat = ''

        # azu-settings (Azureus server connection details)
        self.azu_host = '127.0.0.1'
        self.azu_port = 6884
        self.azu_secure = False
        self.azu_user = ''
        self.azu_pass = ''
        self.azu_version_str = ''

        # dopal (connection, plugin-interface and download-manager proxies)
        self.connection = None
        self.interface = None
        self.dm = None

    """ -------------------------------------------------------------------- """
    """ run                                                                  """
    """ -------------------------------------------------------------------- """
    def run(self, path, host, port, secure, username, password):
        """Configure the daemon from the CLI arguments, initialize, and run.

        path     -- torrentflux base path (trailing slash expected by callers)
        host     -- Azureus server host
        port     -- Azureus xml/http port (string, converted to int)
        secure   -- '1' for a secure connection, anything else for plain
        username -- Azureus user (empty string disables authentication)
        password -- Azureus password
        Returns the process exit-status (0 = clean shutdown, 1 = init failure).
        """
        printMessage("fluazu starting up:")

        # set vars
        self.tf_path = path
        self.tf_pathTransfers = self.tf_path + '.transfers/'
        self.flu_path = self.tf_path + '.fluazu/'
        self.flu_fileCommand = self.flu_path + 'fluazu.cmd'
        self.flu_filePid = self.flu_path + 'fluazu.pid'
        self.flu_fileStat = self.flu_path + 'fluazu.stat'
        self.flu_pathTransfers = self.flu_path + 'cur/'
        self.flu_pathTransfersRun = self.flu_path + 'run/'
        self.flu_pathTransfersDel = self.flu_path + 'del/'
        self.azu_host = host
        self.azu_port = int(port)
        if secure == '1':
            self.azu_secure = True
        else:
            self.azu_secure = False
        self.azu_user = username
        self.azu_pass = password

        # more vars
        printMessage("flu-path: %s" % str(self.flu_path))
        printMessage("azu-host: %s" % str(self.azu_host))
        printMessage("azu-port: %s" % str(self.azu_port))
        printMessage("azu-secure: %s" % str(self.azu_secure))
        if len(self.azu_user) > 0:
            printMessage("azu-user: %s" % str(self.azu_user))
            printMessage("azu-pass: %s" % str(self.azu_pass))

        # initialize
        if not self.initialize():
            printError("there were problems initializing fluazu, shutting down...")
            self.shutdown()
            return 1

        # main
        return self.main()

    """ -------------------------------------------------------------------- """
    """ initialize                                                           """
    """ -------------------------------------------------------------------- """
    def initialize(self):
        """Prepare dirs/pid-file, load transfers and connect to Azureus.

        Returns True on success, False on any failure (caller shuts down).
        """

        # flu

        # check dirs
        if not self.checkDirs():
            printError("Error checking dirs. path: %s" % self.tf_path)
            return False

        # write pid-file so the web frontend can find/signal this daemon
        self.pid = (str(os.getpid())).strip()
        printMessage("writing pid-file %s (%s)" % (self.flu_filePid, self.pid))
        try:
            pidFile = open(self.flu_filePid, 'w')
            pidFile.write(self.pid + "\n")
            pidFile.flush()
            pidFile.close()
        except:
            printError("Failed to write pid-file %s (%s)" % (self.flu_filePid, self.pid))
            return False

        # delete command-file if exists (stale commands from a previous run)
        if os.path.isfile(self.flu_fileCommand):
            try:
                printMessage("removing command-file %s ..." % self.flu_fileCommand)
                os.remove(self.flu_fileCommand)
            except:
                printError("Failed to delete commandfile %s" % self.flu_fileCommand)
                return False

        # load transfers
        self.loadTransfers()

        # azu
        printMessage("connecting to Azureus-Server (%s:%d)..." % (self.azu_host, self.azu_port))

        # set connection details; credentials only when a username was given
        connection_details = {}
        connection_details['host'] = self.azu_host
        connection_details['port'] = self.azu_port
        connection_details['secure'] = self.azu_secure
        if len(self.azu_user) > 0:
            connection_details['user'] = self.azu_user
            connection_details['password'] = self.azu_pass

        # make connection (persistent, so it can be re-established on loss)
        try:
            self.connection = make_connection(**connection_details)
            self.connection.is_persistent_connection = True
            self.interface = self.connection.get_plugin_interface()
        except:
            printError("could not connect to Azureus-Server")
            printException()
            return False

        # azureus version: normalize "(2, 5, 0)"-style tuple repr to "2.5.0"
        self.azu_version_str = str(self.connection.get_azureus_version())
        self.azu_version_str = self.azu_version_str.replace(", ", ".")
        self.azu_version_str = self.azu_version_str.replace("(", "")
        self.azu_version_str = self.azu_version_str.replace(")", "")
        printMessage("connected. Azureus-Version: %s" % self.azu_version_str)

        # download-manager
        self.dm = self.interface.getDownloadManager()
        if self.dm is None:
            printError("Error getting Download-Manager object")
            return False

        # write stat-file and return
        return self.writeStatFile()

    """ -------------------------------------------------------------------- """
    """ shutdown                                                             """
    """ -------------------------------------------------------------------- """
    def shutdown(self):
        """Remove the stat- and pid-files; best-effort, never raises."""
        printMessage("fluazu shutting down...")

        # delete stat-file if exists
        if os.path.isfile(self.flu_fileStat):
            try:
                printMessage("deleting stat-file %s ..." % self.flu_fileStat)
                os.remove(self.flu_fileStat)
            except:
                printError("Failed to delete stat-file %s " % self.flu_fileStat)

        # delete pid-file if exists
        if os.path.isfile(self.flu_filePid):
            try:
                printMessage("deleting pid-file %s ..." % self.flu_filePid)
                os.remove(self.flu_filePid)
            except:
                printError("Failed to delete pid-file %s " % self.flu_filePid)

    """ -------------------------------------------------------------------- """
    """ main                                                                 """
    """ -------------------------------------------------------------------- """
    def main(self):
        """Main polling loop; returns the exit-status (0 = clean, 1 = lost azu).

        Outer loop (while running): verify connection, refresh downloads,
        push state to transfers.  Inner loop: 4 x 1-second ticks processing
        the daemon and per-transfer command stacks.
        """

        # main-loop
        while self.running > 0:

            # check if connection still valid, shutdown if it is not
            if not self.checkAzuConnection():
                # shutdown
                self.shutdown()
                # return
                return 1

            # update downloads
            self.updateDownloads()

            # update transfers
            for transfer in self.transfers:
                if transfer.name in self.downloads:
                    # update
                    transfer.update(self.downloads[transfer.name])

            # inner loop
            for i in range(4):

                # process daemon command stack; True means a quit-command
                if self.processCommandStack():
                    # shutdown
                    self.running = 0
                    break;

                # process transfers command stacks
                for transfer in self.transfers:
                    if transfer.isRunning():
                        if transfer.processCommandStack(self.downloads[transfer.name]):
                            # update downloads
                            self.updateDownloads()

                # sleep
                time.sleep(1)

        # shutdown
        self.shutdown()

        # return
        return 0

    """ -------------------------------------------------------------------- """
    """ reload                                                               """
    """ -------------------------------------------------------------------- """
    def reload(self):
        """Process pending delete/run requests, then reload the transfer list."""
        printMessage("reloading...")

        # delete-requests
        self.processDeleteRequests()

        # run-requests
        self.processRunRequests()

        # transfers
        self.loadTransfers()

    """ -------------------------------------------------------------------- """
    """ processDeleteRequests                                                """
    """ -------------------------------------------------------------------- """
    def processDeleteRequests(self):
        """Handle request-files dropped into the del/ dir by the frontend.

        Each filename in del/ names a transfer to remove from Azureus; the
        request-file and the matching cur/ file are deleted.  Returns False
        only when the del/ dir cannot be listed.
        """
        printMessage("processing delete-requests...")

        # read requests
        requests = []
        try:
            for fileName in os.listdir(self.flu_pathTransfersDel):
                # add
                requests.append(fileName)
                # del file (the request marker itself)
                delFile = self.flu_pathTransfersDel + fileName
                try:
                    os.remove(delFile)
                except:
                    printError("Failed to delete file : %s" % delFile)
        except:
            return False

        # process requests
        if len(requests) > 0:
            for fileName in requests:
                printMessage("deleting %s ..." % fileName)
                # update downloads (reset cache so removed ones disappear)
                self.downloads = {}
                self.updateDownloads()
                # remove if needed
                if fileName in self.downloads:
                    # remove transfer
                    self.removeTransfer(fileName)
                # del file (the transfer entry in cur/)
                delFile = self.flu_pathTransfers + fileName
                try:
                    os.remove(delFile)
                except:
                    printError("Failed to delete file : %s" % delFile)

        # return
        return True

    """ -------------------------------------------------------------------- """
    """ processRunRequests                                                   """
    """ -------------------------------------------------------------------- """
    def processRunRequests(self):
        """Handle request-files dropped into the run/ dir by the frontend.

        Each file in run/ is moved to cur/, added to Azureus if not already
        known, then started.  Returns False only when the run/ dir cannot
        be listed.
        """
        printMessage("processing run-requests...")

        # read requests
        requests = []
        try:
            for fileName in os.listdir(self.flu_pathTransfersRun):
                inputFile = self.flu_pathTransfersRun + fileName
                outputFile = self.flu_pathTransfers + fileName
                # move file + add to requests
                # (read/delete/write instead of rename — works across devices)
                try:
                    # read file to mem
                    f = open(inputFile, 'r')
                    data = f.read()
                    f.close()
                    # delete
                    os.remove(inputFile)
                    # write file
                    f = open(outputFile, 'w')
                    f.write(data)
                    f.flush()
                    f.close()
                    # add
                    requests.append(fileName)
                except:
                    printError("Failed to move file : %s" % inputFile)
        except:
            return False

        # process requests
        if len(requests) > 0:
            try:
                # update downloads
                self.downloads = {}
                self.updateDownloads()
                for fileName in requests:
                    # add if needed
                    if fileName not in self.downloads:
                        try:
                            # add
                            self.addTransfer(fileName)
                        except:
                            printError("exception when adding new transfer %s" % fileName)
                            raise
                    # downloads: poll up to 5x1s for Azureus to register the add
                    tries = 0
                    while tries < 5 and fileName not in self.downloads:
                        #if fileName not in self.downloads:
                        printMessage("download %s missing, update downloads..." % fileName)
                        self.updateDownloads()
                        # sleep + increment
                        time.sleep(1)
                        tries += 1
                    # start transfer
                    if fileName in self.downloads:
                        try:
                            transfer = Transfer(self.tf_pathTransfers, self.flu_pathTransfers, fileName)
                            transfer.start(self.downloads[fileName])
                        except:
                            printError("exception when starting new transfer %s" % fileName)
                            raise
                    else:
                        printError("download %s not in azureus-downloads, cannot start it." % fileName)
            except:
                printMessage("exception when processing run-requests:")
                printException()

        # return
        return True

    """ -------------------------------------------------------------------- """
    """ addTransfer                                                          """
    """ -------------------------------------------------------------------- """
    def addTransfer(self, tname):
        """Add torrent *tname* to the Azureus download-manager.

        Returns True on success, False on failure (exception is logged).
        """
        printMessage("adding new transfer %s ..." % tname)
        try:
            # transfer-object
            transfer = Transfer(self.tf_pathTransfers, self.flu_pathTransfers, tname)

            # torrent-object
            torrent = self.interface.getTorrentManager().createFromBEncodedFile(transfer.fileTorrent)

            # file-objects (remote java.io.File wrappers for source/save-path)
            fileSource = dopal.aztypes.wrap_file(transfer.fileTorrent)
            fileTarget = dopal.aztypes.wrap_file(transfer.tf.savepath)

            # add
            self.dm.addDownload(torrent, fileSource, fileTarget)

            # return
            return True
        except:
            printMessage("exception when adding transfer:")
            printException()
            return False

    """ -------------------------------------------------------------------- """
    """ removeTransfer                                                       """
    """ -------------------------------------------------------------------- """
    def removeTransfer(self, tname):
        """Remove download *tname* from Azureus.  Returns True on success."""
        printMessage("removing transfer %s ..." % tname)
        try:
            self.downloads[tname].remove()
            return True
        except:
            printMessage("exception when removing transfer:")
            printException()
            return False

    """ -------------------------------------------------------------------- """
    """ loadTransfers                                                        """
    """ -------------------------------------------------------------------- """
    def loadTransfers(self):
        """Rebuild self.transfers from the files present in the cur/ dir.

        Returns True on success, False when the dir cannot be listed.
        """
        printMessage("loading transfers...")
        self.transfers = []
        try:
            for fileName in os.listdir(self.flu_pathTransfers):
                self.transfers.append(Transfer(self.tf_pathTransfers, self.flu_pathTransfers, fileName))
            return True
        except:
            return False

    """ -------------------------------------------------------------------- """
    """ updateDownloads                                                      """
    """ -------------------------------------------------------------------- """
    def updateDownloads(self):
        """Refresh self.downloads, keyed by torrent-file basename.

        NOTE(review): entries are only added/overwritten, never pruned —
        callers that need a fresh view reset self.downloads = {} first.
        """
        azu_dls = self.dm.getDownloads()
        for download in azu_dls:
            tfile = (os.path.split(str(download.getTorrentFileName())))[1]
            self.downloads[tfile] = download

    """ -------------------------------------------------------------------- """
    """ processCommandStack                                                  """
    """ -------------------------------------------------------------------- """
    def processCommandStack(self):
        """Read, delete and execute the command-file, one command per line.

        Returns True when a quit-command was executed (caller shuts down),
        False otherwise.
        """
        if os.path.isfile(self.flu_fileCommand):

            # process file
            printMessage("Processing command-file %s ..." % self.flu_fileCommand)
            try:

                # read file to mem
                try:
                    f = open(self.flu_fileCommand, 'r')
                    data = f.read()
                    f.close()
                except:
                    printError("Failed to read command-file : %s" % self.flu_fileCommand)
                    raise

                # delete file (before exec, so commands are consumed once)
                try:
                    os.remove(self.flu_fileCommand)
                except:
                    printError("Failed to delete command-file : %s" % self.flu_fileCommand)

                # exec commands
                if len(data) > 0:
                    commands = data.split("\n")
                    if len(commands) > 0:
                        for command in commands:
                            if len(command) > 0:
                                try:
                                    # exec, early out when reading a quit-command
                                    if self.execCommand(command):
                                        return True
                                except:
                                    printError("Failed to exec command: %s" % command)
                    else:
                        printMessage("No commands found.")
                else:
                    printMessage("No commands found.")

            except:
                printError("Failed to process command-stack : %s" % self.flu_fileCommand)
        return False

    """ -------------------------------------------------------------------- """
    """ execCommand                                                          """
    """ -------------------------------------------------------------------- """
    def execCommand(self, command):
        """Execute a single one-letter command; first char is the op-code.

        q            -- quit (returns True; every other path returns False)
        r            -- reload transfers/requests
        u<rate>      -- set global upload-rate
        d<rate>      -- set global download-rate
        s<key>:<val> -- change an Azureus core int-setting
        """

        # op-code
        opCode = command[0]

        # q
        if opCode == 'q':
            printMessage("command: stop-request, setting shutdown-flag...")
            return True

        # r
        elif opCode == 'r':
            printMessage("command: reload-request, reloading...")
            self.reload()
            return False

        # u
        elif opCode == 'u':
            if len(command) < 2:
                printMessage("invalid rate.")
                return False
            rateNew = command[1:]
            printMessage("command: setting upload-rate to %s ..." % rateNew)
            self.setRateU(int(rateNew))
            return False

        # d
        elif opCode == 'd':
            if len(command) < 2:
                printMessage("invalid rate.")
                return False
            rateNew = command[1:]
            printMessage("command: setting download-rate to %s ..." % rateNew)
            self.setRateD(int(rateNew))
            return False

        # s
        elif opCode == 's':
            try:
                if len(command) < 3:
                    raise
                workLoad = command[1:]
                sets = workLoad.split(":")
                setKey = sets[0]
                setVal = sets[1]
                if len(setKey) < 1 or len(setVal) < 1:
                    raise
                printMessage("command: changing setting %s to %s ..." % (setKey, setVal))
                if self.changeSetting(setKey, setVal):
                    self.writeStatFile()
                return False
            except:
                printMessage("invalid setting.")
                return False

        # default
        else:
            printMessage("op-code unknown: %s" % opCode)
            return False

    """ -------------------------------------------------------------------- """
    """ checkDirs                                                            """
    """ -------------------------------------------------------------------- """
    def checkDirs(self):
        """Verify tf-dirs exist and create missing flu-dirs (mode 0700).

        Returns True when all dirs are usable, False otherwise.
        """

        # tf-paths (must already exist — never created here)
        if not os.path.isdir(self.tf_path):
            printError("Invalid path-dir: %s" % self.tf_path)
            return False
        if not os.path.isdir(self.tf_pathTransfers):
            printError("Invalid tf-transfers-dir: %s" % self.tf_pathTransfers)
            return False

        # flu-paths (created on demand)
        if not os.path.isdir(self.flu_path):
            try:
                printMessage("flu-main-path %s does not exist, trying to create ..." % self.flu_path)
                os.mkdir(self.flu_path, 0700)
                printMessage("done.")
            except:
                printError("Failed to create flu-main-path %s" % self.flu_path)
                return False
        if not os.path.isdir(self.flu_pathTransfers):
            try:
                printMessage("flu-transfers-path %s does not exist, trying to create ..." % self.flu_pathTransfers)
                os.mkdir(self.flu_pathTransfers, 0700)
                printMessage("done.")
            except:
                printError("Failed to create flu-main-path %s" % self.flu_pathTransfers)
                return False
        if not os.path.isdir(self.flu_pathTransfersRun):
            try:
                printMessage("flu-transfers-run-path %s does not exist, trying to create ..." % self.flu_pathTransfersRun)
                os.mkdir(self.flu_pathTransfersRun, 0700)
                printMessage("done.")
            except:
                printError("Failed to create flu-main-path %s" % self.flu_pathTransfersRun)
                return False
        if not os.path.isdir(self.flu_pathTransfersDel):
            try:
                printMessage("flu-transfers-del-path %s does not exist, trying to create ..." % self.flu_pathTransfersDel)
                os.mkdir(self.flu_pathTransfersDel, 0700)
                printMessage("done.")
            except:
                printError("Failed to create flu-main-path %s" % self.flu_pathTransfersDel)
                return False

        # return
        return True

    """ -------------------------------------------------------------------- """
    """ changeSetting                                                        """
    """ -------------------------------------------------------------------- """
    def changeSetting(self, key, val):
        """Set the Azureus core int-parameter named *key* to int(*val*).

        Only the whitelisted CORE_PARAM_INT_* keys are accepted.
        Returns True on success, False otherwise.
        """
        try:

            # get plugin-config
            config_object = self.interface.getPluginconfig()

            # core-keys: whitelist of settable parameters -> remote constants
            coreKeys = { \
                'CORE_PARAM_INT_MAX_ACTIVE': config_object.CORE_PARAM_INT_MAX_ACTIVE, \
                'CORE_PARAM_INT_MAX_ACTIVE_SEEDING': config_object.CORE_PARAM_INT_MAX_ACTIVE_SEEDING, \
                'CORE_PARAM_INT_MAX_CONNECTIONS_GLOBAL': config_object.CORE_PARAM_INT_MAX_CONNECTIONS_GLOBAL, \
                'CORE_PARAM_INT_MAX_CONNECTIONS_PER_TORRENT': config_object.CORE_PARAM_INT_MAX_CONNECTIONS_PER_TORRENT, \
                'CORE_PARAM_INT_MAX_DOWNLOAD_SPEED_KBYTES_PER_SEC': config_object.CORE_PARAM_INT_MAX_DOWNLOAD_SPEED_KBYTES_PER_SEC, \
                'CORE_PARAM_INT_MAX_DOWNLOADS': config_object.CORE_PARAM_INT_MAX_DOWNLOADS, \
                'CORE_PARAM_INT_MAX_UPLOAD_SPEED_KBYTES_PER_SEC': config_object.CORE_PARAM_INT_MAX_UPLOAD_SPEED_KBYTES_PER_SEC, \
                'CORE_PARAM_INT_MAX_UPLOAD_SPEED_SEEDING_KBYTES_PER_SEC': config_object.CORE_PARAM_INT_MAX_UPLOAD_SPEED_SEEDING_KBYTES_PER_SEC, \
                'CORE_PARAM_INT_MAX_UPLOADS': config_object.CORE_PARAM_INT_MAX_UPLOADS, \
                'CORE_PARAM_INT_MAX_UPLOADS_SEEDING': config_object.CORE_PARAM_INT_MAX_UPLOADS_SEEDING \
            }
            if key not in coreKeys:
                printMessage("settings-key unknown: %s" % key)
                return False

            # change setting
            try:
                config_object.setIntParameter(coreKeys[key], int(val))
                return True
            except:
                printMessage("Failed to change setting %s to %s" % (key, val))
                printException()
                return False

        except:
            printMessage("Failed to get Plugin-Config.")
            printException()
        return False

    """ -------------------------------------------------------------------- """
    """ writeStatFile                                                        """
    """ -------------------------------------------------------------------- """
    def writeStatFile(self):
        """Write the stat-file: host, port, version, then one core-param per line.

        Presumably consumed by the flux web frontend — line order must match
        the coreVars list below.  Returns True on success, False otherwise.
        """
        try:

            # get plugin-config
            config_object = self.interface.getPluginconfig()

            # get vars (order defines the stat-file line order)
            coreVars = [ \
                config_object.CORE_PARAM_INT_MAX_ACTIVE, \
                config_object.CORE_PARAM_INT_MAX_ACTIVE_SEEDING, \
                config_object.CORE_PARAM_INT_MAX_CONNECTIONS_GLOBAL, \
                config_object.CORE_PARAM_INT_MAX_CONNECTIONS_PER_TORRENT, \
                config_object.CORE_PARAM_INT_MAX_DOWNLOAD_SPEED_KBYTES_PER_SEC, \
                config_object.CORE_PARAM_INT_MAX_DOWNLOADS, \
                config_object.CORE_PARAM_INT_MAX_UPLOAD_SPEED_KBYTES_PER_SEC, \
                config_object.CORE_PARAM_INT_MAX_UPLOAD_SPEED_SEEDING_KBYTES_PER_SEC, \
                config_object.CORE_PARAM_INT_MAX_UPLOADS, \
                config_object.CORE_PARAM_INT_MAX_UPLOADS_SEEDING \
            ]
            coreParams = {}
            for coreVar in coreVars:
                try:
                    coreParams[coreVar] = config_object.getIntParameter(coreVar, 0)
                except:
                    # unreadable parameter: fall back to 0 so the file stays complete
                    coreParams[coreVar] = 0
                    printException()

            # write file
            try:
                f = open(self.flu_fileStat, 'w')
                f.write("%s\n" % self.azu_host)
                f.write("%d\n" % self.azu_port)
                f.write("%s\n" % self.azu_version_str)
                for coreVar in coreVars:
                    f.write("%d\n" % coreParams[coreVar])
                f.flush()
                f.close()
                return True
            except:
                printError("Failed to write statfile %s " % self.flu_fileStat)
                printException()

        except:
            printMessage("Failed to get Plugin-Config.")
            printException()
        return False

    """ -------------------------------------------------------------------- """
    """ setRateU                                                             """
    """ -------------------------------------------------------------------- """
    def setRateU(self, rate):
        """Set the global upload speed-limit.  Returns True on success."""
        try:
            config_object = self.interface.getPluginconfig()
            config_object.set_upload_speed_limit(rate)
            return True
        except:
            printMessage("Failed to set upload-rate.")
            printException()
            return False

    """ -------------------------------------------------------------------- """
    """ setRateD                                                             """
    """ -------------------------------------------------------------------- """
    def setRateD(self, rate):
        """Set the global download speed-limit.  Returns True on success."""
        try:
            config_object = self.interface.getPluginconfig()
            config_object.set_download_speed_limit(rate)
            return True
        except:
            printMessage("Failed to set download-rate.")
            printException()
            return False

    """ -------------------------------------------------------------------- """
    """ checkAzuConnection                                                   """
    """ -------------------------------------------------------------------- """
    def checkAzuConnection(self):
        """Return True when the Azureus connection is (or becomes) usable.

        On a lost connection, retries up to MAX_RECONNECT_TRIES times with
        an increasing delay, re-fetching the plugin-interface and
        download-manager on each successful reconnect.
        """

        # con valid
        try:
            if self.connection.is_connection_valid():
                return True
            else:
                raise

        # con not valid
        except:

            # out
            printMessage("connection to Azureus-server lost, reconnecting to %s:%d ..." % (self.azu_host, self.azu_port))

            # try to reconnect
            for i in range(FluAzuD.MAX_RECONNECT_TRIES):

                # sleep: linear backoff — 0, 4, 8, 12, 16 seconds
                time.sleep(i << 2)

                # out
                printMessage("reconnect-try %d ..." % (i + 1))

                # establish con
                try:
                    self.connection.establish_connection(True)
                    printMessage("established connection to Azureus-server")
                except:
                    printError("Error establishing connection to Azureus-server")
                    printException()
                    continue

                # interface
                try:
                    self.interface = self.connection.get_plugin_interface()
                except LinkError, error:
                    printError("Error getting interface object")
                    printException()
                    self.interface = None
                    continue

                # download-manager
                try:
                    self.dm = None
                    self.dm = self.interface.getDownloadManager()
                    if self.dm is None:
                        raise
                    else:
                        return True
                except:
                    printError("Error getting Download-Manager object")
                    continue

            # seems like azu is down. give up
            printError("no connection after %d tries, i give up, azu is gone" % FluAzuD.MAX_RECONNECT_TRIES)
            return False

+ 128 - 0
html/bin/clients/fluazu/fluazu/StatFile.py

@@ -0,0 +1,128 @@
+################################################################################
+# $Id: StatFile.py 2891 2007-04-13 19:04:50Z b4rt $
+# $Date: 2007-04-13 14:04:50 -0500 (Fri, 13 Apr 2007) $
+# $Revision: 2891 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+# standard-imports
+import os
+# fluazu
+from fluazu.output import printError
+################################################################################
+
+""" ------------------------------------------------------------------------ """
+""" StatFile                                                                 """
+""" ------------------------------------------------------------------------ """
class StatFile(object):
    """Reader/writer for a flux stat-file.

    A stat-file holds 13 newline-separated fields describing a transfer's
    runtime state (running-flag, percent done, speeds, totals, ...).
    """

    # on-disk field order of the stat-file (read and written in this order)
    _FIELDS = ("running", "percent_done", "time_left", "down_speed",
               "up_speed", "transferowner", "seeds", "peers", "sharing",
               "seedlimit", "uptotal", "downtotal", "size")

    def __init__(self, file):
        """Create a stat-file object; parse *file* right away when a
        non-empty path is given."""

        # path of the stat-file on disk
        self.file = file

        # stat-fields (defaults, used until a file is parsed)
        self.running = 1
        self.percent_done = "0.0"
        self.time_left = ""
        self.down_speed = ""
        self.up_speed = ""
        self.sharing = ""
        self.transferowner = ""
        self.seeds = ""
        self.peers = ""
        self.seedlimit = ""
        self.uptotal = ""
        self.downtotal = ""
        self.size = ""

        # init (original compared identity with '' -- plain truthiness is
        # the reliable spelling)
        if self.file:
            self.initialize(self.file)

    def initialize(self, file):
        """Read *file* and populate the stat-fields from it.

        Returns True on success, False when the file is missing, unreadable
        or does not contain all 13 fields.
        """
        self.file = file

        # read in stat-file + set fields
        if not os.path.isfile(self.file):
            return False
        try:
            f = open(self.file, 'r')
            try:
                data = f.read()
            finally:
                f.close()
        except (IOError, OSError):
            printError("Failed to read statfile %s " % self.file)
            return False

        # set fields (positional, see _FIELDS for the on-disk layout)
        content = data.split("\n")
        if len(content) > 12:
            for idx, name in enumerate(StatFile._FIELDS):
                setattr(self, name, content[idx])
            return True
        printError("Failed to parse statfile %s " % self.file)
        return False

    def write(self):
        """Write the current field values back to the stat-file.

        Returns True on success, False on I/O errors.
        """
        try:
            f = open(self.file, 'w')
            try:
                # all fields newline-separated, no trailing newline
                f.write('\n'.join(str(getattr(self, name))
                                  for name in StatFile._FIELDS))
                f.flush()
            finally:
                f.close()
            return True
        except (IOError, OSError):
            printError("Failed to write statfile %s " % self.file)
        return False

+ 605 - 0
html/bin/clients/fluazu/fluazu/Transfer.py

@@ -0,0 +1,605 @@
+################################################################################
+# $Id: Transfer.py 3055 2007-05-26 17:01:17Z b4rt $
+# $Date: 2007-05-26 12:01:17 -0500 (Sat, 26 May 2007) $
+# $Revision: 3055 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+# standard-imports
+import sys
+import os
+# fluazu
+from fluazu.output import printMessage, printError, getOutput, printException
+from fluazu.TransferFile import TransferFile
+from fluazu.StatFile import StatFile
+################################################################################
+
+""" ------------------------------------------------------------------------ """
+""" Transfer                                                                 """
+""" ------------------------------------------------------------------------ """
class Transfer(object):
    """A single Azureus download managed by flux.

    Maps Azureus engine states onto the two flux states (running/stopped),
    keeps the transfer's stat-, pid- and log-files up to date and executes
    commands queued in its command-file.
    """

    # tf states
    TF_STOPPED = 0
    TF_RUNNING = 1
    TF_NEW = 2
    TF_QUEUED = 3

    # azu states
    AZ_DOWNLOADING = 4
    AZ_ERROR = 8
    AZ_PREPARING = 2
    AZ_QUEUED = 9
    AZ_READY = 3
    AZ_SEEDING = 5
    AZ_STOPPED = 7
    AZ_STOPPING = 6
    AZ_WAITING = 1

    # azu -> flu map
    STATE_MAP = {
        AZ_DOWNLOADING: TF_RUNNING,
        AZ_ERROR: TF_STOPPED,
        AZ_PREPARING: TF_RUNNING,
        AZ_QUEUED: TF_RUNNING,
        AZ_READY: TF_STOPPED,
        AZ_SEEDING: TF_RUNNING,
        AZ_STOPPED: TF_STOPPED,
        AZ_STOPPING: TF_RUNNING,
        AZ_WAITING: TF_STOPPED,
    }

    def __init__(self, tf_pathTransfers, flu_pathTransfers, file):
        """Set up paths and state for transfer *file* and load its meta-
        and stat-files."""
        self.state = Transfer.TF_STOPPED
        self.state_azu = Transfer.AZ_STOPPED
        self.tf_pathTransfers = tf_pathTransfers
        self.flu_pathTransfers = flu_pathTransfers
        self.name = file
        self.fileTorrent = self.tf_pathTransfers + file
        self.fileMeta = self.flu_pathTransfers + file

        # companion files live next to the torrent
        self.fileStat = self.fileTorrent + ".stat"
        self.fileCommand = self.fileTorrent + ".cmd"
        self.fileLog = self.fileTorrent + ".log"
        self.filePid = self.fileTorrent + ".pid"

        # meta-file-object
        self.tf = None

        # stat-object
        self.sf = None

        # initialize
        self.initialize()

    def initialize(self):
        """Load the transfer- (meta) and stat-file objects. Returns True."""
        printMessage("initializing transfer %s ..." % self.name)

        # meta-file
        printMessage("loading transfer-file %s ..." % self.fileMeta)
        self.tf = TransferFile(self.fileMeta)

        # stat-file
        printMessage("loading statfile %s ..." % self.fileStat)
        self.sf = StatFile(self.fileStat)

        # verbose
        printMessage("transfer loaded.")
        return True

    def update(self, download):
        """Refresh flux state from the Azureus *download*; while running,
        also update the stat-file."""
        # azu-state -> flu-state
        self.state_azu = download.getState()
        self.state = Transfer.STATE_MAP[self.state_azu]

        # only when running
        if self.state == Transfer.TF_RUNNING:
            self.statRunning(download)

    def start(self, download):
        """(Re)start the Azureus *download*, write the pid-file and apply
        the configured rate limits. Returns True."""
        self.log("starting transfer %s (%s) ..." % (str(self.name), str(self.tf.transferowner)))

        # stat
        self.statStartup(download)

        # write pid
        self.writePid()

        # start transfer
        try:
            download.restart()
        except Exception:
            self.log("exception when starting transfer :")
            printException()

        # refresh
        download.refresh_object()

        # set state
        self.state = Transfer.STATE_MAP[download.getState()]

        # set rates from the meta-file
        self.setRateU(download, int(self.tf.max_upload_rate))
        self.setRateD(download, int(self.tf.max_download_rate))

        # log
        self.log("transfer started.")
        return True

    def stop(self, download):
        """Stop the Azureus *download*, update the stat-file and remove the
        pid-file. Returns False when stopping raised, True otherwise."""
        self.log("stopping transfer %s (%s) ..." % (str(self.name), str(self.tf.transferowner)))

        # stat
        self.statShutdown(download)

        # stop transfer
        retVal = True
        try:
            download.stop()
        except Exception:
            self.log("exception when stopping transfer :")
            printException()
            retVal = False

        # delete pid
        self.deletePid()

        # log
        self.log("transfer stopped.")

        # states
        self.state = Transfer.TF_STOPPED
        self.state_azu = Transfer.AZ_STOPPED

        return retVal

    def isRunning(self):
        """True when the transfer is in the flux running state."""
        return (self.state == Transfer.TF_RUNNING)

    def processCommandStack(self, download):
        """Read, delete and execute the command-file, one command per line.

        Returns True when a command requested shutdown (the transfer is
        stopped before returning), False otherwise.
        """
        if os.path.isfile(self.fileCommand):

            # process file
            self.log("Processing command-file %s ..." % self.fileCommand)
            try:

                # read file to mem
                f = open(self.fileCommand, 'r')
                try:
                    data = f.read()
                finally:
                    f.close()

                # delete file so commands run at most once
                try:
                    os.remove(self.fileCommand)
                except OSError:
                    self.log("Failed to delete command-file : %s" % self.fileCommand)

                # exec commands
                if len(data) > 0:
                    commands = data.split("\n")
                    if len(commands) > 0:
                        for command in commands:
                            if len(command) > 0:
                                # stop reading on a quit-command
                                if self.execCommand(download, command):
                                    # stop it
                                    self.stop(download)
                                    return True
                    else:
                        self.log("No commands found.")
                else:
                    self.log("No commands found.")

            except Exception:
                self.log("Failed to read command-file : %s" % self.fileCommand)
        return False

    def execCommand(self, download, command):
        """Execute a single command-string (op-code + argument).

        Op-codes: q=quit, u=upload-rate, d=download-rate, r=die-when-done,
        s=sharekill. Returns True only for the quit-command (signals the
        caller to shut the transfer down).
        """
        opCode = command[0]

        # q -- quit
        if opCode == 'q':
            self.log("command: stop-request, setting shutdown-flag...")
            return True

        # u -- upload-rate
        elif opCode == 'u':
            if len(command) < 2:
                self.log("invalid rate.")
                return False
            rateNew = command[1:]
            self.log("command: setting upload-rate to %s ..." % rateNew)
            # set rate; persist to the meta-file only when it was applied
            if self.setRateU(download, rateNew):
                self.tf.max_upload_rate = rateNew
                self.tf.write()
            return False

        # d -- download-rate
        elif opCode == 'd':
            if len(command) < 2:
                self.log("invalid rate.")
                return False
            rateNew = command[1:]
            self.log("command: setting download-rate to %s ..." % rateNew)
            # set rate; persist to the meta-file only when it was applied
            if self.setRateD(download, rateNew):
                self.tf.max_download_rate = rateNew
                self.tf.write()
            return False

        # r -- die-when-done ('0'/'1' -> 'False'/'True')
        elif opCode == 'r':
            if len(command) < 2:
                self.log("invalid runtime-code.")
                return False
            runtimeNew = command[1]
            if runtimeNew == '0':
                rt = 'False'
            elif runtimeNew == '1':
                rt = 'True'
            else:
                self.log("runtime-code unknown: %s" % runtimeNew)
                return False
            self.log("command: setting die-when-done to %s" % rt)
            # update meta-object
            self.tf.die_when_done = rt
            self.tf.write()
            return False

        # s -- sharekill
        elif opCode == 's':
            if len(command) < 2:
                self.log("invalid sharekill.")
                return False
            sharekillNew = command[1:]
            self.log("command: setting sharekill to %s ..." % sharekillNew)
            # update meta-object
            self.tf.sharekill = sharekillNew
            self.tf.write()
            return False

        # default
        else:
            self.log("op-code unknown: %s" % opCode)
            return False

    def setRateU(self, download, rate):
        """Set the upload limit (kB/s -> bytes/s). Returns True on success."""
        try:
            download.setUploadRateLimitBytesPerSecond((int(rate) << 10))
            return True
        except Exception:
            printMessage("Failed to set upload-rate.")
            printException()
            return False

    def setRateD(self, download, rate):
        """Set the download limit in kB/s. Returns True on success."""
        try:
            download.setMaximumDownloadKBPerSecond(int(rate))
            return True
        except Exception:
            printMessage("Failed to set download-rate.")
            printException()
            return False

    def statStartup(self, download):
        """Write a 'Starting...' snapshot to the stat-file. Returns the
        stat-file write result (False on errors)."""
        # set some values
        self.sf.running = Transfer.TF_RUNNING
        self.sf.percent_done = 0
        self.sf.time_left = "Starting..."
        self.sf.down_speed = "0.00 kB/s"
        self.sf.up_speed = "0.00 kB/s"
        self.sf.transferowner = self.tf.transferowner
        self.sf.seeds = ""
        self.sf.peers = ""
        self.sf.sharing = ""
        self.sf.seedlimit = ""
        self.sf.uptotal = 0
        self.sf.downtotal = 0
        try:
            # get size (best-effort)
            try:
                size = str(download.getTorrent().getSize())
                self.sf.size = size
            except Exception:
                printException()
            # write
            return self.sf.write()
        except Exception:
            printException()
            return False

    def statRunning(self, download):
        """Update the stat-file from a running download's statistics.

        Also enforces die-when-done and the seed-(share-)limit, stopping the
        transfer when either triggers. Every statistic is fetched
        best-effort -- a failure in one field must not block the others.
        """
        # die-when-done
        if self.state_azu == Transfer.AZ_SEEDING and self.tf.die_when_done.lower() == 'true':
            self.log("die-when-done set, setting shutdown-flag...")
            self.stop(download)
            return

        # set some values
        self.sf.running = Transfer.TF_RUNNING
        try:
            try:

                # stats
                if download is None:
                    return
                stats = download.getStats()
                if stats is None:
                    return

                # die-on-seed-limit (share-ratio is reported in tenths of %)
                if self.state_azu == Transfer.AZ_SEEDING:
                    sk = float(self.tf.sharekill)
                    if sk > 0:
                        try:
                            shareRatio = (float(stats.getShareRatio())) / 10
                            if shareRatio >= sk:
                                self.log("seed-limit %s reached (%s), setting shutdown-flag..." % (self.tf.sharekill, str(shareRatio)))
                                self.stop(download)
                                return
                        except Exception:
                            printException()

                # completed (reported in tenths of %)
                try:
                    pctf = (float(stats.getCompleted())) / 10
                    self.sf.percent_done = str(pctf)
                except Exception:
                    printException()

                # time_left
                try:
                    self.sf.time_left = str(stats.getETA())
                except Exception:
                    self.sf.time_left = '-'

                # down_speed
                try:
                    self.sf.down_speed = "%.1f kB/s" % ((float(stats.getDownloadAverage())) / 1024)
                except Exception:
                    printException()

                # up_speed
                try:
                    self.sf.up_speed = "%.1f kB/s" % ((float(stats.getUploadAverage())) / 1024)
                except Exception:
                    printException()

                # uptotal
                try:
                    self.sf.uptotal = str(stats.getUploaded())
                except Exception:
                    printException()

                # downtotal
                try:
                    self.sf.downtotal = str(stats.getDownloaded())
                except Exception:
                    printException()

            except Exception:
                printException()

            # hosts: "connected (total-from-scrape)" for seeds and peers
            try:
                ps = download.getPeerManager().getStats()
                scrape = download.getLastScrapeResult()

                # seeds (negative counts are clamped to 0)
                try:
                    countS = int(scrape.getSeedCount())
                    if (countS < 0):
                        countS = 0
                    countSC = int(ps.getConnectedSeeds())
                    if (countSC < 0):
                        countSC = 0
                    self.sf.seeds = "%d (%d)" % (countSC, countS)
                except Exception:
                    printException()

                # peers (negative counts are clamped to 0)
                try:
                    countP = int(scrape.getNonSeedCount())
                    if (countP < 0):
                        countP = 0
                    countPC = int(ps.getConnectedLeechers())
                    if (countPC < 0):
                        countPC = 0
                    self.sf.peers = "%d (%d)" % (countPC, countP)
                except Exception:
                    printException()

            except Exception:
                printException()

            # write
            return self.sf.write()

        except Exception:
            printException()
            return False

    def statShutdown(self, download, error = None):
        """Write a final stat-file snapshot for a stopping transfer.

        Incomplete transfers get a negative percent-done (flux convention
        for stopped transfers); *error*, when given, replaces the time-left
        message. Returns the stat-file write result.
        """
        # set some values
        self.sf.running = Transfer.TF_STOPPED
        self.sf.down_speed = "0.00 kB/s"
        self.sf.up_speed = "0.00 kB/s"
        self.sf.transferowner = self.tf.transferowner
        self.sf.seeds = ""
        self.sf.peers = ""
        self.sf.sharing = ""
        self.sf.seedlimit = ""
        try:

            # stats
            try:
                stats = download.getStats()

                # done
                if download.isComplete():
                    self.sf.percent_done = 100
                    self.sf.time_left = "Download Succeeded!"

                # not done: encode progress as -(100 - pct)
                else:
                    try:
                        pctf = float(stats.getCompleted())
                        pctf /= 10
                        pcts = "-" + str(pctf)
                        pctf = float(pcts)
                        pctf -= 100
                        self.sf.percent_done = str(pctf)
                    except Exception:
                        printException()
                    self.sf.time_left = "Transfer Stopped"

                # uptotal
                try:
                    self.sf.uptotal = str(stats.getUploaded())
                except Exception:
                    printException()

                # downtotal
                try:
                    self.sf.downtotal = str(stats.getDownloaded())
                except Exception:
                    printException()
            except Exception:
                printException()

            # size
            try:
                self.sf.size = str(download.getTorrent().getSize())
            except Exception:
                printException()

            # error
            if error is not None:
                self.sf.time_left = "Error: %s" % error

            # write
            return self.sf.write()

        except Exception:
            printException()
            return False

    def writePid(self):
        """Write a (dummy) pid-file for the transfer. Returns True on
        success, False on I/O errors."""
        self.log("writing pid-file %s " % self.filePid)
        try:
            pidFile = open(self.filePid, 'w')
            try:
                pidFile.write("0\n")
                pidFile.flush()
            finally:
                pidFile.close()
            return True
        except Exception:
            self.log("Failed to write pid-file %s" % self.filePid)
            return False

    def deletePid(self):
        """Remove the transfer's pid-file. Returns True on success."""
        self.log("deleting pid-file %s " % self.filePid)
        try:
            os.remove(self.filePid)
            return True
        except Exception:
            self.log("Failed to delete pid-file %s" % self.filePid)
            return False

    def log(self, message):
        """Print *message* and append it to the transfer's log-file
        (best-effort -- a failing log write only prints an error)."""
        printMessage(message)
        try:
            f = open(self.fileLog, "a+")
            try:
                f.write(getOutput(message))
                f.flush()
            finally:
                f.close()
        except Exception:
            printError("Failed to write log-file %s" % self.fileLog)

+ 125 - 0
html/bin/clients/fluazu/fluazu/TransferFile.py

@@ -0,0 +1,125 @@
+################################################################################
+# $Id: TransferFile.py 2891 2007-04-13 19:04:50Z b4rt $
+# $Date: 2007-04-13 14:04:50 -0500 (Fri, 13 Apr 2007) $
+# $Revision: 2891 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+# standard-imports
+import os
+# fluazu
+from fluazu.output import printError
+################################################################################
+
+""" ------------------------------------------------------------------------ """
+""" TransferFile                                                             """
+""" ------------------------------------------------------------------------ """
class TransferFile(object):
    """Reader/writer for a flux transfer- (meta-) file.

    A transfer-file holds 12 newline-separated settings for a transfer
    (owner, save-path, rate limits, ports, ...).
    """

    # on-disk field order of the transfer-file (read and written in this order)
    _FIELDS = ("transferowner", "savepath", "max_upload_rate",
               "max_download_rate", "max_uploads", "superseeder",
               "die_when_done", "sharekill", "minport", "maxport",
               "maxcons", "rerequest")

    def __init__(self, file):
        """Create a transfer-file object; parse *file* right away when a
        non-empty path is given."""

        # path of the transfer-file on disk
        self.file = file

        # fields (defaults, used until a file is parsed)
        self.transferowner = ""
        self.savepath = ""
        self.max_upload_rate = ""
        self.max_download_rate = ""
        self.max_uploads = ""
        self.superseeder = ""
        self.die_when_done = ""
        self.sharekill = ""
        self.minport = ""
        self.maxport = ""
        self.maxcons = ""
        self.rerequest = ""

        # init (original compared identity with '' -- plain truthiness is
        # the reliable spelling)
        if self.file:
            self.initialize(self.file)

    def initialize(self, file):
        """Read *file* and populate the fields from it.

        Returns True on success, False when the file is missing, unreadable
        or does not contain all 12 fields.
        """
        self.file = file

        # read in transfer-file + set fields
        if not os.path.isfile(self.file):
            return False
        try:
            f = open(self.file, 'r')
            try:
                data = f.read()
            finally:
                f.close()
        except (IOError, OSError):
            printError("Failed to read transfer-file %s " % self.file)
            return False

        # set fields (positional, see _FIELDS for the on-disk layout)
        content = data.split("\n")
        if len(content) > 11:
            for idx, name in enumerate(TransferFile._FIELDS):
                setattr(self, name, content[idx])
            return True
        printError("Failed to parse transfer-file %s " % self.file)
        return False

    def write(self):
        """Write the current field values back to the transfer-file.

        Returns True on success, False on I/O errors.
        """
        try:
            f = open(self.file, 'w')
            try:
                # all fields newline-separated, no trailing newline
                f.write('\n'.join(str(getattr(self, name))
                                  for name in TransferFile._FIELDS))
                f.flush()
            finally:
                f.close()
            return True
        except (IOError, OSError):
            printError("Failed to write transfer-file %s " % self.file)
        return False

+ 26 - 0
html/bin/clients/fluazu/fluazu/__init__.py

@@ -0,0 +1,26 @@
+################################################################################
+# $Id: __init__.py 2891 2007-04-13 19:04:50Z b4rt $
+# $Date: 2007-04-13 14:04:50 -0500 (Fri, 13 Apr 2007) $
+# $Revision: 2891 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+
# version
__version__ = (0, 0, 6)
# Dot-join every component: (0, 0, 6) -> '0.0.6'.
# (The original joined the tail components with no separator, so the
# version tuple (0, 0, 6) rendered as '0.06'.)
__version_str__ = '.'.join([str(part) for part in __version__])

+ 59 - 0
html/bin/clients/fluazu/fluazu/output.py

@@ -0,0 +1,59 @@
+################################################################################
+# $Id: output.py 2552 2007-02-08 21:40:46Z b4rt $
+# $Date: 2007-02-08 15:40:46 -0600 (Thu, 08 Feb 2007) $
+# $Revision: 2552 $
+################################################################################
+#                                                                              #
+# LICENSE                                                                      #
+#                                                                              #
+# This program is free software; you can redistribute it and/or                #
+# modify it under the terms of the GNU General Public License (GPL)            #
+# as published by the Free Software Foundation; either version 2               #
+# of the License, or (at your option) any later version.                       #
+#                                                                              #
+# This program is distributed in the hope that it will be useful,              #
+# but WITHOUT ANY WARRANTY; without even the implied warranty of               #
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the                 #
+# GNU General Public License for more details.                                 #
+#                                                                              #
+# To read the license please visit http://www.gnu.org/copyleft/gpl.html        #
+#                                                                              #
+#                                                                              #
+################################################################################
+# standard-imports
+import sys
+import time
+################################################################################
+
+""" ------------------------------------------------------------------------ """
+""" getPrefix                                                                """
+""" ------------------------------------------------------------------------ """
def getPrefix():
    """Return the current local time as a log prefix:
    '[YYYY/MM/DD - HH:MM:SS] ' (note the trailing space)."""
    stamp = time.strftime('[%Y/%m/%d - %H:%M:%S]', time.localtime())
    return "%s " % stamp
+
+""" ------------------------------------------------------------------------ """
+""" getOutput                                                                """
+""" ------------------------------------------------------------------------ """
def getOutput(message):
    """Prefix *message* with the timestamp and terminate it with a newline."""
    line = getPrefix() + message
    return line + "\n"
+
+""" ------------------------------------------------------------------------ """
+""" printMessage                                                             """
+""" ------------------------------------------------------------------------ """
def printMessage(message):
    """Write a timestamped message to stdout and flush immediately."""
    out = sys.stdout
    out.write(getOutput(message))
    out.flush()
+
+""" ------------------------------------------------------------------------ """
+""" printError                                                               """
+""" ------------------------------------------------------------------------ """
def printError(message):
    """Write a timestamped message to stderr and flush immediately."""
    err = sys.stderr
    err.write(getOutput(message))
    err.flush()
+
+""" ------------------------------------------------------------------------ """
+""" printException                                                           """
+""" ------------------------------------------------------------------------ """
def printException():
    """Write the current exception info (sys.exc_info()) to stdout,
    prefixed with a timestamp, and flush immediately.

    Call this from inside an 'except' block, where exc_info is set.
    """
    # The original used the Python-2-only 'print a, b' statement; this
    # write() produces byte-identical output ('prefix' + ' ' + tuple + '\n')
    # and also parses under Python 3.
    sys.stdout.write("%s %s\n" % (getPrefix(), sys.exc_info()))
    sys.stdout.flush()

+ 1356 - 0
html/bin/clients/mainline/BTL/CMap.py

@@ -0,0 +1,1356 @@
+#!/usr/bin/env python
+
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# By David Harrison
+
+# I was playing with doctest when I wrote this.  I still haven't
+# decided how useful doctest is as opposed to implementing unit tests
+# directly.  --Dave
+
# When run directly (doctest mode), make the package importable from the
# source tree without installation.
if __name__ == '__main__':
    import sys
    sys.path = ['.','..'] + sys.path  # HACK to simplify unit testing.
+
+from BTL.translation import _
+
class BEGIN:    # represents special BEGIN location before first next.
    # Sentinel type stored in _AbstractIterator._si to mark the position
    # one before begin(); never instantiated, compared by identity/equality.
    pass
+
+from UserDict import DictMixin
+from cmap_swig import *
+import sys
+from weakref import WeakKeyDictionary
+LEAK_TEST = False
+
+class CMap(object,DictMixin):  
+    """In-order mapping. Provides same operations and behavior as a dict,
+       but provides in-order iteration.  Additionally provides operations to
+       find the nearest key <= or >= a given key.
+
+       This provides a significantly wider set of operations than
+       berkeley db BTrees, but it provides no means for persistence.
+
+       LIMITATION: The key must be a python numeric type, e.g., an integer
+       or a float.  The value can be any python object.
+
+         Operation:       Time                 Applicable
+                          Complexity:          Methods:
+         ---------------------------------------------------
+         Item insertion:  O(log n)             append, __setitem__
+         Item deletion:   O(log n + k)         __delitem__, erase   
+         Key search:      O(log n)             __getitem__, get, find, 
+                                               __contains__
+         Value search:    n/a
+         Iteration step:  amortized O(1),      next, prev
+                          worst-case O(log n)
+         Memory:          O(n)
+
+       n = number of elements in map.  k = number of iterators pointing
+       into map.  CMap assumes there are few iterators in existence at 
+       any given time. 
+       
+       Iterators are not invalidated by insertions.  Iterators are
+       invalidated by deletions only when the key-value pair
+       referenced is deleted.  Deletion has a '+k' because the
+       __delitem__ searches linearly through the set of iterators
+       pointing into this map to find any iterator pointing at the
+       deleted item and then invalidates the iterator.
+
+       This class is backed by the C++ STL map class, but conforms
+       to the Python container interface."""
+
    class _AbstractIterator:
        """Iterates over elements in the map in order.

        Base class of KeyIterator/ValueIterator/ItemIterator, which differ
        only in what next()/prev() return.  Wraps a swig'd C++ map iterator
        (self._si) and registers itself with the owning CMap so deletions
        can invalidate it.
        """

        def __init__(self, m, si = BEGIN ): # "s.." implies swig object.
            """Creates an iterator pointing to element si in map m.
            
               Do not instantiate directly.  Use iterkeys, itervalues, or
               iteritems.

               The _AbstractIterator takes ownership of any C++ iterator
               (i.e., the swig object 'si') and will deallocate it when
               the iterator is deallocated.

               Examples of typical behavior:

               >>> from CMap import *
               >>> m = CMap()
               >>> m[12] = 6
               >>> m[9] = 4
               >>> for k in m:
               ...     print int(k)
               ...
               9
               12
               >>>

               Example edge cases (empty map):

               >>> from CMap import *
               >>> m = CMap()
               >>> try:
               ...     i = m.__iter__()
               ...     i.value()
               ... except IndexError:
               ...     print 'IndexError.'
               ...
               IndexError.
               >>> try:
               ...     i.next()
               ... except StopIteration:
               ...     print 'stopped'
               ...
               stopped

               @param m: CMap.
               @param si: swig'd C++ iterator that this iterator wraps.
                 If None then the iterator points to end().  If BEGIN
                 then the iterator points to one before the beginning.
             """
            assert isinstance(m, CMap)
            assert not isinstance(si, CMap._AbstractIterator)
            if si == None:
                self._si = map_end(m._smap)
            else:
                self._si = si           # C++ iterator wrapped by swig.
            self._map = m
            m._iterators[self] = 1      # using map as set of weak references.

        def __hash__(self):
            # Identity hash: each iterator is a distinct key in the owning
            # map's _iterators WeakKeyDictionary.
            return id(self)
        
        def __cmp__(self, other):
            # Orders by position in the map; BEGIN sorts before everything.
            if not self._si or not other._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN and other._si == BEGIN: return 0
            if self._si == BEGIN and other._si != BEGIN: return -1
            elif self._si != BEGIN and other._si == BEGIN: return 1
            return iter_cmp(self._map._smap, self._si, other._si )

        def at_begin(self):
            """equivalent to self == m.begin() where m is a CMap.
            
                 >>> from CMap import CMap
                 >>> m = CMap()
                 >>> i = m.begin()
                 >>> i == m.begin()
                 True
                 >>> i.at_begin()
                 True
                 >>> i == m.end()   # no elements so begin()==end()
                 True
                 >>> i.at_end()
                 True
                 >>> m[6] = 'foo'   # insertion does not invalidate iterators.
                 >>> i = m.begin()
                 >>> i == m.end()
                 False
                 >>> i.value()
                 'foo'
                 >>> try:           # test at_begin when not at beginning.
                 ...    i.next()
                 ... except StopIteration:
                 ...    print 'ok'
                 ok
                 >>> i.at_begin()
                 False
                     
                 
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:  # BEGIN is one before begin().  Yuck!!
                return False
            return map_iter_at_begin(self._map._smap, self._si)
        
        def at_end(self):
            """equivalent to self == m.end() where m is a CMap, but
               at_end is faster because it avoids the dynamic memory
               allocation in m.end().

                 >>> from CMap import CMap
                 >>> m = CMap()
                 >>> m[6] = 'foo'
                 >>> i = m.end()   # test when at end.
                 >>> i == m.end()
                 True
                 >>> i.at_end()
                 True
                 >>> int(i.prev())
                 6
                 >>> i.at_end()    # testing when not at end.
                 False

               """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                return False
            return map_iter_at_end(self._map._smap, self._si)
        
        def key(self):
            """@return: the key of the key-value pair referenced by this
                   iterator.
               @raise RuntimeError: if the iterator has been invalidated.
               @raise IndexError: if positioned at BEGIN or end().
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                raise IndexError(_("Cannot dereference iterator until after "
                                 "first call to .next."))
            elif map_iter_at_end(self._map._smap, self._si):
                raise IndexError()
            
            return iter_key(self._si)

        def value(self):
            """@return: the value of the key-value pair currently referenced
                   by this iterator.
               @raise RuntimeError: if the iterator has been invalidated.
               @raise IndexError: if positioned at BEGIN or end().
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                raise IndexError(_("Cannot dereference iterator until after "
                                 "first call to next."))
            elif map_iter_at_end(self._map._smap, self._si):
                raise IndexError()

            return iter_value(self._si)
        
        def item(self):
            """@return the key-value pair referenced by this iterator.
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            return self.key(), self.value()

        def _next(self):
            # Advance one position; raises StopIteration at (or past) end().
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                # First advance: move from the sentinel onto begin().
                self._si = map_begin(self._map._smap)

                if map_iter_at_end(self._map._smap,self._si):
                    raise StopIteration
                return

            if map_iter_at_end(self._map._smap,self._si):
                raise StopIteration

            iter_incr(self._si)

            if map_iter_at_end(self._map._smap,self._si):
                raise StopIteration
            
        def _prev(self):
            # Step back one position; entering BEGIN raises StopIteration.
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                raise StopIteration()
            
            elif map_iter_at_begin(self._map._smap, self._si):
                self._si = BEGIN
                raise StopIteration

            iter_decr(self._si)

        def __del__(self):
            # Python note: if a reference to x is intentionally
            # eliminated using "del x" and there are other references
            # to x then __del__ does not get called at this time.
            # Only when the last reference is deleted by an intentional
            # "del" or when the reference goes out of scope does
            # the __del__ method get called.
            self._invalidate()
            
        def _invalidate(self):
            # Deregister from the owning map and free the C++ iterator.
            # Idempotent: safe to call on an already-invalid iterator.
            if self._si == None:
                return
            try:
                del self._map._iterators[self]
            except KeyError:
                pass  # could've been removed because weak reference,
                      # and because _invalidate is called from __del__.
            if self._si != BEGIN:
                iter_delete(self._si)   # release the swig-owned C++ iterator.
            self._si = None

        def __iter__(self):
            """If the iterator is itself iteratable then we do things like:
                >>> from CMap import CMap
                >>> m = CMap()
                >>> m[10] = 'foo'
                >>> m[11] = 'bar'
                >>> for x in m.itervalues():
                ...     print x
                ...
                foo
                bar
                
            """
            return self

        def __len__(self):
            # Length of the underlying map, not "remaining elements".
            return len(self._map)
+
    class KeyIterator(_AbstractIterator):
        # Concrete iterator whose next()/prev() yield keys.
        def next(self):
            """Returns the next key in the map.
            
               Insertion does not invalidate iterators.  Deletion only
               invalidates an iterator if the iterator pointed at the
               key-value pair being deleted.
               
               This is implemented by moving the iterator and then
               dereferencing it.  If we dereferenced and then moved
               then we would get the odd behavior:
               
                 Ex:  I have keys [1,2,3].  The iterator i points at 1.
                   print i.next()   # prints 1
                   print i.next()   # prints 2
                   print i.prev()   # prints 3
                   print i.prev()   # prints 2
               
               However, because we move and then dereference, when an
               iterator is first created it points to nowhere
               so that the first next moves to the first element.
               
               Ex:
                   >>> from CMap import *
                   >>> m = CMap()
                   >>> m[5] = 1
                   >>> m[8] = 4
                   >>> i = m.__iter__()
                   >>> print int(i.next())
                   5
                   >>> print int(i.next())
                   8
                   >>> print int(i.prev())
                   5
               
               We are still left with the odd behavior that an
               iterator cannot be dereferenced until after the first next().
               
               Ex edge cases:
                   >>> from CMap import CMap
                   >>> m = CMap()
                   >>> i = m.__iter__()
                   >>> try:
                   ...     i.prev()
                   ... except StopIteration:
                   ...     print 'StopIteration'
                   ...
                   StopIteration
                   >>> m[5]='a'
                   >>> i = m.iterkeys()
                   >>> int(i.next())
                   5
                   >>> try: i.next()
                   ... except StopIteration:  print 'StopIteration'
                   ...
                   StopIteration
                   >>> int(i.prev())
                   5
                   >>> try: int(i.prev())
                   ... except StopIteration: print 'StopIteration'
                   ...
                   StopIteration
                   >>> int(i.next())
                   5
                   
            """
            self._next()
            return self.key()

        def prev(self):
            """Returns the previous key in the map.

               See next() for more detail and examples.
               """
            self._prev()
            return self.key()
+
    class ValueIterator(_AbstractIterator):
        # Concrete iterator whose next()/prev() yield values (still ordered
        # by key).
        def next(self):
            """@return: next value in the map.

                >>> from CMap import *
                >>> m = CMap()
                >>> m[5] = 10
                >>> m[6] = 3
                >>> i = m.itervalues()
                >>> int(i.next())
                10
                >>> int(i.next())
                3
            """
            self._next()
            return self.value()
        
        def prev(self):
            """@return: previous value in the map (by key ordering)."""
            self._prev()
            return self.value()
+
+    class ItemIterator(_AbstractIterator):
+        def next(self):
+            """@return: next item in the map's key ordering.
+
+                >>> from CMap import CMap
+                >>> m = CMap()
+                >>> m[5] = 10
+                >>> m[6] = 3
+                >>> i = m.iteritems()
+                >>> k,v = i.next()
+                >>> int(k)
+                5
+                >>> int(v)
+                10
+                >>> k,v = i.next()
+                >>> int(k)
+                6
+                >>> int(v)
+                3
+            """
+            self._next()
+            return self.key(), self.value()
+
+        def prev(self):
+            self._prev()
+            return self.key(), self.value()
+    
+    def __init__(self, d={} ):
+        """Instantiate RBTree containing values from passed dict and
+           ordered based on cmp.
+
+            >>> m = CMap()
+            >>> len(m)
+            0
+            >>> m[5]=2
+            >>> len(m)
+            1
+            >>> print m[5]
+            2
+
+        """
+        #self._index = {}                # to speed up searches.
+        self._smap = map_constructor()  # C++ map wrapped by swig.
+        for key, value in d.items():
+            self[key]=value
+        self._iterators = WeakKeyDictionary()
+                                   # whenever node is deleted. search iterators
+                                   # for any iterator that becomes invalid.
+
+    def __contains__(self,x):
+        return self.get(x) != None
+
+    def __iter__(self):
+        """@return: KeyIterator positioned one before the beginning of the
+            key ordering so that the first next() returns the first key."""
+        return CMap.KeyIterator(self)
+
+    def begin(self):
+        """Returns an iterator pointing at first key-value pair.  This
+           differs from iterkeys, itervalues, and iteritems which return an
+           iterator pointing one before the first key-value pair.
+
+           @return: key iterator to first key-value.
+
+              >>> from CMap import *
+              >>> m = CMap()
+              >>> m[5.0] = 'a'
+              >>> i = m.begin()
+              >>> int(i.key())    # raises no IndexError.
+              5
+              >>> i = m.iterkeys()
+              >>> try:
+              ...     i.key()
+              ... except IndexError:
+              ...     print 'IndexError raised'
+              ...
+              IndexError raised
+           """
+        i = CMap.KeyIterator(self, map_begin(self._smap) )
+        return i
+
+        
+    def end(self):
+        """Returns an iterator pointing after end of key ordering.
+           The iterator's prev method will move to the last
+           key-value pair in the ordering.  This in keeping with
+           the notion that a range is specified as [i,j) where
+           j is not in the range, and the range [i,j) where i==j
+           is an empty range.
+
+           This operation takes O(1) time.
+
+           @return: key iterator one after end.
+           """
+        i = CMap.KeyIterator(self,None) # None means one after last node.
+        return i
+
    def iterkeys(self):
        """@return: KeyIterator positioned one before the first key."""
        return CMap.KeyIterator(self)
+
    def itervalues(self):
        """@return: ValueIterator positioned one before the first value."""
        return CMap.ValueIterator(self)
+
    def iteritems(self):
        """@return: ItemIterator positioned one before the first item."""
        return CMap.ItemIterator(self)
+
    def __len__(self):
        """@return: number of key-value pairs in the map."""
        return map_size(self._smap)
+
+    def __str__(self):
+        s = "{"
+        first = True
+        for k,v in self.items():
+            if first:
+                first = False
+            else:
+                s += ", "
+            if type(v) == str:
+                s += "%s: '%s'" % (k,v)
+            else:
+                s += "%s: %s" % (k,v)
+        s += "}"
+        return s
+    
+    def __repr__(self):
+        return self.__str__()
+    
    def __getitem__(self, key):
        """@return: the value mapped to key.
           @raise KeyError: if key is not in the map.
        """
        # IMPL 1: without _index
        return map_find(self._smap,key)     # raises KeyError if key not found

        # IMPL 2: with _index.
        #return iter_value(self._index[key])
+
    def __setitem__(self, key, value):
        """Insert, or overwrite, the value for key.

           @param key: numeric key (int or float; enforced below).
           @param value: arbitrary python object.

            >>> from CMap import CMap
            >>> m = CMap()
            >>> m[6] = 'bar'
            >>> m[6]
            'bar'
            >>>
            """
        # NOTE(review): assert is stripped under -O, and the exact-type test
        # rejects bool/long -- presumably deliberate for the C++ key type;
        # confirm before relaxing to isinstance.
        assert type(key) == int or type(key) == float
        
        # IMPL 1. without _index.
        map_set(self._smap,key,value)

        ## IMPL 2. with _index
        ## If using indices following allows us to perform only one search.
        #i = map_insert_iter(self._smap,key,value)
        #if iter_value(i) != value:
        #    iter_set(i,value)     
        #else: self._index[key] = i
        ## END IMPL2
+
    def __delitem__(self, key):
        """Deletes the item with matching key from the map.

           This takes O(log n + k) where n is the number of elements
           in the map and k is the number of iterators pointing into the map.
           Before deleting the item it linearly searches through
           all iterators pointing into the map and invalidates any that
           are pointing at the item about to be deleted.

           @param key: key of the pair to remove.
           @raise KeyError: if key is not in the map.

           >>> from CMap import CMap
           >>> m = CMap()
           >>> m[12] = 'foo'
           >>> m[13] = 'bar'
           >>> m[14] = 'boo'
           >>> del m[12]
           >>> try:
           ...   m[12]
           ... except KeyError:
           ...   print 'ok'
           ...
           ok
           >>> j = m.begin()
           >>> int(j.next())
           14
           >>> i = m.begin()
           >>> i.value()
           'bar'
           >>> del m[13]  # delete object referenced by an iterator
           >>> try:
           ...   i.value()
           ... except RuntimeError:
           ...   print 'ok'
           ok
           >>> j.value()   # deletion should not invalidate other iterators.
           'boo'

           """
        
        #map_erase( self._smap, key )  # map_erase is dangerous.  It could
                                       # delete the node causing an iterator
                                       # to become invalid. --Dave
                                       
        si = map_find_iter( self._smap, key )  # si = swig'd iterator.
        if map_iter_at_end(self._smap, si):
            iter_delete(si)   # free the C++ iterator before raising.
            raise KeyError(key)

        # Invalidate every python iterator pointing at the doomed node.
        # NOTE(review): i._si may be the BEGIN sentinel here; passing BEGIN
        # to iter_cmp looks unsafe -- verify against cmap_swig.
        for i in list(self._iterators):
            if iter_cmp( self._smap, i._si, si ) == 0:
                i._invalidate()
        map_iter_erase( self._smap, si )
        iter_delete(si)   # done with the temporary C++ iterator.

        #iter_delete( self._index[key] )  # IMPL 2. with _index.
        #del self._index[key]             # IMPL 2. with _index.
+
+    def erase(self, iter):
+        """Remove item pointed to by the iterator.  All iterators that
+           point at the erased item including the passed iterator
+           are immediately invalidated after the deletion completes.
+
+           >>> from CMap import CMap
+           >>> m = CMap()
+           >>> m[12] = 'foo'
+           >>> i = m.find(12)
+           >>> m.erase(i)
+           >>> len(m) == 0
+           True
+
+           """
+        if not iter._si:
+            raise RuntimeError( _("invalid iterator") )
+        if iter._si == BEGIN:
+            raise IndexError(_("Iterator does not point at key-value pair" ))
+        if self is not iter._map:
+            raise IndexError(_("Iterator points into a different CMap."))
+        if map_iter_at_end(self._smap, iter._si):
+            raise IndexError( _("Cannot erase end() iterator.") )
+
+        # invalidate iterators.
+        for i in list(self._iterators):
+            if iter._si is not i._si and iiter_cmp( self._smmap, iter._si, i._si ) == 0:
+                i._invalidate()
+
+        # remove item from the map.
+        map_iter_erase( self._smap, iter._si )        
+
+        # invalidate last iterator pointing to the deleted location in the map.
+        iter._invalidate()
+
    def __del__(self):
        """Invalidate all outstanding iterators, then free the C++ map."""
        # invalidate all iterators.
        for i in list(self._iterators):
            i._invalidate()
        map_delete(self._smap)   # release the swig-owned C++ map.
+
+    def get(self, key, default=None):
+        """@return value corresponding to specified key or return 'default'
+               if the key is not found.
+           """
+        try:
+            return map_find(self._smap,key)     # IMPL 1. without _index.
+            #return iter_value(self._index[key])  # IMPL 2. with _index.
+
+        except KeyError:
+            return default
+
+    def keys(self):
+        """
+           >>> from CMap import *
+           >>> m = CMap()
+           >>> m[4.0] = 7
+           >>> m[6.0] = 3
+           >>> [int(x) for x in m.keys()]  # m.keys() but guaranteed integers.
+           [4, 6]
+           
+        """
+        k = []
+        for key in self:
+            k.append(key)
+        return k
+    
+    def values(self):
+        """
+           >>> from CMap import CMap
+           >>> m = CMap()
+           >>> m[4.0] = 7
+           >>> m[6.0] = 3
+           >>> m.values()
+           [7, 3]
+           
+        """
+        i = self.itervalues()
+        v = []
+        try:
+            while True:
+                v.append(i.next())
+        except StopIteration:
+            pass
+        return v
+        
+
+    def items(self):
+        """
+           >>> from CMap import CMap
+           >>> m = CMap()
+           >>> m[4.0] = 7
+           >>> m[6.0] = 3
+           >>> [(int(x[0]),int(x[1])) for x in m.items()]
+           [(4, 7), (6, 3)]
+           
+        """
+    
+        i = self.iteritems()
+        itms = []
+        try:
+            while True:
+                itms.append(i.next())
+        except StopIteration:
+            pass
+        
+        return itms
+    
+    def has_key(self, key):
+        """
+           >>> from CMap import CMap
+           >>> m = CMap()
+           >>> m[4.0] = 7
+           >>> if m.has_key(4): print 'ok'
+           ...
+           ok
+           >>> if not m.has_key(7): print 'ok'
+           ...
+           ok
+           
+        """
+        try:
+            self[key]
+        except KeyError:
+            return False
+        return True
+
    def clear(self):
        """delete all entries

           >>> from CMap import CMap
           >>> m = CMap()
           >>> m[4] = 7
           >>> m.clear()
           >>> print len(m)
           0
           
        """

        # Deliberate explicit __del__: invalidates all iterators and frees
        # the C++ map, then a fresh empty map replaces it.
        self.__del__()
        self._smap = map_constructor()
+
    def copy(self):
        """@return: shallow copy (keys/values are shared, not cloned)."""
        return CMap(self)
+
    def lower_bound(self,key):
        """
         Finds smallest key equal to or above the lower bound.

         Takes O(log n) time.

         @param key: Key of (key, value) pair to be located.
         @return: Key Iterator pointing to first item equal to or greater
                  than key, or end() if no such item exists.

           >>> from CMap import CMap
           >>> m = CMap()
           >>> m[10] = 'foo'
           >>> m[15] = 'bar'
           >>> i = m.lower_bound(11)   # iterator.
           >>> int(i.key())
           15
           >>> i.value()
           'bar'
           
        Edge cases:
           >>> from CMap import CMap
           >>> m = CMap()
           >>> i = m.lower_bound(11)
           >>> if i == m.end(): print 'ok'
           ...
           ok

           >>> m[10] = 'foo'
           >>> i = m.lower_bound(11)
           >>> if i == m.end(): print 'ok'
           ...
           ok
           >>> i = m.lower_bound(9)
           >>> if i == m.begin(): print 'ok'
           ...
           ok

        """
        return CMap.KeyIterator(self, map_lower_bound( self._smap, key ))
+
+
+    def upper_bound(self, key):
+        """
+         Finds largest key equal to or below the upper bound.  In keeping
+         with the [begin,end) convention, the returned iterator
+         actually points to the key one above the upper bound. 
+
+         Takes O(log n) time.
+
+         @param  x:  Key of (key, value) pair to be located.
+         @return:  Iterator pointing to first element equal to or greater than
+                  key, or end() if no such item exists.
+
+           >>> from CMap import CMap
+           >>> m = CMap()
+           >>> m[10] = 'foo'
+           >>> m[15] = 'bar'
+           >>> m[17] = 'choo'
+           >>> i = m.upper_bound(11)   # iterator.
+           >>> i.value()
+           'bar'
+
+         Edge cases:
+           >>> from CMap import CMap
+           >>> m = CMap()
+           >>> i = m.upper_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+           >>> m[10] = 'foo'
+           >>> i = m.upper_bound(9)
+           >>> i.value()
+           'foo'
+           >>> i = m.upper_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+
+        """
+        return CMap.KeyIterator(self, map_upper_bound( self._smap, key ))
+
+    def find(self,key):
+        """
+          Finds the item with matching key and returns a KeyIterator
+          pointing at the item.  If no match is found then returns end().
+     
+          Takes O(log n) time.
+     
+            >>> from CMap import CMap
+            >>> m = CMap()
+            >>> i = m.find(10)
+            >>> if i == m.end(): print 'ok'
+            ...
+            ok
+            >>> m[10] = 'foo'
+            >>> i = m.find(10)
+            >>> int(i.key())
+            10
+            >>> i.value()
+            'foo'
+
+        """
+        return CMap.KeyIterator(self, map_find_iter( self._smap, key ))
+
    def update_key( self, iter, key ):
        """
          Modifies the key of the item referenced by iter.  If the
          key change is small enough that no reordering occurs then
          this takes amortized O(1) time.  If a reordering occurs then
          this takes O(log n).

          WARNING!!! The passed iterator MUST be assumed to be invalid
          upon return and should be deallocated.

          Typical use:
            >>> from CMap import CMap
            >>> m = CMap()
            >>> m[10] = 'foo'
            >>> m[8] = 'bar'
            >>> i = m.find(10)
            >>> m.update_key(i,7)   # i is assumed to be invalid upon return.
            >>> del i
            >>> [(int(x[0]),x[1]) for x in m.items()]  # reordering occurred.
            [(7, 'foo'), (8, 'bar')]
            >>> i = m.find(8)
            >>> m.update_key(i,9)   # no reordering.
            >>> del i
            >>> [(int(x[0]),x[1]) for x in m.items()]
            [(7, 'foo'), (9, 'bar')]

          Edge cases:          
            >>> i = m.find(7)
            >>> i.value()
            'foo'
            >>> try:                # update to key already in the map.
            ...     m.update_key(i,9)
            ... except KeyError:
            ...     print 'ok'
            ...
            ok
            >>> m[7]
            'foo'
            >>> i = m.iterkeys()
            >>> try:                 # updating an iter pointing at BEGIN.
            ...    m.update_key(i,10)
            ... except IndexError:
            ...    print 'ok'
            ...
            ok
            >>> i = m.end()
            >>> try:                 # updating an iter pointing at end().
            ...    m.update_key(i,10)
            ... except IndexError:
            ...    print 'ok'
            ...
            ok
                        
        """
        assert isinstance(iter,CMap._AbstractIterator)
        # Guard order matters: BEGIN is a Python sentinel class, not a swig
        # iterator, so it must be rejected before iter._si could ever be
        # passed to a map_* function.
        if iter._si == BEGIN:
            raise IndexError( _("Iterator does not point at key-value pair") )
        if self is not iter._map:
            raise IndexError(_("Iterator points into a different CIndexedMap."))
        if map_iter_at_end(self._smap, iter._si):
            raise IndexError( _("Cannot update end() iterator.") )
        # The C++ layer performs the (possibly reordering) key update and
        # raises KeyError if the new key already exists in the map.
        map_iter_update_key(self._smap, iter._si, key)
+
+    def append(self, key, value):
+        """Performs an insertion with the hint that it probably should
+           go at the end.
+
+           Raises KeyError if the key is already in the map.
+
+             >>> from CMap import CMap
+             >>> m = CMap()
+             >>> m.append(5.0,'foo')    # append to empty map.
+             >>> len(m)
+             1
+             >>> [int(x) for x in m.keys()] # see note (1)
+             [5]
+             >>> m.append(10.0, 'bar')  # append in-order
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(5, 'foo'), (10, 'bar')]
+             >>> m.append(3.0, 'coo')   # out-of-order.
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(3, 'coo'), (5, 'foo'), (10, 'bar')]
+             >>> try:
+             ...     m.append(10.0, 'blah') # append key already in map.
+             ... except KeyError:
+             ...     print 'ok'
+             ...
+             ok
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(3, 'coo'), (5, 'foo'), (10, 'bar')]
+             >>>
+
+             note (1): int(x[0]) is used because 5.0 can appear as either 5
+             or 5.0 depending on the version of python.
+           """
+        map_append(self._smap,key,value)
+    
+        
+
class CIndexedMap(CMap):
    """This is an ordered mapping, exactly like CMap except that it
       provides a cross-index allowing average O(1) searches based on value.
       This adds  the constraint that values must be unique.

         Operation:       Time                 Applicable
                          Complexity:          Methods:
         ---------------------------------------------------
         Item insertion:  O(log n)             append, __setitem__
         Item deletion:   O(log n + k)         __delitem__, erase   
         Key search:      O(log n)             __getitem__, get, find, 
                                               __contains__
         Value search:    average O(1)  as per dict
         Iteration step:  amortized O(1),      next, prev
                          worst-case O(log n)
         Memory:          O(n)

       n = number of elements in map.  k = number of iterators pointing
       into map.  CIndexedMap assumes there are few iterators in existence 
       at any given time. 

       The hash table increases the factor in the
       O(n) memory cost of the Map by a constant
    """
    
    def __init__(self, dict={} ):
        # NOTE(review): the shared mutable default mirrors CMap.__init__'s
        # signature; it appears to be read-only here, but confirm that
        # CMap.__init__ never mutates its argument.
        CMap.__init__(self,dict)
        self._value_index = {}   # cross-index. maps value->iterator.

    def __setitem__(self, key, value):
        """
            >>> from CMap import *
            >>> m = CIndexedMap()
            >>> m[6] = 'bar'
            >>> m[6]
            'bar'
            >>> int(m.get_key_by_value('bar'))
            6
            >>> try:
            ...    m[7] = 'bar'
            ... except ValueError:
            ...    print 'value error'
            value error
            >>> m[6] = 'foo'
            >>> m[6]
            'foo'
            >>> m[7] = 'bar'
            >>> m[7]
            'bar'
            >>> m[7] = 'bar'  # should not raise exception
            >>> m[7] = 'goo'
            >>> m.get_key_by_value('bar')  # should return None.
            >>> 

        """
        assert type(key) == int or type(key) == float
        # Enforce value uniqueness, except when re-assigning the same value
        # to the same key (a no-op from the index's point of view).
        if self._value_index.has_key(value) and \
           iter_key(self._value_index[value]) != key:
            raise ValueError( _("Value %s already exists.  Values must be "
                "unique.") % str(value) )

        si = map_insert_iter(self._smap,key,value) # si points where insert
                                                   # should occur whether 
                                                   # insert succeeded or not.
                                                   # si == "swig iterator"
        sival = iter_value(si)
        if sival != value:          # if insert failed because k already exists
            iter_set(si,value)      # then force set.
            self._value_index[value] = si
            viter = self._value_index[sival]
            iter_delete(viter)     # remove old value from index
            del self._value_index[sival]  
        else:                      # else insert succeeded so update index.
            self._value_index[value] = si
        #self._index[key] = si       # IMPL 2. with _index.

    def __delitem__(self, key):
        """
            >>> from CMap import CIndexedMap
            >>> m = CIndexedMap()
            >>> m[6] = 'bar'
            >>> m[6]
            'bar'
            >>> int(m.get_key_by_value('bar'))
            6
            >>> del m[6]
            >>> if m.get_key_by_value('bar'):
            ...     print 'found'
            ... else:
            ...     print 'not found.'
            not found.

        """
        i = map_find_iter( self._smap, key )
        if map_iter_at_end( self._smap, i ):
            iter_delete(i)
            raise KeyError(key)
        else:
            value = iter_value(i)
            # BUG FIX: the original loop reused the name 'i' (clobbering the
            # swig iterator about to be erased) and compared against
            # 'iter._si', where 'iter' is the *builtin* -- an AttributeError
            # whenever any Python iterator was outstanding.  Invalidate any
            # Python-level iterator that points at the pair being deleted.
            for it in list(self._iterators):
                if it._si != BEGIN and \
                   iter_cmp( self._smap, it._si, i ) == 0:
                    it._invalidate()
            map_iter_erase( self._smap, i )
            viter = self._value_index[value]
            iter_delete(i)
            iter_delete( viter )
            del self._value_index[value]
            #del self._index[key]         # IMPL 2. with _index.
        # Map and cross-index must always stay in sync.
        assert map_size(self._smap) == len(self._value_index)

    def has_value(self, value):
        """Return True iff some entry in the map has the given value."""
        return self._value_index.has_key(value)

    def get_key_by_value(self, value):
        """Returns the key cross-indexed from the passed unique value, or
           returns None if the value is not in the map."""
        si = self._value_index.get(value)  # si == "swig iterator"
        if si is None: return None
        return iter_key(si)

    def append( self, key, value ):
        """See CMap.append

             >>> from CMap import CIndexedMap
             >>> m = CIndexedMap()
             >>> m.append(5,'foo')
             >>> [(int(x[0]),x[1]) for x in m.items()]
             [(5, 'foo')]
             >>> m.append(10, 'bar')
             >>> [(int(x[0]),x[1]) for x in m.items()]
             [(5, 'foo'), (10, 'bar')]
             >>> m.append(3, 'coo')   # out-of-order.
             >>> [(int(x[0]),x[1]) for x in m.items()]
             [(3, 'coo'), (5, 'foo'), (10, 'bar')]
             >>> int(m.get_key_by_value( 'bar' ))
             10
             >>> try:
             ...     m.append(10, 'blah') # append key already in map.
             ... except KeyError:
             ...     print 'ok'
             ...
             ok
             >>> [(int(x[0]),x[1]) for x in m.items()]
             [(3, 'coo'), (5, 'foo'), (10, 'bar')]
             >>> try:
             ...     m.append(10, 'coo') # append value already in map.
             ... except ValueError:
             ...     print 'ok'
             ...
             ok

        """
        # Uniqueness check, as in __setitem__.
        if self._value_index.has_key(value) and \
           iter_key(self._value_index[value]) != key:
            raise ValueError(_("Value %s already exists and value must be "
                "unique.") % str(value) )
        
        si = map_append_iter(self._smap,key,value)
        if iter_value(si) != value:   # append failed: key already present.
            iter_delete(si)
            raise KeyError(key)
        self._value_index[value] = si
        

    def find_key_by_value(self, value):
        """Returns a key iterator cross-indexed from the passed unique value
           or end() if no value found.

           >>> from CMap import *
           >>> m = CIndexedMap()
           >>> m[6] = 'abc'
           >>> i = m.find_key_by_value('abc')
           >>> int(i.key())
           6
           >>> i = m.find_key_by_value('xyz')
           >>> if i == m.end(): print 'i points at end()'
           i points at end()

        """
        si = self._value_index.get(value)  # si == "swig iterator."
        if si is not None:
            si = iter_copy(si); # copy else operations like increment on the
                                # KeyIterator would modify the value index.
        return CMap.KeyIterator(self,si)

    def copy(self):
        """return shallow copy"""
        return CIndexedMap(self)

    def update_key( self, iter, key ):
        """
          see CMap.update_key.
          
          WARNING!! You MUST assume that the passed iterator is invalidated
          upon return.

          Typical use:
            >>> from CMap import CIndexedMap
            >>> m = CIndexedMap()
            >>> m[10] = 'foo'
            >>> m[8] = 'bar'
            >>> i = m.find(10)
            >>> m.update_key(i,7)   # i is assumed to be invalid upon return.
            >>> del i
            >>> int(m.get_key_by_value('foo'))
            7
            >>> [(int(x[0]),x[1]) for x in m.items()]    # reordering occurred.
            [(7, 'foo'), (8, 'bar')]
            >>> i = m.find(8)
            >>> m.update_key(i,9)   # no reordering.
            >>> del i
            >>> [(int(x[0]),x[1]) for x in m.items()]
            [(7, 'foo'), (9, 'bar')]

          Edge cases:
            >>> i = m.find(7)
            >>> i.value()
            'foo'
            >>> try:
            ...     m.update_key(i,9)
            ... except KeyError:
            ...     print 'ok'
            ...
            ok
            >>> m[7]
            'foo'
            >>> int(m.get_key_by_value('foo'))
            7
            >>> i = m.iterkeys()
            >>> try:                 # updating an iter pointing at BEGIN.
            ...    m.update_key(i,10)
            ... except IndexError:
            ...    print 'ok'
            ...
            ok
            >>> i = m.end()
            >>> try:                # updating an iter pointing at end().
            ...    m.update_key(i,10)
            ... except IndexError:
            ...    print 'ok'
            ...
            ok
            
            
        """
        # Guard order matters: BEGIN is a Python sentinel, not a swig
        # iterator, so it must be rejected before any map_* call sees it.
        if not iter._si:
            raise RuntimeError( _("invalid iterator") )
        if iter._si == BEGIN:
            raise IndexError(_("Iterator does not point at key-value pair" ))
        if self is not iter._map:
            raise IndexError(_("Iterator points into a different "
                               "CIndexedMap."))
        if map_iter_at_end(self._smap, iter._si):
            raise IndexError( _("Cannot update end() iterator.") )
        si = map_iter_update_key_iter(self._smap, iter._si, key)
                                   # raises KeyError if key already in map.

        if si != iter._si:         # if map is reordered...
            # The cross-index entry for this value must now point at the
            # item's new position.
            value = iter.value()
            val_si = self._value_index[value]
            iter_delete(val_si)
            self._value_index[value] = si

    def erase(self, iter):
        """Remove item pointed to by the iterator.  Iterator is immediately
           invalidated after the deletion completes."""
        if not iter._si:
            raise RuntimeError( _("invalid iterator") )
        if iter._si == BEGIN:
            raise IndexError(_("Iterator does not point at key-value pair." ))
        if self is not iter._map:
            raise IndexError(_("Iterator points into a different "
                               "CIndexedMap."))
        if map_iter_at_end(self._smap, iter._si):
            # NOTE(review): message says "update" although this is erase;
            # kept as-is since translation catalogs may reference it.
            raise IndexError( _("Cannot update end() iterator.") )
        # Capture the value before CMap.erase invalidates the iterator, so
        # the cross-index entry can be dropped afterwards.
        value = iter.value()
        CMap.erase(self,iter)
        del self._value_index[value]
+
if __name__ == "__main__":
    import doctest
    import random


    ##############################################
    # UNIT TESTS
    # Runs every doctest embedded in the docstrings of this module.
    print "Testing module"
    doctest.testmod(sys.modules[__name__])
    print "doctest complete."
    
    
    ##############################################
    # MEMORY LEAK TESTS
    # Each numbered test below is an infinite loop exercising one swig
    # call pattern; leaks are observed externally (e.g., via top/ps).
    # Only TEST 9 is currently active; the rest are kept for reference.

    if LEAK_TEST:
        i = 0
        import gc
        class X:
            x = range(1000)  # something moderately big.
    
        # TEST 1. This does not cause memory to grow.
        #m = CMap()
        #map_insert(m._smap,10,X())
        #while True:
        #    i += 1
        #    it = map_find_iter( m._smap, 10 )
        #    iter_delete(it)
        #    del it
        #    if i % 100 == 0:
        #      gc.collect()
    
        # TEST 2: This does not cause a memory leak.
        #m = map_constructor_double()
        #while True:
        #    i += 1
        #    map_insert_double(m,10,5)        # here
        #    it = map_find_iter_double( m, 10 )
        #    map_iter_erase_double( m, it )     # or here is the problem.
        #    iter_delete_double(it)
        #    del it
        #    #assert len(m) == 0
        #    assert map_size_double(m) == 0
        #    if i % 100 == 0:
        #      gc.collect()
    
        # TEST 3. No memory leak
        #m = CMap()
        #while True:
        #    i += 1
        #    map_insert(m._smap,10,X())        # here
        #    it = map_find_iter( m._smap, 10 )
        #    map_iter_erase( m._smap, it )     # or here is the problem.
        #    iter_delete(it)
        #    del it
        #    assert len(m) == 0
        #    assert map_size(m._smap) == 0
        #    if i % 100 == 0:
        #      gc.collect()
    
    
        # TEST 4: map creation and deletion.
        #while True:
        #  m = map_constructor()
        #  map_delete(m);
    
        # TEST 5: test iteration.
        #m = map_constructor()
        #for i in xrange(10):
        #    map_insert(m,i,X())
        #while True:
        #    i = map_begin(m)
        #    while not map_iter_at_begin(m,i):
        #      iter_incr(i)
        #    iter_delete(i)
    
        # TEST 6:
        #m = map_constructor()
        #for i in xrange(10):
        #    map_insert(m,i,X())
        #while True:
        #    map_find( m, random.randint(0,9) )
    
        # TEST 7:
        #m = map_constructor()
        #for i in xrange(50):  
        #  map_insert( m, i, X() )
        #while True:
        #  for i in xrange(50):
        #    map_set( m, i, X() )
    
        # TEST 8
        # aha!  Another leak! Fixed.
        #m = map_constructor()
        #while True:
        #    i += 1
        #    map_insert(m,10,X()) 
        #    map_erase(m,10)
        #    assert map_size(m) == 0
    
        # TEST 9: repeatedly move a key back and forth via the raw swig
        # update-key call, freeing each temporary iterator as we go.
        m = map_constructor()
        for i in xrange(50):  
            map_insert( m, i, X() )
        while True:
            it = map_find_iter( m, 5 )
            map_iter_update_key( m, it, 1000 )  
            iter_delete(it)
            it = map_find_iter( m, 1000 )
            map_iter_update_key( m, it, 5)  
            iter_delete(it)
      
+

+ 1273 - 0
html/bin/clients/mainline/BTL/CMultiMap.py

@@ -0,0 +1,1273 @@
+#!/usr/bin/env python
+
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# By David Harrison
+
+# I was playing with doctest when I wrote this.  I still haven't
+# decided how useful doctest is as opposed to implementing unit tests
+# directly.  --Dave
+
if __name__ == '__main__':
    import sys
    sys.path = ['.','..'] + sys.path  # HACK to simplify unit testing.

from BTL.translation import _

class BEGIN:    # represents special BEGIN location before first next.
    pass

from UserDict import DictMixin
from cmultimap_swig import *   # swig wrapper around the C++ STL multimap.
import sys
from weakref import WeakKeyDictionary
LEAK_TEST = False   # set True to enable the manual leak tests in __main__.
+
+class CMultiMap(object, DictMixin):  
+    """In-order mapping.  Similar to a dict, except that it provides in-order
+       iteration and searches for the nearest key <= or >= a given key.  
+       Distinguishes itself from CMap in that CMultiMap instances allows 
+       multiple entries with the same key, thus __getitem__ and get always
+       return a list.  If there are no matching keys then __getitem__
+       or get returns an empty list, one match a single-element list, etc.
+       Values with the same key have arbitrary order.
+
+       LIMITATION: The key must be a double.  The value can be anything.
+
+         Item insertion:  O(log n)          append, __setitem__
+         Item deletion:   O(log n + k)      erase    
+                          O(log n + k + m)  __delitem__
+         Key search:      O(log n)          find, __contains__
+                          O(log n + m)      __getitem__, get
+         Value search:    n/a
+         Iteration step:  amortized O(1), worst-case O(log n)
+         Memory:          O(n)
+
+       n = number of elements in map.  k = number of iterators pointing
+       into map.  The assumption here is that there are few iterators
+       in existence at any given time.  m = number of elements matching
+       the key.
+       
+       Iterators are not invalidated by insertions.  Iterators are invalidated
+       by deletions only when the key-value pair referenced is deleted.
+       Deletion has a '+k' because __delitem__ searches linearly
+       through the set of iterators to find any iterator pointing at the
+       deleted item and then invalidates the iterator.
+
+       This class is backed by the C++ STL map class, but conforms
+       to the Python container interface."""
+
    class _AbstractIterator:
        """Iterates over elements in the map in order.

           Base for KeyIterator/ValueIterator/ItemIterator; owns the
           underlying swig-wrapped C++ iterator and frees it on
           invalidation.
        """

        def __init__(self, m, si = BEGIN ): # "s.." implies swig object.
            """Creates an iterator pointing to element si in map m.
            
               Do not instantiate directly.  Use iterkeys, itervalues, or
               iteritems.

               The _AbstractIterator takes ownership of any C++ iterator
               (i.e., the swig object 'si') and will deallocate it when
               the iterator is deallocated.

               Examples of typical behavior:

               >>> from CMultiMap import CMultiMap
               >>> m = CMultiMap()
               >>> m[12] = 6
               >>> m[9] = 4
               >>> for k in m:
               ...     print int(k)
               ...
               9
               12
               >>>

               Example edge cases (empty map):

               >>> from CMultiMap import CMultiMap
               >>> m = CMultiMap()
               >>> try:
               ...     i = m.__iter__()
               ...     i.value()
               ... except IndexError:
               ...     print 'IndexError.'
               ...
               IndexError.
               >>> try:
               ...     i.next()
               ... except StopIteration:
               ...     print 'stopped'
               ...
               stopped

               @param m: CMultiMap.
               @param si: swig iterator this iterator will point at.  If None
                 then the iterator points to end().  If BEGIN
                 then the iterator points to one before the beginning.
             """
            assert isinstance(m, CMultiMap)
            assert not isinstance(si, CMultiMap._AbstractIterator)
            if si == None:
                self._si = mmap_end(m._smmap)
            else:
                self._si = si           # C++ iterator wrapped by swig.
            self._mmap = m
            m._iterators[self] = 1      # using map as set of weak references.

        def __hash__(self):
            # Identity hash: each iterator object is a distinct key in the
            # map's weak-reference registry.
            return id(self)
        
        def __cmp__(self, other):
            # A _si of None means the iterator was invalidated.
            if not self._si or not other._si:
                raise RuntimeError( _("invalid iterator") )
            # BEGIN sorts before every real position; only non-BEGIN
            # iterators can be compared by the C++ layer.
            if self._si == BEGIN and other._si == BEGIN: return 0
            if self._si == BEGIN and other._si != BEGIN: return -1
            elif self._si != BEGIN and other._si == BEGIN: return 1
            return iiter_cmp(self._mmap._smmap, self._si, other._si )

        def at_begin(self):
            """equivalent to self == m.begin() where m is a CMultiMap.
            
                 >>> from CMultiMap import CMultiMap
                 >>> m = CMultiMap()
                 >>> i = m.begin()
                 >>> i == m.begin()
                 True
                 >>> i.at_begin()
                 True
                 >>> i == m.end()   # no elements so begin()==end()
                 True
                 >>> i.at_end()
                 True
                 >>> m[6] = 'foo'   # insertion does not invalidate iterators.
                 >>> i = m.begin()
                 >>> i == m.end()
                 False
                 >>> i.value()
                 'foo'
                 >>> try:           # test at_begin when not at beginning.
                 ...    i.next()
                 ... except StopIteration:
                 ...    print 'ok'
                 ok
                 >>> i.at_begin()
                 False
                     
                 
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:  # BEGIN is one before begin().  Yuck!!
                return False
            return mmap_iiter_at_begin(self._mmap._smmap, self._si)
        
        def at_end(self):
            """equivalent to self == m.end() where m is a CMap, but
               at_end is faster because it avoids the dynamic memory
               alloation in m.end().

                 >>> from CMultiMap import CMultiMap
                 >>> m = CMultiMap()
                 >>> m[6] = 'foo'
                 >>> i = m.end()   # test when at end.
                 >>> i == m.end()
                 True
                 >>> i.at_end()
                 True
                 >>> int(i.prev())
                 6
                 >>> i.at_end()    # testing when not at end.
                 False

               """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:   # BEGIN precedes begin(), never at end.
                return False
            return mmap_iiter_at_end(self._mmap._smmap, self._si)
        
        def key(self):
            """@return: the key of the key-value pair referenced by this
                   iterator.

               @raise RuntimeError: iterator has been invalidated.
               @raise IndexError: iterator is at BEGIN or end().
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                raise IndexError(_("Cannot dereference iterator until after "
                                 "first call to .next."))
            elif mmap_iiter_at_end(self._mmap._smmap, self._si):
                raise IndexError()
            
            return iiter_key(self._si)

        def value(self):
            """@return: the value of the key-value pair currently referenced
                   by this iterator.

               @raise RuntimeError: iterator has been invalidated.
               @raise IndexError: iterator is at BEGIN or end().
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                raise IndexError(_("Cannot dereference iterator until after "
                                 "first call to next."))
            elif mmap_iiter_at_end(self._mmap._smmap, self._si):
                raise IndexError()

            return iiter_value(self._si)
        
        def item(self):
            """@return the key-value pair referenced by this iterator.
            """
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            # key() and value() perform the remaining BEGIN/end() checks.
            return self.key(), self.value()

        def _next(self):
            # Advance one position; raises StopIteration at (or moving
            # onto) end().  From BEGIN, moves to the first element.
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                self._si = mmap_begin(self._mmap._smmap)

                if mmap_iiter_at_end(self._mmap._smmap,self._si):
                    raise StopIteration
                return

            if mmap_iiter_at_end(self._mmap._smmap,self._si):
                raise StopIteration

            iiter_incr(self._si)

            if mmap_iiter_at_end(self._mmap._smmap,self._si):
                raise StopIteration
            
        def _prev(self):
            # Step one position back; raises StopIteration at (or moving
            # onto) the BEGIN sentinel.
            if not self._si:
                raise RuntimeError( _("invalid iterator") )
            if self._si == BEGIN:
                raise StopIteration()
            
            elif mmap_iiter_at_begin(self._mmap._smmap, self._si):
                self._si = BEGIN
                raise StopIteration

            iiter_decr(self._si)

        def __del__(self):
            # Python note: "del x" merely eliminates one reference to an
            # object. __del__ isn't called until the ref count goes to 0.
            # Only when the last reference is gone is __del__ called.
            self._invalidate()
            
        def _invalidate(self):
            # Deregister from the map and free the owned C++ iterator.
            # Safe to call more than once.
            if self._si == None:  # if already invalidated...
                return
            try:
                del self._mmap._iterators[self]
            except KeyError:
                pass  # could've been removed because weak reference,
                      # and because _invalidate is called from __del__.
            if self._si != BEGIN:    # BEGIN is a sentinel, not a swig object.
                iiter_delete(self._si)
            self._si = None
                
        def __iter__(self):
            """If the iterator is itself iteratable then we do things like:
                >>> from CMultiMap import CMultiMap
                >>> m = CMultiMap()
                >>> m[10] = 'foo'
                >>> m[11] = 'bar'
                >>> for x in m.itervalues():
                ...     print x
                ...
                foo
                bar
                
            """
            return self

        def __len__(self):
            # Length of the underlying map, not "items remaining".
            return len(self._mmap)
+        
+    class KeyIterator(_AbstractIterator):
+        # Iterator over the keys of a CMultiMap, visited in sorted order.
+        def next(self):
+            """Returns the next key in the mmap.
+            
+               Insertion does not invalidate iterators.  Deletion only
+               invalidates an iterator if the iterator pointed at the
+               key-value pair being deleted.
+               
+               This is implemented by moving the iterator and then
+               dereferencing it.  If we dereferenced and then moved
+               then we would get the odd behavior:
+               
+                 Ex:  I have keys [1,2,3].  The iterator i points at 1.
+                   print i.next()   # prints 1
+                   print i.next()   # prints 2
+                   print i.prev()   # prints 3
+                   print i.prev()   # prints 2
+               
+               However, because we move and then dereference, when an
+               iterator is first created it points to nowhere
+               so that the first next moves to the first element.
+               
+               Ex:
+                   >>> from CMultiMap import *
+                   >>> m = CMultiMap()
+                   >>> m[5] = 1
+                   >>> m[8] = 4
+                   >>> i = m.__iter__()
+                   >>> print int(i.next())
+                   5
+                   >>> print int(i.next())
+                   8
+                   >>> print int(i.prev())
+                   5
+               
+               We are still left with the odd behavior that an
+               iterator cannot be dereferenced until after the first next().
+               
+               Ex edge cases:
+                   >>> from CMultiMap import CMultiMap
+                   >>> m = CMultiMap()
+                   >>> i = m.__iter__()
+                   >>> try:
+                   ...     i.prev()
+                   ... except StopIteration:
+                   ...     print 'StopIteration'
+                   ...
+                   StopIteration
+                   >>> m[5]='a'
+                   >>> i = m.iterkeys()
+                   >>> int(i.next())
+                   5
+                   >>> try: i.next()
+                   ... except StopIteration:  print 'StopIteration'
+                   ...
+                   StopIteration
+                   >>> int(i.prev())
+                   5
+                   >>> try: int(i.prev())
+                   ... except StopIteration: print 'StopIteration'
+                   ...
+                   StopIteration
+                   >>> int(i.next())
+                   5
+                   
+            """
+            # move-then-dereference: see the docstring discussion above.
+            self._next()
+            return self.key()
+
+        def prev(self):
+            """Returns the previous key in the mmap.
+
+               See next() for more detail and examples.
+               """
+            self._prev()
+            return self.key()
+
+    class ValueIterator(_AbstractIterator):
+        # Iterator over the values of a CMultiMap, visited in key order.
+        def next(self):
+            """@return: next value in the mmap.
+
+                >>> from CMultiMap import *
+                >>> m = CMultiMap()
+                >>> m[5] = 10
+                >>> m[6] = 3
+                >>> i = m.itervalues()
+                >>> int(i.next())
+                10
+                >>> int(i.next())
+                3
+            """
+            self._next()
+            return self.value()
+        
+        def prev(self):
+            """@return: previous value in the mmap (see KeyIterator.next
+               for the move-then-dereference movement semantics)."""
+            self._prev()
+            return self.value()
+
+    class ItemIterator(_AbstractIterator):
+        # Iterator over (key, value) pairs of a CMultiMap, in key order.
+        def next(self):
+            """@return: next item in the mmap's key ordering.
+
+                >>> from CMultiMap import CMultiMap
+                >>> m = CMultiMap()
+                >>> m[5] = 10
+                >>> m[6] = 3
+                >>> i = m.iteritems()
+                >>> k,v = i.next()
+                >>> int(k)
+                5
+                >>> int(v)
+                10
+                >>> k,v = i.next()
+                >>> int(k)
+                6
+                >>> int(v)
+                3
+            """
+            self._next()
+            return self.key(), self.value()
+
+        def prev(self):
+            """@return: previous (key, value) item in the mmap."""
+            self._prev()
+            return self.key(), self.value()
+    
+    def __init__(self, d={} ):
+        """Instantiate RBTree containing values from passed dict and
+           ordered based on cmp.
+
+            >>> m = CMultiMap()
+            >>> len(m)
+            0
+            >>> m[5]=2
+            >>> len(m)
+            1
+            >>> print m[5]
+            [2]
+
+        """
+        self._smmap = mmap_constructor()  # C++ mmap wrapped by swig.
+        for key, value in d.items():
+            self[key]=value
+        self._iterators = WeakKeyDictionary()
+                                   # whenever node is deleted. search iterators
+                                   # for any iterator that becomes invalid.
+
+    def __contains__(self,x):
+        return self.has_key(x)
+
+    def __iter__(self):
+        """@return: KeyIterator positioned one before the beginning of the
+            key ordering so that the first next() returns the first key."""
+        return CMultiMap.KeyIterator(self)
+
+    def begin(self):
+        """Returns an iterator pointing at first key-value pair.  This
+           differs from iterkeys, itervalues, and iteritems which return an
+           iterator pointing one before the first key-value pair.
+
+           @return: key iterator to first key-value.
+
+              >>> from CMultiMap import *
+              >>> m = CMultiMap()
+              >>> m[5.0] = 'a'
+              >>> i = m.begin()
+              >>> int(i.key())    # raises no IndexError.
+              5
+              >>> i = m.iterkeys()
+              >>> try:
+              ...     i.key()
+              ... except IndexError:
+              ...     print 'IndexError raised'
+              ...
+              IndexError raised
+           """
+        # mmap_begin returns a swig iterator at the first pair (equal to
+        # end() when the map is empty).
+        i = CMultiMap.KeyIterator(self, mmap_begin(self._smmap) )
+        return i
+
+        
+    def end(self):
+        """Returns an iterator pointing after end of key ordering.
+           The iterator's prev method will move to the last
+           key-value pair in the ordering.  This in keeping with
+           the notion that a range is specified as [i,j) where
+           j is not in the range, and the range [i,j) where i==j
+           is an empty range.
+
+           This operation takes O(1) time.
+
+           @return: key iterator one after end.
+           """
+        i = CMultiMap.KeyIterator(self,None) # None means one after last node.
+        return i
+
+    def iterkeys(self):
+        """@return: KeyIterator positioned one before the first key; the
+           first next() returns the smallest key."""
+        return CMultiMap.KeyIterator(self)
+
+    def itervalues(self):
+        """@return: ValueIterator positioned one before the first value
+           (values are visited in key order)."""
+        return CMultiMap.ValueIterator(self)
+
+    def iteritems(self):
+        """@return: ItemIterator over (key, value) pairs in key order,
+           positioned one before the first pair."""
+        return CMultiMap.ItemIterator(self)
+
+    def __len__(self):
+        """@return: number of key-value pairs (delegates to the C++ map)."""
+        return mmap_size(self._smmap)
+
+    def __str__(self):
+        s = "{"
+        first = True
+        for k,v in self.items():
+            if first:
+                first = False
+            else:
+                s += ", "
+            if type(v) == str:
+                s += "%s: '%s'" % (k,v)
+            else:
+                s += "%s: %s" % (k,v)
+        s += "}"
+        return s
+    
+    def __repr__(self):
+        return self.__str__()
+    
+    def __getitem__(self, key):
+        """Returns a list containing all matching values or the empty list
+           if the key is not found.
+
+           This differs in behavior from CMap which simply returns the value or
+           throws a KeyError if it is not present.
+           """
+        # mmap_find_iiter does NOT raise for a missing key; it returns an
+        # end() iterator (see __delitem__, which tests at_end explicitly).
+        # The old trailing comment here claimed otherwise.
+        si = mmap_find_iiter(self._smmap,key)
+        result = []
+        # Collect every value in the run of pairs with an equal key.
+        while not mmap_iiter_at_end(self._smmap, si) and iiter_key(si) == key:
+          result.append( iiter_value(si) )
+          iiter_incr(si)
+        iiter_delete(si)  # free the swig iterator (never registered in
+                          # self._iterators, so plain delete suffices).
+        return  result
+
+    def __setitem__(self, key, value):
+        """
+            >>> from CMultiMap import CMultiMap
+            >>> m = CMultiMap()
+            >>> m[6] = 'bar'
+            >>> m[6]
+            ['bar']
+            >>>
+            """
+        # Keys must be numeric for the underlying C++ multimap.
+        # NOTE(review): assert is stripped under python -O; raising
+        # TypeError would be sturdier -- confirm before changing, since
+        # callers may rely on AssertionError.
+        assert type(key) == int or type(key) == float
+        mmap_insert(self._smmap,key,value)
+
+    def __delitem__(self, key):
+        """Deletes all items with matching key from the mmap.
+
+           This takes O(log n + km) where n is the number of elements
+           in the mmap and k is the number of iterators pointing into the mmap,
+           and m is the number of items matching the key.
+           
+           Before deleting an item it linearly searches through
+           all iterators pointing into the mmap and invalidates any that
+           are pointing at the item about to be deleted.
+
+           Raises a KeyError if the key is not found.
+
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[12] = 'foo'
+           >>> m[13] = 'bar'
+           >>> m[14] = 'boo'
+           >>> del m[12]
+           >>> m[12]
+           []
+           >>> j = m.begin()
+           >>> int(j.next())
+           14
+           >>> i = m.begin()
+           >>> i.value()
+           'bar'
+           >>> del m[13]  # delete object referenced by an iterator
+           >>> try:
+           ...   i.value()
+           ... except RuntimeError:
+           ...   print 'ok'
+           ok
+           >>> j.value()   # deletion should not invalidate other iterators.
+           'boo'
+
+           """
+
+        si = sprev = None
+
+        try:
+          #mmap_erase( self._smmap, key )  # mmap_erase is dangerous.  It could
+                                           # delete the node causing an
+                                           # iterator to become invalid. --Dave
+                                         
+          si = mmap_find_iiter( self._smmap, key )  # si = swig'd iterator.
+          if mmap_iiter_at_end(self._smmap, si):
+              raise KeyError(key)
+          sprev = iiter_copy(si)
+          
+          # HERE this could be written more efficiently. --Dave.
+          # Walk the run of equal keys; for each node, first invalidate any
+          # live python iterator pointing at it, then advance si past the
+          # node and erase through sprev (which still points at it).
+          while not mmap_iiter_at_end(self._smmap, si) and \
+                iiter_key(si) == key:
+              for i in list(self._iterators):
+                  if iiter_cmp( self._smmap, i._si, si ) == 0:
+                      i._invalidate()
+              iiter_incr(si)
+              mmap_iiter_erase( self._smmap, sprev )
+              iiter_assign(sprev, si)
+              
+        finally:      
+          # Always free the temporary swig iterators, even when raising
+          # KeyError, to avoid leaking the C++ objects.
+          if si:
+              iiter_delete(si)
+          if sprev:
+              iiter_delete(sprev)
+
+    def erase(self, iter):
+        """Remove item pointed to by the iterator.  All iterators that
+           point at the erased item including the passed iterator
+           are immediately invalidated after the deletion completes.
+
+           (The parameter name 'iter' shadows the builtin but is part of
+           the public signature and is kept for compatibility.)
+
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[12] = 'foo'
+           >>> i = m.find(12)
+           >>> m.erase(i)
+           >>> len(m) == 0
+           True
+
+           """
+        # Validate the iterator: not already invalidated, not the
+        # pre-begin sentinel, owned by this map, and not end().
+        if not iter._si:
+            raise RuntimeError( _("invalid iterator") )
+        if iter._si == BEGIN:
+            raise IndexError(_("Iterator does not point at key-value pair" ))
+        if self is not iter._mmap:
+            raise IndexError(_("Iterator points into a different CMultiMap."))
+        if mmap_iiter_at_end(self._smmap, iter._si):
+            raise IndexError( _("Cannot erase end() iterator.") )
+
+        # invalidate iterators.
+        for i in list(self._iterators):
+            if iter._si is not i._si and iiter_cmp( self._smmap, iter._si, i._si ) == 0:
+                i._invalidate()
+
+        # remove item from the map.
+        mmap_iiter_erase( self._smmap, iter._si )
+
+        # invalidate last iterator pointing to the deleted location in the map.
+        iter._invalidate() 
+
+
+    def __del__(self):
+        # Runs at garbage collection and is also called directly by
+        # clear().  Invalidate every live iterator first so none is left
+        # holding a dangling swig iterator, then free the C++ map.
+
+        # invalidate all iterators.
+        for i in list(self._iterators):
+            i._invalidate()
+        mmap_delete(self._smmap)
+
+    def get(self, key, default=None):
+        """
+           @return list containing values corresponding to specified key or
+               return a single-element list containing 'default'
+               if the key is not found.  If 'default' is None then the
+               empty list is returned when the key is not found.
+
+           >>> from CMultiMap import *
+           >>> m = CMultiMap()
+           >>> m[5] = 'a'
+           >>> m.get(5)
+           ['a']
+           >>> m[5] = 'b'
+           >>> m.get(5)
+           ['a', 'b']
+           >>> m.get(6)
+           []
+           >>> m.get(6,'c')
+           ['c']
+ 
+           """
+        if self.has_key(key):
+            return self[key]
+        if default is None:
+            return []
+        return [default]
+
+    def keys(self):
+        """
+           >>> from CMultiMap import *
+           >>> m = CMultiMap()
+           >>> m[4.0] = 7
+           >>> m[6.0] = 3
+           >>> [int(x) for x in m.keys()]  # m.keys() but guaranteed integers.
+           [4, 6]
+           
+        """
+        k = []
+        for key in self:
+            k.append(key)
+        return k
+    
+    def values(self):
+        """
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[4.0] = 7
+           >>> m[6.0] = 3
+           >>> m.values()
+           [7, 3]
+           
+        """
+        i = self.itervalues()
+        v = []
+        try:
+            while True:
+                v.append(i.next())
+        except StopIteration:
+            pass
+        return v
+        
+
+    def items(self):
+        """
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[4.0] = 7
+           >>> m[6.0] = 3
+           >>> [(int(x[0]),int(x[1])) for x in m.items()]
+           [(4, 7), (6, 3)]
+           
+        """
+    
+        i = self.iteritems()
+        itms = []
+        try:
+            while True:
+                itms.append(i.next())
+        except StopIteration:
+            pass
+        
+        return itms
+    
+    def has_key(self, key):
+        """
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[4.0] = 7
+           >>> if m.has_key(4): print 'ok'
+           ...
+           ok
+           >>> if not m.has_key(7): print 'ok'
+           ...
+           ok
+           
+        """
+        try:
+            mmap_find(self._smmap, key)
+        except KeyError:
+            return False
+        return True
+
+    def clear(self):
+        """delete all entries
+
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[4] = 7
+           >>> m.clear()
+           >>> print len(m)
+           0
+           
+        """
+
+        # Reuses __del__ to invalidate all outstanding iterators and free
+        # the C++ map, then builds a fresh empty one.  Calling __del__
+        # directly is unusual but deliberate; the object stays usable.
+        self.__del__()
+        self._smmap = mmap_constructor()
+
+    def copy(self):
+        """Return a shallow copy: keys are re-inserted into a new map but
+           the value objects themselves are shared."""
+        return CMultiMap(self)
+
+    def lower_bound(self,key):
+        """
+         Finds smallest key equal to or above the lower bound.
+
+         Takes O(log n) time.
+
+         @param x: Key of (key, value) pair to be located.
+         @return: Key Iterator pointing to first item equal to or greater
+                  than key, or end() if no such item exists.
+
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[10] = 'foo'
+           >>> m[15] = 'bar'
+           >>> i = m.lower_bound(11)   # iterator.
+           >>> int(i.key())
+           15
+           >>> i.value()
+           'bar'
+           
+        Edge cases:
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> i = m.lower_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+
+           >>> m[10] = 'foo'
+           >>> i = m.lower_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+           >>> i = m.lower_bound(9)
+           >>> if i == m.begin(): print 'ok'
+           ...
+           ok
+
+        """
+        # mmap_lower_bound returns a swig iterator at the first pair with
+        # key >= the bound, or end() when none qualifies.
+        return CMultiMap.KeyIterator(self, mmap_lower_bound( self._smmap, key ))
+
+
+    def upper_bound(self, key):
+        """
+         Finds largest key equal to or below the upper bound.  In keeping
+         with the [begin,end) convention, the returned iterator
+         actually points to the key one above the upper bound. 
+
+         Takes O(log n) time.
+
+         @param  x:  Key of (key, value) pair to be located.
+         @return:  Iterator pointing to first element equal to or greater than
+                  key, or end() if no such item exists.
+
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> m[10] = 'foo'
+           >>> m[15] = 'bar'
+           >>> m[17] = 'choo'
+           >>> i = m.upper_bound(11)   # iterator.
+           >>> i.value()
+           'bar'
+
+         Edge cases:
+           >>> from CMultiMap import CMultiMap
+           >>> m = CMultiMap()
+           >>> i = m.upper_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+           >>> m[10] = 'foo'
+           >>> i = m.upper_bound(9)
+           >>> i.value()
+           'foo'
+           >>> i = m.upper_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+
+        """
+        # Delegates to the C++ multimap's upper_bound.
+        return CMultiMap.KeyIterator(self, mmap_upper_bound( self._smmap, key ))
+
+    def find(self,key):
+        """
+          Finds the first item with matching key and returns a KeyIterator
+          pointing at the item.  If no match is found then returns end().
+     
+          Takes O(log n) time.
+     
+            >>> from CMultiMap import CMultiMap
+            >>> m = CMultiMap()
+            >>> i = m.find(10)
+            >>> if i == m.end(): print 'ok'
+            ...
+            ok
+            >>> m[10] = 'foo'
+            >>> i = m.find(10)
+            >>> int(i.key())
+            10
+            >>> i.value()
+            'foo'
+
+        """
+        # mmap_find_iiter returns an end() iterator on a miss (no KeyError).
+        return CMultiMap.KeyIterator(self, mmap_find_iiter( self._smmap, key ))
+
+    def update_key( self, iter, key ):
+        """
+          Modifies the key of the item referenced by iter.  If the
+          key change is small enough that no reordering occurs then
+          this takes amortized O(1) time.  If a reordering occurs then
+          this takes O(log n).
+
+          WARNING!!! The passed iterator MUST be assumed to be invalid
+          upon return.  Any further operation on the passed iterator other than 
+          deallocation results in a RuntimeError exception.
+
+          Typical use:
+            >>> from CMultiMap import CMultiMap
+            >>> m = CMultiMap()
+            >>> m[10] = 'foo'
+            >>> m[8] = 'bar'
+            >>> i = m.find(10)
+            >>> m.update_key(i,7)   # i is assumed to be invalid upon return.
+            >>> del i
+            >>> [(int(x[0]),x[1]) for x in m.items()]  # reordering occurred.
+            [(7, 'foo'), (8, 'bar')]
+            >>> i = m.find(8)
+            >>> m.update_key(i,9)   # no reordering.
+            >>> del i
+            >>> [(int(x[0]),x[1]) for x in m.items()]
+            [(7, 'foo'), (9, 'bar')]
+
+          Edge cases:          
+            >>> i = m.find(7)
+            >>> i.value()
+            'foo'
+            >>> m.update_key(i,9)  # update to key already in the mmap.
+            >>> m[7]
+            []
+            >>> m[9]
+            ['foo', 'bar']
+            >>> i = m.iterkeys()
+            >>> try:                 # updating an iter pointing at BEGIN.
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+            >>> i = m.end()
+            >>> try:                 # updating an iter pointing at end().
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+                        
+        """
+        assert isinstance(iter,CMultiMap._AbstractIterator)
+        if not iter._si:
+            raise RuntimeError( _("invalid iterator") )
+        if iter._si == BEGIN:
+            raise IndexError(_("Iterator does not point at key-value pair" ))
+        if self is not iter._mmap:
+            raise IndexError(_("Iterator points into a different CMultiMap."))
+        if mmap_iiter_at_end(self._smmap, iter._si):
+            raise IndexError( _("Cannot erase end() iterator.") )
+
+        mmap_iiter_update_key(self._smmap, iter._si, key)
+
+    def append(self, key, value):
+        """Performs an insertion with the hint that it probably should
+           go at the end.
+
+           Duplicate keys are allowed (this is a multimap); appending an
+           existing key simply adds another pair, as the last doctest
+           below demonstrates.
+
+             >>> from CMultiMap import CMultiMap
+             >>> m = CMultiMap()
+             >>> m.append(5.0,'foo')    # append to empty mmap.
+             >>> len(m)
+             1
+             >>> [int(x) for x in m.keys()] # see note (1)
+             [5]
+             >>> m.append(10.0, 'bar')  # append in-order
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(5, 'foo'), (10, 'bar')]
+             >>> m.append(3.0, 'coo')   # out-of-order.
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(3, 'coo'), (5, 'foo'), (10, 'bar')]
+             >>> m.append(10.0, 'blah') # append key already in mmap.
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(3, 'coo'), (5, 'foo'), (10, 'bar'), (10, 'blah')]
+             >>>
+
+             note (1): int(x[0]) is used because 5.0 can appear as either 5
+             or 5.0 depending on the version of python.
+           """
+        mmap_append(self._smmap,key,value)
+    
+
+class CIndexedMultiMap(CMultiMap):
+    """This is an ordered mmapping, exactly like CMultiMap except that it
+       provides a cross-index allowing average O(1) searches based on value.
+       This adds the constraint that values must be unique (multiple equal
+       keys can still be exist in the map).
+
+         Item insertion:  O(log n)       append, __setitem__
+         Item deletion:   O(log n)    
+         Key search:      O(log n)       __getitem__, get, find, __contains__
+         Value search:    average O(1)  as per dict
+         Iteration step:  amortized O(1), worst-case O(log n)
+         Memory:          O(n)
+
+
+       The hash table increases the factor in the
+       O(n) memory cost of the Map by a constant
+    """
+    def __init__(self, dict={} ):
+        CMultiMap.__init__(self,dict)
+        self._value_index = {}   # cross-index. mmaps value->iterator.
+
+    def __setitem__(self, key, value):
+        """
+            >>> from CMultiMap import *
+            >>> m = CIndexedMultiMap()
+            >>> m[6] = 'bar'
+            >>> m[6]
+            ['bar']
+            >>> int(m.get_key_by_value('bar'))
+            6
+            >>> try:
+            ...    m[7] = 'bar'      # values must be unique!
+            ... except ValueError:
+            ...    print 'value error'
+            value error
+            >>> m[6] = 'foo'
+            >>> m[6]
+            ['bar', 'foo']
+            >>> try:
+            ...     m[7] = 'bar'     # 2 values to 1 key. Values still unique!
+            ... except ValueError:
+            ...     print 'value error'
+            value error
+            >>> m[7]
+            []
+            >>> int(m.get_key_by_value('bar'))
+            6
+    
+        """
+        assert type(key) == int or type(key) == float
+        if self._value_index.has_key(value) and \
+           iiter_key(self._value_index[value]) != key:
+            raise ValueError( _("Value %s already exists.  Values must be "
+                "unique.") % str(value) )
+        
+        si = mmap_insert_iiter(self._smmap,key,value) # si points where insert
+                                                   # should occur whether 
+                                                   # insert succeeded or not.
+                                                   # si == "swig iterator"
+        sival = iiter_value(si)
+        if sival != value:          # if insert failed because k already exists
+            iiter_set(si,value)      # then force set.
+            self._value_index[value] = si
+            viter = self._value_index[sival]
+            iiter_delete(viter)     # remove old value from index
+            del self._value_index[sival]  
+        else:                      # else insert succeeded so update index.
+            self._value_index[value] = si
+
+    def __delitem__(self, key):
+        """
+            >>> from CMultiMap import CIndexedMultiMap
+            >>> m = CIndexedMultiMap()
+            >>> m[6] = 'bar'
+            >>> m[6]
+            ['bar']
+            >>> int(m.get_key_by_value('bar'))
+            6
+            >>> del m[6]
+            >>> if m.get_key_by_value('bar'):
+            ...     print 'found'
+            ... else:
+            ...     print 'not found.'
+            not found.
+
+        """
+        i = mmap_find_iiter( self._smmap, key )
+        if mmap_iiter_at_end( self._smmap, i ):
+            iiter_delete(i)
+            raise KeyError(key)
+        else:
+            value = iiter_value(i)
+            for i in list(self._iterators):
+                if iiter_cmp( self._smmap, i._si, iter._si ) == 0:
+                    i._invalidate()
+            mmap_iiter_erase( self._smmap, i )
+            viter = self._value_index[value]
+            iiter_delete(i)
+            iiter_delete( viter )
+            del self._value_index[value]
+        assert mmap_size(self._smmap) == len(self._value_index)
+
+    def has_value(self, value):
+       return self._value_index.has_key(value)
+
+    def get_key_by_value(self, value):
+        """Returns the key cross-indexed from the passed unique value, or
+           returns None if the value is not in the mmap."""
+        si = self._value_index.get(value)  # si == "swig iterator"
+        if si == None: return None
+        return iiter_key(si)
+
+    def append( self, key, value ):
+        """See CMultiMap.append
+
+             >>> from CMultiMap import CIndexedMultiMap
+             >>> m = CIndexedMultiMap()
+             >>> m.append(5,'foo')
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(5, 'foo')]
+             >>> m.append(10, 'bar')
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(5, 'foo'), (10, 'bar')]
+             >>> m.append(3, 'coo')   # out-of-order.
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(3, 'coo'), (5, 'foo'), (10, 'bar')]
+             >>> int(m.get_key_by_value( 'bar' ))
+             10
+             >>> m.append(10, 'blah') # append key already in mmap.
+             >>> [(int(x[0]),x[1]) for x in m.items()]
+             [(3, 'coo'), (5, 'foo'), (10, 'bar'), (10, 'blah')]
+             >>> try:
+             ...     m.append(10, 'coo') # append value already in mmap.
+             ... except ValueError:
+             ...     print 'ok'
+             ...
+             ok
+
+        """
+        if self._value_index.has_key(value) and \
+           iiter_key(self._value_index[value]) != key:
+            raise ValueError(_("Value %s already exists and value must be "
+                "unique.") % str(value) )
+        
+        si = mmap_append_iiter(self._smmap,key,value)
+        if iiter_value(si) != value:
+            iiter_delete(si)
+            raise KeyError(key)
+        self._value_index[value] = si
+        
+
+    def find_key_by_value(self, value):
+        """Returns a key iterator cross-indexed from the passed unique value
+           or end() if no value found.
+
+           >>> from CMultiMap import CIndexedMultiMap
+           >>> m = CIndexedMultiMap()
+           >>> m[6] = 'abc'
+           >>> i = m.find_key_by_value('abc')
+           >>> int(i.key())
+           6
+           >>> i = m.find_key_by_value('xyz')
+           >>> if i == m.end(): print 'i points at end()'
+           i points at end()
+
+        """
+        si = self._value_index.get(value)  # si == "swig iterator."
+        if si != None:
+            si = iiter_copy(si); # copy else operations like increment on the
+                                # KeyIterator would modify the value index.
+        return CMultiMap.KeyIterator(self,si)
+
+    def copy(self):
+        """return shallow copy"""
+        return CIndexedMultiMap(self)
+
+    def update_key( self, iter, key ):
+        """
+          see CMultiMap.update_key.
+          
+          WARNING!! You MUST assume that the passed iterator is invalidated
+          upon return.
+
+          Typical use:
+            >>> from CMultiMap import CIndexedMultiMap
+            >>> m = CIndexedMultiMap()
+            >>> m[10] = 'foo'
+            >>> m[8] = 'bar'
+            >>> i = m.find(10)
+            >>> m.update_key(i,7)   # i is assumed to be invalid upon return.
+            >>> del i
+            >>> int(m.get_key_by_value('foo'))
+            7
+            >>> [(int(x[0]),x[1]) for x in m.items()]    # reordering occurred.
+            [(7, 'foo'), (8, 'bar')]
+            >>> i = m.find(8)
+            >>> m.update_key(i,9)   # no reordering.
+            >>> del i
+            >>> [(int(x[0]),x[1]) for x in m.items()]
+            [(7, 'foo'), (9, 'bar')]
+
+          Edge cases:
+            >>> i = m.find(7)
+            >>> i.value()
+            'foo'
+            >>> m.update_key(i,9)
+            >>> m[7]
+            []
+            >>> m[9]
+            ['foo', 'bar']
+            >>> int(m.get_key_by_value('foo'))
+            9
+            >>> i = m.iterkeys()
+            >>> try:                 # updating an iter pointing at BEGIN.
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+            >>> i = m.end()
+            >>> try:                # updating an iter pointing at end().
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+            
+        """
+        if not iter._si:
+            raise RuntimeError( _("invalid iterator") )
+        if iter._si == BEGIN:
+            raise IndexError(_("Iterator does not point at key-value pair" ))
+        if self is not iter._mmap:
+            raise IndexError(_("Iterator points into a different "
+                               "CIndexedMultiMap."))
+        if mmap_iiter_at_end(self._smmap, iter._si):
+            raise IndexError( _("Cannot update end() iterator.") )
+
+        si = mmap_iiter_update_key_iiter(self._smmap, iter._si, key)
+                                   # raises KeyError if key already in mmap.
+
+        if si != iter._si:         # if mmap is reordered...
+            value = iter.value();
+            val_si = self._value_index[value]
+            iiter_delete(val_si)
+            self._value_index[value] = si
+            
+    def erase(self, iter):
+        """Remove item pointed to by the iterator.  Iterator is immediately
+           invalidated after the deletion completes."""
+        value = iter.value()
+        CMultiMap.erase(self,iter)
+        del self._value_index[value]
+
+if __name__ == "__main__":
+
+    import sys, doctest
+
+    ############
+    # UNIT TESTS
+    # Run every doctest embedded in the docstrings of this module.
+    print "Testing module"
+    doctest.testmod(sys.modules[__name__])
+
+    if LEAK_TEST:
+        # Now test for memory leaks.
+        print "testing for memory leaks.  Loop at top to see if process memory allocation grows."
+        print "CTRL-C to stop test."
+        # Run > top
+        # Does memory consumption for the process continuously increase? Yes == leak.
+        m = CMultiMap()
+        
+        # insert and delete repeatedly.
+        i = 0
+        import gc
+        class X:
+            x = range(1000)  # something moderately big.
+        
+        # Alternative leak test that exercises the raw swig calls directly
+        # (kept for reference, disabled):
+        #while True:
+        #    i += 1
+        #    mmap_insert(m._smmap,10,X())
+        #    it = mmap_find_iiter( m._smmap, 10 )
+        #    mmap_iiter_erase( m._smmap, it )
+        #    iiter_delete(it)
+        #    assert len(m) == 0
+        #    assert mmap_size(m._smmap) == 0
+        #    if i % 100 == 0:
+        #      gc.collect()
+            
+        # Infinite loop by design: watch the process RSS under top while
+        # it inserts and deletes through the python-level API.
+        while True:
+            i += 1
+            m[10] = X()
+            del m[10]
+            assert len(m) == 0
+            if i % 100 == 0:
+              gc.collect()
+       

+ 333 - 0
html/bin/clients/mainline/BTL/ConnectionRateLimitReactor.py

@@ -0,0 +1,333 @@
+# usage:
+#
+# from twisted.internet import reactor
+# from ConnectionRateLimitReactor import connectionRateLimitReactor
+# connectionRateLimitReactor(reactor, max_incomplete=10)
+#
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# by Greg Hazel
+
+import random
+import threading
+from twisted.python import failure
+from twisted.python import threadable
+from twisted.internet import error, address, abstract
+from BTL.circular_list import CircularList
+from BTL.Lists import QList
+from BTL.decorate import decorate_func
+
+debug = False
+
+
+class HookedFactory(object):
+    """Proxy around a twisted client factory that notifies the owning
+       IRobotConnector when a connection attempt finishes -- either by
+       failing or by successfully building a protocol -- so the rate
+       limiter can free the half-open slot and start the next postponed
+       connection.  All other attribute access is forwarded to the
+       wrapped factory."""
+    
+    def __init__(self, connector, factory):
+        self.connector = connector
+        self.factory = factory
+
+    def clientConnectionFailed(self, connector, reason):
+        # A failed attempt also frees a half-open slot.
+        if self.connector._started:
+            self.connector.complete()
+        return self.factory.clientConnectionFailed(connector, reason)
+
+    def buildProtocol(self, addr):
+        p = self.factory.buildProtocol(addr)
+        # Fire connector.complete() just before the protocol's own
+        # connectionMade: the connection is no longer half-open.
+        p.connectionMade = decorate_func(self.connector.complete,
+                                         p.connectionMade)
+        return p
+
+    def __getattr__(self, attr):
+        # Delegate everything else to the real factory.
+        return getattr(self.factory, attr)
+    
+
+class IRobotConnector(object):
+    """Connector stand-in returned by the rate-limited connectTCP/SSL.
+       It defers the real reactor connect until the ConnectionRateLimiter
+       allows it, and reports back when the attempt completes.  Note that
+       self.reactor here is the ConnectionRateLimiter, not the raw
+       twisted reactor."""
+    # I did this to be nice, but zope sucks.
+    ##implements(interfaces.IConnector)
+
+    def __init__(self, reactor, protocol, host, port, factory, owner, urgent,
+                 *a, **kw):
+        self.reactor = reactor      # actually the ConnectionRateLimiter
+        self.protocol = protocol
+        assert self.protocol in ('INET', 'SSL')
+        self.host = host
+        self.port = port
+        self.owner = owner          # grouping key for fair postponement
+        self.urgent = urgent        # urgent connections may preempt others
+        self.a = a                  # extra positional args for the real connect
+        self.kw = kw                # extra keyword args for the real connect
+        self.connector = None       # real IConnector once connect() has run
+        self._started = False
+        self.preempted = False
+
+        self.factory = HookedFactory(self, factory)
+
+    def started(self):
+        """Mark the attempt in-flight and register it as a half-open
+           connection with the limiter."""
+        if self._started:
+            raise ValueError("Connector is already started!")
+        self._started = True
+        self.reactor.add_pending_connection(self.host, self)
+        
+    def disconnect(self):
+        # In-flight: tear down the real connection; otherwise cancel the
+        # pending/postponed attempt.
+        if self._started:
+            return self.connector.disconnect()
+        return self.stopConnecting()
+
+    def _cleanup(self):
+        # Drop references so factory/args can be garbage collected.
+        if hasattr(self, 'a'):
+            del self.a
+        if hasattr(self, 'kw'):
+            del self.kw
+        if hasattr(self, 'factory'):
+            del self.factory
+        if hasattr(self, 'connector'):
+            del self.connector
+        
+    def stopConnecting(self):
+        if self._started:
+            self.connector.stopConnecting()
+            self._cleanup()
+            return            
+        # Not started yet: remove from the limiter's postponed queue and
+        # synthesize the started/failed callbacks the factory expects.
+        self.reactor.drop_postponed(self)
+        # for accuracy
+        self.factory.startedConnecting(self)
+        abort = failure.Failure(error.UserError(string="Connection preempted"))
+        self.factory.clientConnectionFailed(self, abort)
+        self._cleanup()
+            
+    def connect(self):
+        """Perform the real reactor connect (TCP or SSL)."""
+        if debug: print 'connecting', self.host, self.port
+        self.started()
+        try:
+            if self.protocol == 'SSL':
+                self.connector = self.reactor.old_connectSSL(self.host,
+                                                             self.port,
+                                                             self.factory,
+                                                             *self.a, **self.kw)
+            else:
+                self.connector = self.reactor.old_connectTCP(self.host,
+                                                             self.port,
+                                                             self.factory,
+                                                             *self.a, **self.kw)
+            # because other callbacks use this one
+            self.connector.wasPreempted = self.wasPreempted
+        except:
+            # make sure failures get removed before we raise
+            self.complete()
+            raise
+        # if connect is re-called on the connector, we want to restart
+        self.connector.connect = decorate_func(self.started,
+                                               self.connector.connect)
+        return self
+
+    def wasPreempted(self):
+        return self.preempted
+
+    def complete(self):
+        """Called when the attempt finishes (success or failure):
+           unregister the half-open connection and release resources."""
+        if not self._started:
+            return
+        self._started = False
+        self.reactor._remove_pending_connection(self.host, self)
+        self._cleanup()
+
+    def getDestination(self):
+        return address.IPv4Address('TCP', self.host, self.port, self.protocol)
+
+
+class Postponed(CircularList):
+    """Queue of connection attempts waiting for a half-open slot.
+       Non-urgent connectors are grouped per 'owner' key in the circular
+       list so owners are served round-robin; urgent connectors go to a
+       separate preempt queue that is always served first."""
+
+    def __init__(self):
+        CircularList.__init__(self)
+        self.it = iter(self)        # persistent round-robin cursor
+        self.preempt = QList()      # urgent connectors, FIFO
+        self.cm_to_list = {}        # owner key -> QList of connectors
+
+    def __len__(self):
+        # total connectors queued, urgent and non-urgent
+        l = 0
+        for k, v in self.cm_to_list.iteritems():
+            l += len(v)
+        l += len(self.preempt)
+        return l
+
+    def append_preempt(self, c):
+        return self.preempt.append(c)
+    
+    def add_connection(self, keyable, c):
+        if keyable not in self.cm_to_list:
+            self.cm_to_list[keyable] = QList()
+            self.prepend(keyable)
+        self.cm_to_list[keyable].append(c)
+
+    def pop_connection(self):
+        """Pop the next connector: urgent queue first, then the next
+           owner in round-robin order.  Empty owners are unlinked."""
+        if self.preempt:
+            return self.preempt.popleft()
+        keyable = self.it.next()
+        l = self.cm_to_list[keyable]
+        c = l.popleft()
+        if len(l) == 0:
+            self.remove(keyable)
+            del self.cm_to_list[keyable]
+        return c
+
+    def remove_connection(self, keyable, c):
+        # hmmm
+        if c.urgent:
+            self.preempt.remove(c)
+            return
+        l = self.cm_to_list[keyable]
+        l.remove(c)
+        if len(l) == 0:
+            self.remove(keyable)
+            del self.cm_to_list[keyable]
+
+
+class ConnectionRateLimiter(object):
+    """Caps the number of simultaneously half-open outgoing connections
+       (works around the per-host incomplete-connection limit -- the code
+       below refers to the XP limit).  Attempts past the cap are
+       postponed and started as half-open slots free up; urgent attempts
+       may preempt in-flight ones."""
+    
+    def __init__(self, reactor, max_incomplete):
+        self.reactor = reactor
+        self.postponed = Postponed()
+        self.max_incomplete = max_incomplete
+        # this can go away when urllib does
+        self.halfopen_hosts_lock = threading.RLock()
+        self.halfopen_hosts = {}    # host/ip -> list of connectors (None entries from urllib)
+        self.old_connectTCP = self.reactor.connectTCP
+        self.old_connectSSL = self.reactor.connectSSL
+
+        if debug:
+            # periodic invariant dump: postponed count and half-open hosts
+            from twisted.internet import task
+            def p():
+                print len(self.postponed), [ (k, len(v)) for k, v in self.halfopen_hosts.iteritems() ]
+                assert len(self.halfopen_hosts) <= self.max_incomplete
+            task.LoopingCall(p).start(1)   
+
+    # safe from any thread  
+    def add_pending_connection(self, host, connector=None):
+        """Register a half-open connection.  connector may be None for
+           callers (urllib) that have no connector object to hand."""
+        if debug: print 'adding', host, 'IOthread', threadable.isInIOThread()
+        self.halfopen_hosts_lock.acquire()
+        self.halfopen_hosts.setdefault(host, []).append(connector)
+        self.halfopen_hosts_lock.release()
+
+    # thread footwork, because _remove actually starts new connections
+    def remove_pending_connection(self, host, connector=None):
+        if not threadable.isInIOThread():
+            self.reactor.callFromThread(self._remove_pending_connection,
+                                        host, connector)
+        else:
+            self._remove_pending_connection(host, connector)
+
+    def _remove_pending_connection(self, host, connector=None):
+        if debug: print 'removing', host
+        self.halfopen_hosts_lock.acquire()
+        self.halfopen_hosts[host].remove(connector)
+        if len(self.halfopen_hosts[host]) == 0:
+            # a host slot freed up: kick off the next postponed attempt
+            del self.halfopen_hosts[host]
+            self._push_new_connections()
+        self.halfopen_hosts_lock.release()
+
+    def _push_new_connections(self):
+        if not self.postponed:
+            return
+        c = self.postponed.pop_connection()
+        self._connect(c)
+
+    def drop_postponed(self, c):
+        self.postponed.remove_connection(c.owner, c)
+
+    def _preempt_for(self, c):
+        """Queue urgent connector c and disconnect the preemptible host
+           with the fewest in-flight attempts to free a slot for it."""
+        if debug: print '\npreempting for', c.host, c.port, '\n'
+        self.postponed.append_preempt(c)
+            
+        sorted = []
+
+        for connectors in self.halfopen_hosts.itervalues():
+
+            # drop hosts with connectors that have no handle (urllib)
+            # drop hosts with any urgent connectors
+            can_preempt = True
+            for s in connectors:
+                if not s or s.urgent:
+                    can_preempt = False
+                    break
+            if not can_preempt:
+                continue
+            
+            sorted.append((len(connectors), connectors))
+
+        if len(sorted) == 0:
+            # give up. no hosts can be interrupted
+            return
+
+        # find the host with least connectors to interrupt            
+        sorted.sort()
+        connectors = sorted[0][1]
+                
+        for s in connectors:
+            s.preempted = True
+            if debug: print 'preempting', s.host, s.port
+            s.disconnect()
+        
+    def _resolve_then_connect(self, c):
+        # Resolve the hostname first so rate limiting is keyed by IP.
+        if abstract.isIPAddress(c.host):
+            self._connect(c)
+            return c
+        df = self.reactor.resolve(c.host)
+        if debug: print 'resolving', c.host
+        def set_host(ip):
+            if debug: print 'resolved', c.host, ip
+            c.host = ip
+            self._connect(c)
+        def error(f):
+            # too lazy to figure out how to fail properly, so just connect
+            self._connect(c)
+        df.addCallbacks(set_host, error)
+        return c
+
+    def _connect(self, c):
+        # the XP connection rate limiting is unique at the IP level
+        if (len(self.halfopen_hosts) >= self.max_incomplete and
+            c.host not in self.halfopen_hosts):
+            if debug: print 'postponing', c.host, c.port
+            if c.urgent:
+                self._preempt_for(c)
+            else:
+                self.postponed.add_connection(c.owner, c)
+        else:
+            c.connect()
+        return c
+
+    def connectTCP(self, host, port, factory,
+                   timeout=30, bindAddress=None, owner=None, urgent=True):
+        """Drop-in replacement for reactor.connectTCP, rate limited."""
+        c = IRobotConnector(self, 'INET', host, port, factory, owner, urgent,
+                            timeout, bindAddress)
+        self._resolve_then_connect(c)
+        return c
+
+    def connectSSL(self, host, port, factory, contextFactory,
+                   timeout=30, bindAddress=None, owner=None, urgent=True):
+        """Drop-in replacement for reactor.connectSSL, rate limited."""
+        c = IRobotConnector(self, 'SSL', host, port, factory, owner, urgent,
+                            contextFactory, timeout, bindAddress)
+        self._resolve_then_connect(c)
+        return c
+
+
+
+def connectionRateLimitReactor(reactor, max_incomplete):
+    if (hasattr(reactor, 'limiter') and
+        reactor.limiter.max_incomplete != max_incomplete):
+        print 'Changing max_incomplete for ConnectionRateLimiterReactor!'
+        reactor.limiter.max_incomplete = max_incomplete
+    else:    
+        limiter = ConnectionRateLimiter(reactor, max_incomplete)
+        reactor.connectTCP = limiter.connectTCP
+        reactor.connectSSL = limiter.connectSSL
+        reactor.add_pending_connection = limiter.add_pending_connection
+        reactor.remove_pending_connection = limiter.remove_pending_connection
+        reactor.limiter = limiter

+ 415 - 0
html/bin/clients/mainline/BTL/ConvertedMetainfo.py

@@ -0,0 +1,415 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Uoti Urpala
+
+# required for Python 2.2
+from __future__ import generators
+
+import os
+import sys
+import logging
+import urlparse
+from BTL.hash import sha
+import socket
+
+#debug=True
+global_logger = logging.getLogger("BTL.ConvertedMetainfo")
+
+from BTL.translation import _
+from BTL.obsoletepythonsupport import *
+
+from BTL.bencode import bencode
+from BTL import btformats
+from BTL import BTFailure, InfoHashType
+from BTL.platform import get_filesystem_encoding, encode_for_filesystem
+from BTL.defer import ThreadedDeferred
+
+# Characters Windows forbids in file names; each is mapped to '-' below.
+WINDOWS_UNSUPPORTED_CHARS = u'"*/:<>?\|'
+windows_translate = {}
+for x in WINDOWS_UNSUPPORTED_CHARS:
+    windows_translate[ord(x)] = u'-'
+
+# Map code points that are not real characters to '-': the UTF-16
+# surrogate range (U+D800..U+DFFF) and the Unicode noncharacters
+# U+FDD0..U+FDEF, U+FFFE and U+FFFF.
+noncharacter_translate = {}
+for i in xrange(0xD800, 0xE000):
+    noncharacter_translate[i] = ord('-')
+for i in xrange(0xFDD0, 0xFDF0):
+    noncharacter_translate[i] = ord('-')
+for i in (0xFFFE, 0xFFFF):
+    noncharacter_translate[i] = ord('-')
+
+# don't leak the loop variables into the module namespace
+del x, i
+
+def generate_names(name, is_dir):
+    """Yield an endless sequence of alternative names for `name`, used
+       to de-collide converted filenames: 'a.b.c' -> 'a.b.0.c',
+       'a.b.1.c', ...; directories and extensionless files ->
+       'a.0', 'a.1', ..."""
+    if is_dir:
+        prefix = name + '.'
+        suffix = ''
+    else:
+        # insert the counter just before the last extension, if any
+        pos = name.rfind('.')
+        if pos == -1:
+            pos = len(name)
+        prefix = name[:pos] + '.'
+        suffix = name[pos:]
+    i = 0
+    while True:
+        yield prefix + str(i) + suffix
+        i += 1
+
+class ConvertedMetainfo(object):
+
+    def __init__(self, metainfo):
+        """metainfo is a dict.  When read from a metainfo (i.e.,
+           .torrent file), the file must first be bdecoded before
+           being passed to ConvertedMetainfo."""
+        self.bad_torrent_wrongfield = False
+        self.bad_torrent_unsolvable = False
+        self.bad_torrent_noncharacter = False
+        self.bad_conversion = False
+        self.bad_windows = False
+        self.bad_path = False
+        self.reported_errors = False
+
+        # All of the following values should be considered READONLY.
+        # Modifications to the metainfo that should be written should
+        # occur to the underlying metainfo dict directly.
+        self.is_batch = False
+        self.orig_files = None
+        self.files_fs = None
+        self.total_bytes = 0
+        self.sizes = []
+        self.comment = None
+        self.title = None          # descriptive title text for whole torrent
+        self.creation_date = None
+        self.metainfo = metainfo
+        self.encoding = None
+        self.caches = None
+
+        btformats.check_message(metainfo, check_paths=False)
+        info = metainfo['info']
+        self.is_private = info.has_key("private") and info['private']
+        if 'encoding' in metainfo:
+            self.encoding = metainfo['encoding']
+        elif 'codepage' in metainfo:
+            self.encoding = 'cp%s' % metainfo['codepage']
+        if self.encoding is not None:
+            try:
+                for s in u'this is a test', u'these should also work in any encoding: 0123456789\0':
+                    assert s.encode(self.encoding).decode(self.encoding) == s
+            except:
+                self.encoding = 'iso-8859-1'
+                self.bad_torrent_unsolvable = True
+        if info.has_key('length'):
+            self.total_bytes = info['length']
+            self.sizes.append(self.total_bytes)
+            if info.has_key('content_type'):
+                self.content_type = info['content_type']
+            else:
+                self.content_type = None  # hasattr or None.  Which is better?
+        else:
+            self.is_batch = True
+            r = []
+            self.orig_files = []
+            self.sizes = []
+            self.content_types = []
+            i = 0
+            # info['files'] is a list of dicts containing keys:
+            # 'length', 'path', and 'content_type'.  The 'content_type'
+            # key is optional.
+            for f in info['files']:
+                l = f['length']
+                self.total_bytes += l
+                self.sizes.append(l)
+                self.content_types.append(f.get('content_type'))
+                path = self._get_attr(f, 'path')
+                if len(path[-1]) == 0:
+                    if l > 0:
+                        raise BTFailure(_("Bad file path component: ")+x)
+                    # BitComet makes .torrent files with directories
+                    # listed along with the files, which we don't support
+                    # yet, in part because some idiot interpreted this as
+                    # a bug in BitComet rather than a feature.
+                    path.pop(-1)
+
+                for x in path:
+                    if not btformats.allowed_path_re.match(x):
+                        raise BTFailure(_("Bad file path component: ")+x)
+
+                self.orig_files.append('/'.join(path))
+                k = []
+                for u in path:
+                    tf2 = self._to_fs_2(u)
+                    k.append((tf2, u))
+                r.append((k,i))
+                i += 1
+            # If two or more file/subdirectory names in the same directory
+            # would map to the same name after encoding conversions + Windows
+            # workarounds, change them. Files are changed as
+            # 'a.b.c'->'a.b.0.c', 'a.b.1.c' etc, directories or files without
+            # '.' as 'a'->'a.0', 'a.1' etc. If one of the multiple original
+            # names was a "clean" conversion, that one is always unchanged
+            # and the rest are adjusted.
+            r.sort()
+            self.files_fs = [None] * len(r)
+            prev = [None]
+            res = []
+            stack = [{}]
+            for x in r:
+                j = 0
+                x, i = x
+                while x[j] == prev[j]:
+                    j += 1
+                del res[j:]
+                del stack[j+1:]
+                name = x[j][0][1]
+                if name in stack[-1]:
+                    for name in generate_names(x[j][1], j != len(x) - 1):
+                        name = self._to_fs(name)
+                        if name not in stack[-1]:
+                            break
+                stack[-1][name] = None
+                res.append(name)
+                for j in xrange(j + 1, len(x)):
+                    name = x[j][0][1]
+                    stack.append({name: None})
+                    res.append(name)
+                self.files_fs[i] = os.path.join(*res)
+                prev = x
+
+        self.name = self._get_attr(info, 'name')
+        self.name_fs = self._to_fs(self.name)
+        self.piece_length = info['piece length']
+
+        self.announce = metainfo.get('announce')
+        self.announce_list = metainfo.get('announce-list')
+        if 'announce-list' not in metainfo and 'announce' not in metainfo:
+            self.is_trackerless = True
+        else:
+            self.is_trackerless = False
+
+        self.nodes = metainfo.get('nodes', [('router.bittorrent.com', 6881)])
+
+        self.title = metainfo.get('title')
+        self.comment = metainfo.get('comment')
+        self.creation_date = metainfo.get('creation date')
+        self.locale = metainfo.get('locale')
+
+        self.safe = metainfo.get('safe')
+
+        self.url_list = metainfo.get('url-list', [])
+        if not isinstance(self.url_list, list):
+            self.url_list = [self.url_list, ]
+
+        self.caches = metainfo.get('caches')
+
+        self.hashes = [info['pieces'][x:x+20] for x in xrange(0,
+            len(info['pieces']), 20)]
+        self.infohash = InfoHashType(sha(bencode(info)).digest())
+
+
+    def show_encoding_errors(self, errorfunc):
+        self.reported_errors = True
+        if self.bad_torrent_unsolvable:
+            errorfunc(logging.ERROR,
+                      _("This .torrent file has been created with a broken "
+                        "tool and has incorrectly encoded filenames. Some or "
+                        "all of the filenames may appear different from what "
+                        "the creator of the .torrent file intended."))
+        elif self.bad_torrent_noncharacter:
+            errorfunc(logging.ERROR,
+                      _("This .torrent file has been created with a broken "
+                        "tool and has bad character values that do not "
+                        "correspond to any real character. Some or all of the "
+                        "filenames may appear different from what the creator "
+                        "of the .torrent file intended."))
+        elif self.bad_torrent_wrongfield:
+            errorfunc(logging.ERROR,
+                      _("This .torrent file has been created with a broken "
+                        "tool and has incorrectly encoded filenames. The "
+                        "names used may still be correct."))
+        elif self.bad_conversion:
+            errorfunc(logging.WARNING,
+                      _('The character set used on the local filesystem ("%s") '
+                        'cannot represent all characters used in the '
+                        'filename(s) of this torrent. Filenames have been '
+                        'changed from the original.') % get_filesystem_encoding())
+        elif self.bad_windows:
+            errorfunc(logging.WARNING,
+                      _("The Windows filesystem cannot handle some "
+                        "characters used in the filename(s) of this torrent. "
+                        "Filenames have been changed from the original."))
+        elif self.bad_path:
+            errorfunc(logging.WARNING,
+                      _("This .torrent file has been created with a broken "
+                        "tool and has at least 1 file with an invalid file "
+                        "or directory name. However since all such files "
+                        "were marked as having length 0 those files are "
+                        "just ignored."))
+
+    # At least BitComet seems to make bad .torrent files that have
+    # fields in an unspecified non-utf8 encoding.  Some of those have separate
+    # 'field.utf-8' attributes.  Less broken .torrent files have an integer
+    # 'codepage' key or a string 'encoding' key at the root level.
+    def _get_attr(self, d, attrib):
+        def _decode(o, encoding):
+            if encoding is None:
+                encoding = 'utf8'
+            if isinstance(o, str):
+                try:
+                    s = o.decode(encoding)
+                except:
+                    self.bad_torrent_wrongfield = True
+                    s = o.decode(encoding, 'replace')
+                t = s.translate(noncharacter_translate)
+                if t != s:
+                    self.bad_torrent_noncharacter = True
+                return t
+            if isinstance(o, dict):
+                return dict([ (k, _decode(v, k.endswith('.utf-8') and None or encoding)) for k, v in o.iteritems() ])
+            if isinstance(o, list):
+                return [ _decode(i, encoding) for i in o ]
+            return o
+        # we prefer utf8 if we can find it. at least it declares its encoding
+        v = _decode(d.get(attrib + '.utf-8'), 'utf8')
+        if v is None:
+            v = _decode(d[attrib], self.encoding)
+        return v
+
+    def _fix_windows(self, name, t=windows_translate):
+        bad = False
+        r = name.translate(t)
+        # for some reason name cannot end with '.' or space
+        if r[-1] in '. ':
+            r = r + '-'
+        if r != name:
+            self.bad_windows = True
+            bad = True
+        return (r, bad)
+
+    def _to_fs(self, name):
+        return self._to_fs_2(name)[1]
+
+    def _to_fs_2(self, name):
+        if sys.platform.startswith('win'):
+            name, bad = self._fix_windows(name)
+
+        r, bad = encode_for_filesystem(name)
+        self.bad_conversion = bad
+
+        return (bad, r)
+
+
+    def to_data(self):
+        return bencode(self.metainfo)
+
+
+    def check_for_resume(self, path):
+        """
+        Determine whether this torrent was previously downloaded to
+        path.  Returns:
+
+        -1: STOP! gross mismatch of files
+         0: MAYBE a resume, maybe not
+         1: almost definitely a RESUME - file contents, sizes, and count match exactly
+        """
+        STOP   = -1
+        MAYBE  =  0
+        RESUME =  1
+
+        if self.is_batch != os.path.isdir(path):
+            return STOP
+
+        disk_files = {}
+        if self.is_batch:
+            metainfo_files = dict(zip(self.files_fs, self.sizes))
+            metainfo_dirs = set()
+            for f in self.files_fs:
+                metainfo_dirs.add(os.path.split(f)[0])
+
+            # BUG: do this in a thread, so it doesn't block the UI
+            for (dirname, dirs, files) in os.walk(path):
+                here = dirname[len(path)+1:]
+                for f in files:
+                    p = os.path.join(here, f)
+                    if p in metainfo_files:
+                        disk_files[p] = os.stat(os.path.join(dirname, f))[6]
+                        if disk_files[p] > metainfo_files[p]:
+                            # file on disk that's bigger than the
+                            # corresponding one in the torrent
+                            return STOP
+                    else:
+                        # file on disk that's not in the torrent
+                        return STOP
+                for i, d in enumerate(dirs):
+                    if d not in metainfo_dirs:
+                        # directory on disk that's not in the torrent
+                        return STOP
+
+        else:
+            if os.access(path, os.F_OK):
+                disk_files[self.name_fs] = os.stat(path)[6]
+            metainfo_files = {self.name_fs : self.sizes[0]}
+
+        if len(disk_files) == 0:
+            # no files on disk, definitely not a resume
+            return STOP
+
+        if set(disk_files.keys()) != set(metainfo_files.keys()):
+            # check files
+            if len(metainfo_files) > len(disk_files):
+                #file in the torrent that's not on disk
+                return MAYBE
+        else:
+            # check sizes
+            ret = RESUME
+            for f, s in disk_files.iteritems():
+
+                if disk_files[f] < metainfo_files[f]:
+                    # file on disk that's smaller than the
+                    # corresponding one in the torrent
+                    ret = MAYBE
+                else:
+                    # file sizes match exactly
+                    continue
+            return ret
+
+    def get_tracker_ips(self, wrap_task):
+        """Returns the list of tracker IP addresses or the empty list if the
+           torrent is trackerless.  This extracts the tracker ip addresses
+           from the urls in the announce or announce list."""
+        df = ThreadedDeferred(wrap_task, self._get_tracker_ips, daemon=True)
+        return df
+
+    def _get_tracker_ips(self):
+        if hasattr(self, "_tracker_ips"):     # cache result.
+            return self._tracker_ips
+
+        if self.announce is not None:
+            urls = [self.announce]
+        elif self.announce_list is not None:  # list of lists.
+            urls = []
+            for ulst in self.announce_list:
+                urls.extend(ulst)
+        else:  # trackerless
+            assert self.is_trackerless
+            return []
+
+        tracker_ports = [urlparse.urlparse(url)[1] for url in urls]
+        trackers = [tp.split(':')[0] for tp in tracker_ports]
+        self._tracker_ips = []
+        for t in trackers:
+            try:
+                ip_list = socket.gethostbyname_ex(t)[2]
+                self._tracker_ips.extend(ip_list)
+            except socket.gaierror:
+                global_logger.error( _("Cannot find tracker with name %s") % t )
+        return self._tracker_ips
+
+
+

+ 56 - 0
html/bin/clients/mainline/BTL/CurrentRateMeasure.py

@@ -0,0 +1,56 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from BTL.platform import bttime
+
+
+class CurrentRateMeasure(object):
+
+    def __init__(self, max_rate_period, fudge=5):
+        self.max_rate_period = max_rate_period
+        self.ratesince = bttime() - fudge
+        self.last = self.ratesince
+        self.rate = 0.0
+        self.total = 0
+        self.when_next_expected = bttime() + fudge
+
+    def add_amount(self, amount):
+        """ add number of bytes received """
+        self.total += amount
+        t = bttime()
+        if t < self.when_next_expected and amount == 0:
+            return self.rate
+        self.rate = (self.rate * (self.last - self.ratesince) +
+                     amount) / (t - self.ratesince)
+        self.last = t
+        self.ratesince = max(self.ratesince, t - self.max_rate_period)
+        self.when_next_expected = t + min((amount / max(self.rate, 0.0001)), 5)
+
+    def get_rate(self):
+        """ returns bytes per second """
+        self.add_amount(0)
+        return self.rate
+
+    def get_rate_noupdate(self):
+        """ returns bytes per second """
+        return self.rate
+
+    def time_until_rate(self, newrate):
+        if self.rate <= newrate:
+            return 0
+        t = bttime() - self.ratesince
+        # as long as the newrate is lower than rate, we wait
+        # longer before throttling.
+        return ((self.rate * t) / newrate) - t
+
+    def get_total(self):
+        return self.total

+ 302 - 0
html/bin/clients/mainline/BTL/DictWithLists.py

@@ -0,0 +1,302 @@
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# These are some handy dict types:
+#
+# DictWithLists:
+#   acts like a dict, but adding a key,value appends value to a list at that key
+#   getting a value at a key returns the first value in the list
+#   a key is only removed when the list is empty
+#
+# OrderedDict:
+#  just like a dict, but d.keys() is in insertion order
+#
+# OrderedDictWithLists:
+#  a combination of the two concepts that keeps lists at key locations in
+#  insertion order
+#
+# by Greg Hazel
+# with code from David Benjamin and contributers
+
+from BTL.Lists import QList
+from BTL.obsoletepythonsupport import set
+
class ReallyIterableDict(dict):
    """dict whose iteration helpers are all derived from __iter__ and
    __getitem__, so subclasses overriding those get consistent
    iteritems/iterkeys/itervalues/values/items for free."""

    def iteritems(self):
        """Yield (key, value) pairs in iteration order."""
        for key in self:
            yield key, self[key]

    def iterkeys(self):
        """Iterate keys (same as iterating the mapping itself)."""
        return iter(self)

    def itervalues(self):
        """Yield values in iteration order."""
        for _key, value in self.iteritems():
            yield value

    def values(self):
        """Return a list of values, in iteration order."""
        return [value for _key, value in self.iteritems()]

    def items(self):
        """Return a list of (key, value) pairs, in iteration order."""
        return list(self.iteritems())
+    
class DictWithLists(ReallyIterableDict):
    """Mapping from key to a QList of values.

    push() appends to the row at a key; pop() removes from the front
    of that row.  A key vanishes as soon as its row drains, so the
    mapping never holds empty rows.
    """

    def __init__(self, d = None, parent = ReallyIterableDict):
        self.parent = parent
        # the underlying dict constructor rejects None
        if d:
            self.parent.__init__(self, d)
        else:
            self.parent.__init__(self)

    def popitem(self):
        """Pop one value from the first row; return (key, value)."""
        keys = self.keys()
        if not keys:
            raise KeyError('popitem(): dictionary is empty')
        key = keys[0]
        return key, self.pop(key)

    def pop(self, key, *args):
        """Remove and return the oldest value queued at *key*.

        An optional second positional argument is returned instead if
        the key is absent (like dict.pop).
        """
        if args and key not in self:
            return args[0]

        row = self[key]
        value = row.popleft()

        # drop drained rows so we don't leak empty lists
        if not row:
            self.parent.__delitem__(self, key)

        return value
    pop_from_row = pop

    def get_from_row(self, key):
        """Peek at the oldest value queued at *key* without removing it."""
        return self[key][0]

    def getrow(self, key):
        """Return the whole row (list) stored at *key*."""
        return self[key]

    def poprow(self, key):
        """Remove and return the whole row stored at *key*."""
        return self.parent.pop(self, key)

    def setrow(self, key, l):
        """Install *l* as the row for *key*; empty rows are ignored."""
        if not l:
            return
        self.parent.__setitem__(self, key, l)

    def push(self, key, value):
        """Append *value* to the row at *key*, creating it if needed."""
        # QList construction is comparatively slow, so only build one
        # when the key is new
        if key in self:
            self[key].append(value)
        else:
            self.parent.__setitem__(self, key, QList([value]))
    push_to_row = push

    def keys(self):
        return self.parent.keys(self)

    def total_length(self):
        """Total number of queued values across every row."""
        count = 0
        for key in self.iterkeys():
            count += len(self.getrow(key))
        return count
+
+
class DictWithInts(dict):
    """Multiset implemented as a mapping from value to occurrence count."""

    def add(self, value):
        """Increment the count for *value* (starting it at 1)."""
        self[value] = self.get(value, 0) + 1

    def remove(self, value):
        """Decrement the count for *value*, deleting it at zero.

        Raises KeyError if *value* is not present.
        """
        count = self[value]
        if count == 1:
            del self[value]
        else:
            self[value] = count - 1
+
+
class DictWithSets(DictWithLists):
    """Like DictWithLists, but each row is a set rather than a list.

    pop() therefore returns an arbitrary element of the row, not the
    oldest one; duplicate pushes are coalesced.
    """

    def pop(self, key, *args):
        """Remove and return an arbitrary value from the set at *key*.

        An optional second positional argument is returned instead if
        the key is absent (like dict.pop).
        """
        if key not in self and len(args) > 0:
            return args[0]

        l = self[key]
        data = l.pop()

        # so we don't leak blank sets
        if len(l) == 0:
            self.parent.__delitem__(self, key)

        return data
    pop_from_row = pop

    def push(self, key, value):
        """Add *value* to the set at *key*, creating the set if needed."""
        if key not in self:
            v = set([value])
            self.parent.__setitem__(self, key, v)
        else:
            self[key].add(value)
    push_to_row = push

    def remove_fom_row(self, key, value):
        """Remove *value* from the set at *key*, dropping empty sets.

        Raises KeyError/KeyError-from-set if key or value is absent.
        (The name carries a historical typo; prefer remove_from_row.)
        """
        l = self[key]
        l.remove(value)

        # so we don't leak blank sets
        if len(l) == 0:
            self.parent.__delitem__(self, key)

    # correctly-spelled alias, kept backward compatible with the typo'd name
    remove_from_row = remove_fom_row
+        
+
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
class OrderedDict(ReallyIterableDict):
    """Mapping that remembers the insertion order of its keys.

    keys()/items()/values() and plain iteration all follow the order
    in which keys were first inserted.
    """

    def __init__(self, d = None):
        self._keys = []
        ReallyIterableDict.__init__(self)
        # python dict() can't take None.  Populate through update() so
        # _keys stays in sync with the mapping.  (Passing the initial
        # mapping straight to the base constructor would bypass
        # __setitem__ and leave _keys empty.)
        if d:
            self.update(d)

    def __delitem__(self, key):
        ReallyIterableDict.__delitem__(self, key)
        self._keys.remove(key)

    def __setitem__(self, key, item):
        ReallyIterableDict.__setitem__(self, key, item)
        if key not in self._keys:
            self._keys.append(key)

    def clear(self):
        ReallyIterableDict.clear(self)
        self._keys = []

    def copy(self):
        """Return a shallow copy that preserves key order."""
        newInstance = OrderedDict()
        newInstance.update(self)
        return newInstance

    def items(self):
        return zip(self._keys, self.values())

    def keys(self):
        # return a copy so callers can't corrupt the order list
        return self._keys[:]

    def __iter__(self):
        return iter(self._keys)

    def pop(self, key, *args):
        """dict.pop with order maintenance; optional default argument."""
        if args and key not in self:
            return args[0]
        val = ReallyIterableDict.pop(self, key)
        self._keys.remove(key)
        return val

    def popitem(self):
        """Remove and return the oldest (key, value) pair."""
        try:
            key = self._keys[0]
        except IndexError:
            raise KeyError('dictionary is empty')

        val = self.pop(key)

        return (key, val)

    def setdefault(self, key, failobj = None):
        if key not in self._keys:
            self._keys.append(key)
        return ReallyIterableDict.setdefault(self, key, failobj)

    def update(self, dict):
        # parameter name shadows the builtin; kept for backward
        # compatibility with keyword callers
        for (key,val) in dict.items():
            self.__setitem__(key,val)

    def values(self):
        return map(self.get, self._keys)
+
class OrderedDictWithLists(DictWithLists, OrderedDict):
    """DictWithLists whose rows are kept in key-insertion order."""

    def __init__(self, dict = None, parent = OrderedDict):
        # delegate to DictWithLists, but with OrderedDict as the
        # underlying mapping so key order is preserved
        DictWithLists.__init__(self, dict, parent = parent)

    def __iter__(self):
        # iterate keys in insertion order (maintained by OrderedDict)
        return iter(self._keys)
+
+
if __name__=='__main__':
    # Self-tests for the dict variants (Python 2: uses xrange).

    d = DictWithLists()

    # values pushed at a key must come back in FIFO order
    for i in xrange(50):
        for j in xrange(50):
            d.push(i, j)

    for i in xrange(50):
        for j in xrange(50):
            assert d.pop(i) == j

    od = OrderedDict()

    def make_str(i):
        return str(i) + "extra"

    # OrderedDict keeps keys in insertion order
    for i in xrange(50):
        od[make_str(i)] = 1

    for i,j in zip(xrange(50), od.keys()):
        assert make_str(i) == j

    odl = OrderedDictWithLists()

    # the ordered variant still pops values FIFO per key
    for i in xrange(50):
        for j in xrange(50):
            odl.push(make_str(i), j)

    for i in xrange(50):
        for j in xrange(50):
            assert odl.pop(make_str(i)) == j

    # first key is the first inserted, not the smallest
    od = OrderedDict()
    od['2'] = [1,1,1,1,1]
    od['1'] = [2,2,2,2,2]
    od['3'] = [3,3,3,3,3]
    k = od.keys()[0]
    assert k == '2'

    odl = OrderedDictWithLists()
    odl.setrow('2', [1,1,1,1,1])
    odl.setrow('1', [2,2,2,2,2])
    odl.setrow('3', [3,3,3,3,3])
    k = odl.keys()[0]
    assert k == '2'

    # iteration order matches insertion order too
    od = OrderedDict()
    od['2'] = [1,1,1,1,1]
    od['1'] = [2,2,2,2,2]
    od['3'] = [3,3,3,3,3]
    r = []
    for k in od.iterkeys():
        r.append(k)
    assert r == ['2', '1', '3']

    odl = OrderedDictWithLists()
    odl.setrow('2', [1,1,1,1,1])
    odl.setrow('1', [2,2,2,2,2])
    odl.setrow('3', [3,3,3,3,3])
    r = []
    for k in odl.iterkeys():
        r.append(k)
    assert r == ['2', '1', '3']

    # poprow returns the whole row, in push order
    d = DictWithLists()
    d.push(4, 3)
    d.push(4, 4)
    d.push(4, 2)
    d.push(4, 1)
    assert d.poprow(4) == QList([3,4,2,1])

+ 95 - 0
html/bin/clients/mainline/BTL/EventLoop.py

@@ -0,0 +1,95 @@
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# by Greg Hazel
+
+import sys
+import Queue
+import BTL.stackthreading as threading
+from BTL import defer
+from BTL.yielddefer import launch_coroutine, wrap_task
+
+
class EventLoop(object):
    """A dedicated worker thread that executes queued (func, args,
    kwargs) tasks one at a time until exit() is called.

    The thread is created but not started here; callers invoke
    start() (forwarded via __getattr__) when ready.
    """

    def __init__(self):
        self.thread = threading.Thread(target=self.run)
        self.queue = Queue.Queue()
        self.killswitch = threading.Event()

    def __getattr__(self, attr):
        # delegate unknown attributes (start, join, getName, ...) to
        # the underlying thread object
        return getattr(self.thread, attr)

    def add_task(self, _f, *a, **kw):
        """Schedule _f(*a, **kw) to run on the loop thread."""
        self.queue.put((_f, a, kw))

    def exit(self):
        """Ask the loop to stop; the no-op task wakes the blocking get()."""
        self.killswitch.set()
        self.add_task(lambda : None)

    def run(self):
        # main loop: block for the next task, run it, log any failure
        while not self.killswitch.isSet():
            func, args, kwargs = self.queue.get(True)

            try:
                v = func(*args, **kwargs)
            except:
                # interpreter shutdown
                if not sys:
                    return
                exc_type, value, tb = sys.exc_info()
                # NOTE(review): self.stack resolves through __getattr__
                # to the stackthreading.Thread -- presumably the stack
                # captured at thread creation; confirm it exists there
                threading._print_traceback(sys.stderr, self.stack,
                                           "thread %s" % self.thread.getName(),
                                           1,
                                           exc_type, value, tb)
                del tb
+
+
class RoutineLoop(object):
    """Coroutine analogue of EventLoop: waits on queued deferreds one
    at a time until exit() is called."""

    def __init__(self, queue_task):
        # queue_task is the scheduler handed to launch_coroutine
        self.killswitch = threading.Event()
        self.queue = defer.DeferredQueue()
        self.main_df = launch_coroutine(queue_task, self.run)

    def add_task(self, _f, *a, **kw):
        """Call _f(*a, **kw) now and queue its deferred result."""
        df = _f(*a, **kw)
        self.queue.put((df,))

    def add_deferred(self, df):
        """Queue an existing deferred to be waited on in turn."""
        self.queue.put((df,))

    def exit(self):
        """Ask the loop to stop; the pre-fired deferred wakes the get()."""
        self.killswitch.set()
        self.add_deferred(defer.succeed(True))

    def run(self):
        # coroutine body driven by launch_coroutine: each yield hands a
        # deferred back to the scheduler to be waited on
        while not self.killswitch.isSet():
            event_df = self.queue.get()
            yield event_df
            (df,) = event_df.getResult()

            yield df
            try:
                r = df.getResult()
            except:
                # interpreter shutdown
                if not sys:
                    return
                exc_type, value, tb = sys.exc_info()
                # no base_stack, unless we want to keep stack from the add_task
                threading._print_traceback(sys.stderr, [],
                                           "RoutineLoop", 1,
                                           exc_type, value, tb)
                del tb
+        
+    

+ 221 - 0
html/bin/clients/mainline/BTL/HostIP.py

@@ -0,0 +1,221 @@
+# a very simple (and silly) mechanism for getting the host_ip
+
+import socket
+from BTL.platform import bttime
+from BTL.obsoletepythonsupport import set
+from BTL.reactor_magic import reactor
+from BTL import defer
+import BTL.stackthreading as threading
+from twisted.internet.protocol import ClientFactory, Protocol
+from twisted.protocols.policies import TimeoutMixin
+try:
+    from BTL.iphelp import get_route_ip
+except:
+    get_route_ip = None
+    
+
+import thread
+
_host_ip = 'unknown'        # best-known local/external IP; 'unknown' is the sentinel
_host_ip_callbacks = []     # deferreds waiting on an IP lookup already in flight
_host_ip_cachetime = 0      # bttime() when _host_ip was last refreshed
_host_ips = None            # cached set of all local IPs, or None if never built
_host_ips_cachetime = 0     # bttime() when _host_ips was last refreshed
_thread_running = False     # True while the gethostbyname fallback thread runs
CACHE_TIME = 3600 # seconds (one hour)

# scheduler used to bounce worker-thread results back to the reactor
wrap_task = reactor.callFromThread
+
+
class RecorderProtocol(TimeoutMixin, Protocol):
    """Connects out just long enough to record which local address the
    OS chose for the route, then drops the connection."""

    def makeConnection(self, transport):
        # give up if the connection can't be established within 20s
        self.setTimeout(20)
        Protocol.makeConnection(self, transport)

    def connectionMade(self):
        # the local endpoint of this socket is our outward-facing IP
        _got_result(self.transport.getHost().host)
        self.transport.write("GET /myip HTTP/1.0\r\n\r\n")
        self.transport.loseConnection()

    def connectionLost(self, reason):
        # NOTE(review): this also fires after a successful
        # connectionMade, re-entering _got_result with None; appears
        # harmless since the callback list is already drained -- confirm
        _got_result(None)
+
+
class RecorderFactory(ClientFactory):
    """Factory for the IP-probe connection; a failure to connect is
    reported so _got_result can try its fallbacks."""

    def clientConnectionFailed(self, connector, reason):
        # None tells _got_result to fall back to hostname resolution
        _got_result(None)
+
+
+def _resolve():
+    try:
+        ip = socket.gethostbyname(socket.gethostname())
+    except socket.error, e:
+        ip = 'unknown'
+    reactor.callFromThread(_got_result, ip)
+
+
def _finish(ip):
    # reactor-thread completion of the fallback thread: clear the
    # in-flight flag so _got_result will actually process the result
    global _thread_running
    _thread_running = False
    _got_result(ip)
+
+
def _got_result(ip):
    """Reactor-thread callback with the outcome of an IP discovery try.

    *ip* may be a usable address string (cache it and fire waiters),
    the 'unknown' sentinel (fire waiters with whatever we had), or
    None (the socket approach failed -- fall back to resolving our
    hostname on a daemon thread).
    """
    global _host_ip
    global _host_ip_callbacks
    global _host_ip_cachetime
    global _thread_running
    if hasattr(reactor, 'ident'):
        assert reactor.ident == thread.get_ident()

    # the fallback thread will report back in on its own
    if _thread_running:
        return

    if ip is None:
        t = threading.Thread(target=_resolve)
        t.setDaemon(True)
        _thread_running = True
        t.start()
        return

    # compare by value, not identity: `is not 'unknown'` only worked
    # by virtue of CPython string interning
    if ip != 'unknown':
        _host_ip = ip
        _host_ip_cachetime = bttime()

    l = _host_ip_callbacks
    _host_ip_callbacks = []
    for df in l:
        df.callback(_host_ip)
+
+
def get_deferred_host_ip():
    """Return a Deferred firing with this host's outward-facing IP.

    Must be called from the reactor thread.  Serves the cached answer
    while fresh; otherwise tries the routing table, then a probe
    connection to ip.bittorrent.com.  Concurrent callers share one
    outstanding probe.
    """
    global _host_ip
    global _host_ip_callbacks
    global _host_ip_cachetime
    if hasattr(reactor, 'ident'):
        assert reactor.ident == thread.get_ident()

    # compare by value: the identity test against the 'unknown'
    # sentinel only worked through CPython string interning
    if _host_ip != 'unknown' and _host_ip_cachetime + CACHE_TIME > bttime():
        return defer.succeed(_host_ip)

    if get_route_ip:
        ip = get_route_ip()
        if ip:
            _host_ip = ip
            _host_ip_cachetime = bttime()
            return defer.succeed(_host_ip)

    df = defer.Deferred()

    # only launch one probe; later callers just queue their deferreds
    if not _host_ip_callbacks:
        def connect(ip):
            factory = RecorderFactory()
            factory.protocol = RecorderProtocol
            if hasattr(reactor, 'limiter'):
                reactor.connectTCP(ip, 80, factory, urgent=True)
            else:
                reactor.connectTCP(ip, 80, factory)
        rdf = reactor.resolve("ip.bittorrent.com")
        rdf.addCallback(connect)
        rdf.addErrback(lambda e : _got_result(None))

    _host_ip_callbacks.append(df)

    return df
+
+
+def get_host_ip():
+    """ Blocking version, do not use from reactor thread! """
+    global _host_ip
+    global _host_ip_callbacks
+    global _host_ip_cachetime
+    if hasattr(reactor, 'ident'):
+        assert reactor.ident != thread.get_ident()
+
+    if _host_ip is not 'unknown' and _host_ip_cachetime + CACHE_TIME > bttime():
+        return _host_ip
+
+    if get_route_ip:
+        ip = get_route_ip()
+        if ip:
+            _host_ip = ip
+            _host_ip_cachetime = bttime()
+            return _host_ip
+
+    try:
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.settimeout(5)
+        
+        # what moron decided to make try/except/finally not work?
+        # Guido van Rossum.
+        try:
+            s.connect(("ip.bittorrent.com", 80))
+            endpoint = s.getsockname()
+            _host_ip = endpoint[0]
+            _host_ip_cachetime = bttime()
+            s.send("GET /myip HTTP/1.0\r\n\r\n")
+        except (socket.error, socket.timeout), e:
+            try:
+                _host_ip = socket.gethostbyname(socket.gethostname())
+            except socket.error, e:
+                pass
+        try:
+            s.close()
+        except:
+            pass
+    except:
+        pass        
+        
+    return _host_ip
+
+
def get_deferred_host_ips():
    """Return a Deferred firing with the set of this host's IPs.

    Must be called from the reactor thread; results are cached for
    CACHE_TIME seconds.
    """
    global _host_ips
    global _host_ips_cachetime
    if hasattr(reactor, 'ident'):
        assert reactor.ident == thread.get_ident()

    # serve straight from the cache while it is fresh
    if _host_ips is not None and _host_ips_cachetime + CACHE_TIME > bttime():
        return defer.succeed(_host_ips)

    result_df = defer.Deferred()
    ip_df = get_deferred_host_ip()
    ip_df.addCallback(_get_deferred_host_ips2, result_df)
    return result_df
+
+
def _get_deferred_host_ips2(host_ip, finaldf):
    # reactor-thread step: push the (blocking) name resolution onto a
    # daemon thread and chain its eventual result into finaldf
    if hasattr(reactor, 'ident'):
        assert reactor.ident == thread.get_ident()
    df = defer.ThreadedDeferred(wrap_task, _get_deferred_host_ips3,
                                host_ip, daemon=True)
    df.chainDeferred(finaldf)
+
+
+def _get_deferred_host_ips3(host_ip):
+    global _host_ips
+    global _host_ips_cachetime
+    if hasattr(reactor, 'ident'):
+        assert reactor.ident != thread.get_ident()
+    
+    l = set()
+
+    if host_ip is not 'unknown':
+        l.add(host_ip)
+
+    try:
+        hostname = socket.gethostname()
+        hostname, aliaslist, ipaddrlist = socket.gethostbyname_ex(hostname)
+        l.update(ipaddrlist)
+    except socket.error, e:
+        print "ARG", e
+
+    _host_ips = l
+    _host_ips_cachetime = bttime()
+
+    return _host_ips
+
def get_host_ips():
    """Blocking counterpart of get_deferred_host_ips; do not call from
    the reactor thread."""
    return _get_deferred_host_ips3(get_host_ip())

+ 49 - 0
html/bin/clients/mainline/BTL/IPTools.py

@@ -0,0 +1,49 @@
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+from struct import pack, unpack
+from socket import inet_aton, inet_ntoa
+
def compact(ip, port):
    """Encode a dotted-quad IP and a port into the 6-byte compact form.

    Layout (network byte order, struct format "!4sH"): the 4 raw
    address bytes followed by an unsigned 16-bit port.
    """
    packed_ip = inet_aton(ip)
    return pack("!4sH", packed_ip, port)
+
def uncompact(x):
    """Decode a 6-byte compact address into an (ip, port) tuple."""
    raw_ip, port = unpack("!4sH", x)
    return inet_ntoa(raw_ip), port
+
def uncompact_sequence(b):
    """Generate (ip, port) pairs from a concatenation of 6-byte
    compact addresses."""
    for offset in xrange(0, len(b), 6):
        ip, port = uncompact(b[offset:offset + 6])
        yield (ip, int(port))
+
def compact_sequence(s):
    """Concatenate the 6-byte compact encodings of a sequence of
    (ip, port) pairs into a single string."""
    chunks = [compact(addr[0], addr[1]) for addr in s]
    return ''.join(chunks)
+
+##import ctypes
+##class CompactAddr(ctypes.Structure):
+##    _fields_ = [('ip', ctypes.c_int32),
+##                ('port', ctypes.c_int16)]
+##
+##def compact_sequence_c(s):
+##    b = ctypes.create_string_buffer(6 * len(s))
+##    a = ctypes.addressof(b)
+##    for i, addr in enumerate(s):
+##        c = compact(addr[0], addr[1])
+##        ctypes.cast(
+##        offset = i*6
+##        b[offset:offset + 6] = c
+##    return b

+ 27 - 0
html/bin/clients/mainline/BTL/LIFOQueue.py

@@ -0,0 +1,27 @@
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# by Greg Hazel
+
+from Queue import Queue
+
class LIFOQueue(Queue):
    """Queue variant that hands out the newest item first -- a stack
    with Queue's locking and blocking semantics."""

    def _get(self):
        # hook invoked by Queue.get() under its lock: take from the
        # tail instead of the head
        return self.queue.pop()
+
if __name__ == '__main__':
    # self-test: items come back newest-first (Python 2: xrange)
    l = LIFOQueue()
    for i in xrange(10):
        l.put(i)
    j = 9
    for i in xrange(10):
        assert l.get() == j - i

+ 111 - 0
html/bin/clients/mainline/BTL/Lists.py

@@ -0,0 +1,111 @@
+# QList:
+# basically a python 2.3 compatible interface if you want deque
+#
+# SizedList:
+# handy class for keeping a fixed-length history
+# uses deque if available
+#
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# by Greg Hazel
+
+try:
+    from collections import deque
+    base_list_class = deque
+    popleft = deque.popleft
+    clear = deque.clear
+    appendleft = deque.appendleft
+    def insert(q, n, item):
+        if n == len(q):
+            deque.append(q, item)
+        else:
+            q.rotate(-(n + 1))
+            q.appendleft(item)
+            q.rotate(n + 1)
+    def pop(q, n):
+        q.rotate(-n)
+        q.popleft()
+        q.rotate(n) 
+    def remove(q, item):
+        for i, v in enumerate(q):
+            if v == item:
+                q.pop(i)
+                break
+        else:
+            raise ValueError(q.__class__.__name__ + ".remove(x): x not in list")
+except ImportError:
+    from UserList import UserList
+    base_list_class = UserList
+    def popleft(l):
+        return l.pop(0)
+    def clear(l):
+        l[:] = []
+    def appendleft(l, item):
+        l.insert(0, item)
+    insert = UserList.insert
+    pop = UserList.pop
+    remove = UserList.remove
+    
+
class QList(base_list_class):
    """Python 2.3 compatible deque-style list (see module header).

    The class attributes below rebind the module-level compat helpers
    as methods, giving a uniform API whether the base class is
    collections.deque or UserList.
    """

    clear = clear
    pop = pop
    popleft = popleft
    remove = remove
    appendleft = appendleft
    insert = insert

    def __init__(self, *a, **kw):
        base_list_class.__init__(self, *a, **kw)

    # deque doesn't have __add__ ?
    # overload anyway to get a base_list_class
    def __add__(self, l):
        n = base_list_class(self)
        n.extend(l)
        return n
+
+
# QList is used because deque.popleft is faster than list.pop(0)
class SizedList(QList):
    """QList that keeps only the most recent max_items entries."""

    def __init__(self, max_items):
        # capacity; the oldest entry is evicted beyond this
        self.max_items = max_items
        QList.__init__(self)

    def append(self, item):
        """Append item, discarding the oldest entry when over capacity."""
        QList.append(self, item)
        if len(self) > self.max_items:
            self.popleft()
+
+
def collapse(seq):
    """Collapse runs of consecutive integers into half-open ranges.

    Yields (start, stop) pairs such that each maximal run
    i, i+1, ..., j of consecutive values in seq becomes (i, j + 1).
    """
    run_start = None
    prev = None
    for value in seq:
        if run_start is None:
            run_start = value
        elif value > prev + 1:
            # gap found: emit the finished run and start a new one
            yield run_start, prev + 1
            run_start = value
        prev = value
    if run_start is not None:
        yield run_start, prev + 1
+        
+            
if __name__ == '__main__':
    # self-test (Python 2: xrange, list-returning range)
    l = SizedList(10)
    for i in xrange(50):
        l.append(i)
    # only the 10 most recent values survive
    assert list(l) == range(40, 50)
    l.appendleft(39)
    assert list(l) == range(39, 50)

+ 59 - 0
html/bin/clients/mainline/BTL/Luciana.py

@@ -0,0 +1,59 @@
+# Two reactors spinning at once.
+# IOCP reactor for most things
+# selectreactor for SSL
+#
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# by Greg Hazel
+
+from twisted.internet import iocpreactor
+iocpreactor.proactor.install()
+
+from twisted.internet.selectreactor import SelectReactor
+selectreactor = SelectReactor()
+
+from twisted.internet import reactor
+
# number of outstanding SSL connections that still need the select
# reactor to keep iterating
selectreactor.spin_task = 0

def selectrun():
    # pump the select reactor from inside the main (IOCP) reactor's
    # loop, rescheduling itself while any SSL connection is alive
    selectreactor.iterate(0)
    if selectreactor.spin_task > 0:
        reactor.callLater(0.01, selectrun)
+    
class HookedFactory(object):
    """Wraps a client factory to reference-count its connections via
    selectreactor.spin_task, keeping the select reactor spinning while
    any connection is outstanding."""

    def __init__(self, factory):
        self.factory = factory

    def startedConnecting(self, connector):
        # the first outstanding connection kicks off the pump loop
        if selectreactor.spin_task == 0:
            reactor.callLater(0.01, selectrun)
        selectreactor.spin_task += 1
        return self.factory.startedConnecting(connector)

    def clientConnectionFailed(self, connector, reason):
        # connection over (never established): drop the refcount
        selectreactor.spin_task -= 1
        return self.factory.clientConnectionFailed(connector, reason)

    def clientConnectionLost(self, connector, reason):
        # connection over (closed after use): drop the refcount
        selectreactor.spin_task -= 1
        return self.factory.clientConnectionLost(connector, reason)

    def __getattr__(self, attr):
        # everything else is delegated to the wrapped factory
        return getattr(self.factory, attr)
+
def spin_ssl(host, port, factory, contextFactory, timeout=30, bindAddress=None):
    # connectSSL replacement: route the SSL connection through the
    # select reactor, counting it via HookedFactory so selectrun keeps
    # that reactor iterating for the connection's lifetime
    factory = HookedFactory(factory)
    connector = selectreactor.connectSSL(host, port, factory, contextFactory,
                                         timeout, bindAddress)
    return connector

# divert all SSL connections made through the main reactor
reactor.connectSSL = spin_ssl

+ 19 - 0
html/bin/clients/mainline/BTL/Map.py

@@ -0,0 +1,19 @@
+
# Prefer the C implementations of the map types; fall back to the
# pure-python PMap versions when the extension isn't compiled.
try:
   from CMap import CMap, CIndexedMap
   Map = CMap
   IndexedMap = CIndexedMap

except:
   # NOTE(review): bare except rather than ImportError -- presumably to
   # survive broken/mismatched extension builds too; confirm before
   # narrowing it
   from PMap import PMap, PIndexedMap
   Map = PMap
   IndexedMap = PIndexedMap
   print "Using pure python version of Map.  Please compile CMap.\n"

try:
   from CMultiMap import CMultiMap, CIndexedMultiMap
   MultiMap = CMultiMap
   IndexedMultiMap = CIndexedMultiMap
except:
   # no pure-python fallback exists for the multimap variants
   print "Warning!! Please compile CMultiMap.  There is no pure "
   print "python version of MultiMap.\n"

+ 1062 - 0
html/bin/clients/mainline/BTL/PMap.py

@@ -0,0 +1,1062 @@
+#!/usr/bin/env python
+
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# By David Harrison
+
+# I was playing with doctest when I wrote this.  I still haven't
+# decided how useful doctest is as opposed to implementing unit tests
+# directly.  --Dave
+
+# HEREDAVE: Create a bit in the map that is set whenever the
+# the map is changed in a way that would invalidate existing iterators?
+# Nope. That won't work. How do you know when to reset the bit.
+#
+# Another way is for the PMap to maintain a set of all valid iterators.
+# Whenever an action occurs that invalidates iterators, the set is cleared.
+# Before performing any operation on an iterator, the iterator checks
+# whether it is in the valid set.  For CMap, we could maintain a dead bit
+# for all values.  When a node is deleted, we set the dead bit.
+# Before performing any operation, the iterator checks the dead bit.
+
+from BTL.translation import _
+
+from bisect import bisect_left, bisect_right, insort_left
+from copy import copy
+
+# by David Harrison
+
+class PMap(object):
+    """This is an ordered mapping.  PMap --> Python Map, because it is
+       implemented using dicts and lists.
+
+       Unlike a dict, it can be iterated in order and it supports
+       lower and upper bound searches.  It also has an index implemented
+       with a dict that allows O(1) time lookups based on key.
+
+       The in-order mapping is implemented with a Python list.  The
+       index and cross-index are implemented with dicts.
+
+       Item insertion: O(n)
+       Item deletion:  O(n)
+       Key search:     O(1)
+       Value search:   n/a
+       Iteration step: O(1)
+
+       This is not semantically equivalent to CMap or CIndexedMap
+       in the following ways:
+         - iterators are invalidated by insertions and deletions.
+         - time complexity is different for many operations.
+       """
+           
+    class _AbstractIterator(object):
+        def __init__(self, map, i = -1 ):
+            """Creates an iterator pointing to item si in the map.
+            
+               Do not instantiate directly.  Use iterkeys, itervalues, or
+               iteritems.
+
+               Examples of typical behavior:
+
+               >>> from PMap import *
+               >>> m = PMap()
+               >>> m[12] = 6
+               >>> m[9] = 4
+               >>> for k in m:
+               ...     print int(k)
+               ...
+               9
+               12
+               >>>
+
+               Example edge cases (empty map):
+
+               >>> from PMap import *
+               >>> m = PMap()
+               >>> try:
+               ...     i = m.__iter__()
+               ...     i.value()
+               ... except IndexError:
+               ...     print 'IndexError.'
+               ...
+               IndexError.
+               >>> try:
+               ...     i.next()
+               ... except StopIteration:
+               ...     print 'stopped'
+               ...
+               stopped
+
+               @param map: PMap.
+               @param node: Node that this iterator will point at.  If None
+                 then the iterator points to end().  If -1
+                 then the iterator points to one before the beginning.
+             """
+            if i == None: self._i = len(map)
+            else: self._i = i
+            self._map = map
+
+        def __cmp__(self, other):
+            return self._i - other._i
+
+        def key(self):
+            """@return: the key of the key-value pair referenced by this
+                   iterator.
+            """
+            if self._i == -1:
+                raise IndexError(_("Cannot dereference iterator until after "
+                                 "first call to .next."))
+                        
+            return self._map._olist[self._i].k
+
+        def value(self):
+            """@return: the value of the key-value pair currently referenced
+                   by this iterator.
+            """
+            if self._i == -1:
+                raise IndexError(_("Cannot dereference iterator until after "
+                                 "first call to next."))
+            
+            return self._map._olist[self._i].v
+        
+        def item(self):
+            """@return the key-value pair referenced by this iterator.
+            """
+            return self.key(), self.value()
+
+        def _next(self):
+            self._i += 1
+            if self._i >= len(self._map):
+                self._i = len(self._map)
+                raise StopIteration()
+            
+        def _prev(self):
+            self._i -= 1
+            if self._i <= -1:
+                self._i = -1
+                raise StopIteration()
+
+            
+    class KeyIterator(_AbstractIterator):
+        """Returns the next key in the map.
+        
+           Unlike with CMap, insertion and deletion INVALIDATES iterators.
+           
+           This is implemented by moving the iterator and then
+           dereferencing it.  If we dereferenced and then moved
+           then we would get the odd behavior:
+           
+             Ex:  I have keys [1,2,3].  The iterator i points at 1.
+               print i.next()   # prints 1
+               print i.next()   # prints 2
+               print i.prev()   # prints 3
+               print i.prev()   # prints 2
+           
+           However, because we move and then dereference, when an
+           iterator is first created it points to nowhere
+           so that the first next moves to the first element.
+           
+           Ex:
+               >>> from PMap import PMap
+               >>> m = PMap()
+               >>> m[5] = 1
+               >>> m[8] = 4
+               >>> i = m.__iter__()
+               >>> print int(i.next())
+               5
+               >>> print int(i.next())
+               8
+               >>> print int(i.prev())
+               5
+           
+           We are still left with the odd behavior that an
+           iterator cannot be dereferenced until after the first next().
+           
+           Ex edge cases:
+               >>> from PMap import PMap
+               >>> m = PMap()
+               >>> i = m.__iter__()
+               >>> try:
+               ...     i.prev()
+               ... except StopIteration:
+               ...     print 'StopIteration'
+               ...
+               StopIteration
+               >>> m[5]='a'
+               >>> i = m.iterkeys()
+               >>> int(i.next())
+               5
+               >>> try: i.next()
+               ... except StopIteration:  print 'StopIteration'
+               ...
+               StopIteration
+               >>> int(i.prev())
+               5
+               >>> try: int(i.prev())
+               ... except StopIteration: print 'StopIteration'
+               ...
+               StopIteration
+               >>> int(i.next())
+               5
+               
+        """
+        def next(self):
+            self._next()
+            return self.key()
+
+        def prev(self):
+            self._prev()
+            return self.key()
+    
+    class ValueIterator(_AbstractIterator):
+        def next(self):
+            """@return: next value in the map.
+
+                >>> from PMap import *
+                >>> m = PMap()
+                >>> m[5] = 10
+                >>> m[6] = 3
+                >>> i = m.itervalues()
+                >>> int(i.next())
+                10
+                >>> int(i.next())
+                3
+            """
+            self._next()
+            return self.value()
+
+        def prev(self):
+            self._prev()
+            return self.value()
+       
+    class ItemIterator(_AbstractIterator):
+        def next(self):
+            """@return: next item in the map's key ordering.
+
+                >>> from PMap import *
+                >>> m = PMap()
+                >>> m[5] = 10
+                >>> m[6] = 3
+                >>> i = m.iteritems()
+                >>> k,v = i.next()
+                >>> int(k)
+                5
+                >>> int(v)
+                10
+                >>> k,v = i.next()
+                >>> int(k)
+                6
+                >>> int(v)
+                3
+            """
+            self._next()
+            return self.key(), self.value()
+
+        def prev(self):
+            self._prev()
+            return self.key(), self.value()
+    
+    class Item(object):
+        def __init__(self, k, v):
+            self.k = k
+            self.v = v
+
+        def __cmp__(self, other):
+            return self.k.__cmp__(other.k)
+ 
+        def __str__(self):
+            return "Item(%s,%s)" % ( str(self.k), str(self.v) )
+
+        def __repr__(self):
+            return "Item(%s,%s)" % ( str(self.k), str(self.v) )
+
+    def __init__(self, d = {}):
+        """
+            >>> m = PMap()
+            >>> len(m)
+            0
+            >>> m[5]=2
+            >>> len(m)
+            1
+            >>> print m[5]
+            2
+
+        """
+        self._olist = []         # list ordered based on key.
+        self._index = {}         # keyed based on key.
+        for key, value in d.items():
+            self[key] = value
+
+    def __contains__(self,x):
+        return self.get(x) != None
+    
+    def __iter__(self):
+        """@return: KeyIterator positioned one before the beginning of the
+            key ordering so that the first next() returns the first key."""
+        return PMap.KeyIterator(self)
+    
+    def begin(self):
+        """Returns an iterator pointing at first key-value pair.  This
+           differs from iterkeys, itervalues, and iteritems which return an
+           iterator pointing one before the first key-value pair.
+
+           @return: key iterator to first key-value.
+
+              >>> from PMap import *
+              >>> m = PMap()
+              >>> m[5.0] = 'a'
+              >>> i = m.begin()
+              >>> int(i.key())    # raises no IndexError.
+              5
+              >>> i = m.iterkeys()
+              >>> try:
+              ...     i.key()
+              ... except IndexError:
+              ...     print 'IndexError raised'
+              ...
+              IndexError raised
+           """
+        i = PMap.KeyIterator(self,i=0)
+        return i
+
+        
+    def end(self):
+        """Returns an iterator pointing after end of key ordering.
+           The iterator's prev method will move to the last
+           key-value pair in the ordering.  This in keeping with
+           the notion that a range is specified as [i,j) where
+           j is not in the range, and the range [i,j) where i==j
+           is an empty range.
+
+           This operation takes O(1) time.
+
+           @return: key iterator one after end.
+           """
+        i = PMap.KeyIterator(self,None)  # None goes to end of map.
+        return i
+
+    def iterkeys(self):
+        return PMap.KeyIterator(self)
+
+    def itervalues(self):
+        return PMap.ValueIterator(self)
+
+    def iteritems(self):
+        return PMap.ItemIterator(self)
+
+    def __len__(self):
+        return len(self._olist)
+
+    def __str__(self):
+        # dict is not necessarily in order.
+        #return str(dict(zip(self.keys(),self.values())))
+        s = "{"
+        first = True
+        for k,v in self.items():
+            if first:
+                first = False
+            else:
+                s += ", "
+            s += "%d: '%s'" % (k,v)
+        s += "}"
+        return s
+    
+    def __getitem__( self, k ):
+        return self._index[k]
+
+    def __setitem__(self, k, v ):
+        """O(n) insertion worst case. 
+
+            >>> from PMap import PMap
+            >>> m = PMap()
+            >>> m[6] = 'bar'
+            >>> m[6]
+            'bar'
+            >>>            
+            """
+        insort_left(self._olist, PMap.Item(k,v))
+        self._index[k] = v
+
+    def __delitem__(self, k):
+        """
+           >>> from CMap import CMap
+           >>> m = CMap()
+           >>> m[12] = 'foo'
+           >>> m[13] = 'bar'
+           >>> m[14] = 'boo'
+           >>> del m[12]
+           >>> try:
+           ...   m[12]
+           ... except KeyError:
+           ...   print 'ok'
+           ...
+           ok
+           >>> j = m.begin()
+           >>> int(j.next())
+           14
+           >>> i = m.begin()
+           >>> i.value()
+           'bar'
+           >>> del m[13]  # delete object referenced by an iterator
+        
+        """
+        del self._index[k]       # raises KeyError if key not in index.
+        i=bisect_left(self._olist,PMap.Item(k,None))
+        if self._olist[i].k != k: raise KeyError(k)
+        del self._olist[i]            
+                
+    def __del__(self):
+        del self._olist
+        del self._index
+        
+    
+    def __repr__(self):
+        return self.__str__()
+
+    def get(self, key, default=None):
+        """@return value corresponding to specified key or return 'default'
+               if the key is not found.
+           """
+        return self._index.get(key,default)
+
+
+    def keys(self):
+        """
+           >>> from PMap import *
+           >>> m = PMap()
+           >>> m[4] = 7
+           >>> m[6] = 3
+           >>> [int(x) for x in m.keys()]  # m.keys() but guaranteed integers.
+           [4, 6]
+           
+        """
+        k = []
+        for item in self._olist:
+            k.append(item.k)
+        return k
+    
+    def values(self):
+        """
+           >>> from PMap import PMap
+           >>> m = PMap()
+           >>> m[4] = 7
+           >>> m[6] = 3
+           >>> m.values()
+           [7, 3]
+           
+        """
+        v = []
+        for item in self._olist:
+            v.append(item.v)
+        return v
+        
+
+    def items(self):
+        """
+           >>> from PMap import PMap
+           >>> m = PMap()
+           >>> m[4] = 7
+           >>> m[6] = 3
+           >>> [(int(x[0]),int(x[1])) for x in m.items()]
+           [(4, 7), (6, 3)]
+           
+        """
+        itms = []
+        for item in self._olist:
+            itms.append((item.k,item.v))
+        return itms
+        
+    def has_key(self, key):
+        """
+           >>> from PMap import PMap
+           >>> m = PMap()
+           >>> m[4] = 7
+           >>> if m.has_key(4): print 'ok'
+           ...
+           ok
+           >>> if not m.has_key(7): print 'ok'
+           ...
+           ok
+           
+        """
+        return self._index.has_key(key)
+
+    def __del__(self):
+        del self._index
+        del self._olist
+
+    def clear(self):
+        """delete all entries
+
+           >>> from PMap import *
+           >>> m = PMap()
+           >>> m[4] = 7
+           >>> m.clear()
+           >>> print len(m)
+           0
+           
+        """
+
+        self.__del__()
+
+        self._olist = []
+        self._index = {}
+
+    def copy(self):
+        """return shallow copy"""
+        return PMap(self)
+
+    def lower_bound(self,key):
+        """
+         Finds smallest key equal to or above the lower bound.
+
+         Takes O(log n) time.
+
+         @param x: Key of (key, value) pair to be located.
+         @return: Key Iterator pointing to first item equal to or greater
+                  than key, or end() if no such item exists.
+
+           >>> from PMap import PMap
+           >>> m = PMap()
+           >>> m[10] = 'foo'
+           >>> m[15] = 'bar'
+           >>> i = m.lower_bound(11)   # iterator.
+           >>> int(i.key())
+           15
+           >>> i.value()
+           'bar'
+           
+         Edge cases:
+           >>> from PMap import PMap
+           >>> m = PMap()
+           >>> i = m.lower_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+
+           >>> m[10] = 'foo'
+           >>> i = m.lower_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+           >>> i = m.lower_bound(9)
+           >>> if i == m.begin(): print 'ok'
+           ...
+           ok
+
+        """
+        return PMap.KeyIterator(self,bisect_right(self._olist,
+                                         PMap.Item(key,None)))
+
+
+    def upper_bound(self, key):
+        """
+         Finds largest key equal to or below the upper bound.  In keeping
+         with the [begin,end) convention, the returned iterator
+         actually points to the key one above the upper bound. 
+
+         Takes O(log n) time.
+
+         @param  x:  Key of (key, value) pair to be located.
+         @return:  Iterator pointing to first element equal to or greater than
+                  key, or end() if no such item exists.
+
+           >>> from PMap import PMap
+           >>> m = PMap()
+           >>> m[10] = 'foo'
+           >>> m[15] = 'bar'
+           >>> m[17] = 'choo'
+           >>> i = m.upper_bound(11)   # iterator.
+           >>> i.value()
+           'bar'
+
+         Edge cases:
+           >>> from PMap import PMap
+           >>> m = PMap()
+           >>> i = m.upper_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+           >>> m[10] = 'foo'
+           >>> i = m.upper_bound(9)
+           >>> i.value()
+           'foo'
+           >>> i = m.upper_bound(11)
+           >>> if i == m.end(): print 'ok'
+           ...
+           ok
+
+        """
+        return PMap.KeyIterator(self, bisect_left(self._olist,
+                                                 PMap.Item(key,None)))
+
+    def find(self,key):
+        """
+          Finds the item with matching key and returns a KeyIterator
+          pointing at the item.  If no match is found then returns end().
+     
+          Takes O(log n) time.
+     
+            >>> from PMap import PMap
+            >>> m = PMap()
+            >>> i = m.find(10)
+            >>> if i == m.end(): print 'ok'
+            ...
+            ok
+            >>> m[10] = 'foo'
+            >>> i = m.find(10)
+            >>> int(i.key())
+            10
+            >>> i.value()
+            'foo'
+     
+        """
+        i = bisect_left(self._olist,PMap.Item(key,None))
+        if i >= len(self._olist): return self.end()
+        if self._olist[i].k != key: return self.end()  
+        return PMap.KeyIterator(self,i )
+
+    def update_key(self, iter, key):
+        """
+          Modifies the key of the item referenced by iter.  If the
+          key change is small enough that no reordering occurs then
+          this takes amortized O(1) time.  If a reordering occurs then
+          this takes O(log n).
+
+          WARNING!!! All iterators including the passed iterator must
+          be assumed to be invalid upon return.  (Note that this would
+          not be the case with CMap, where update_key would at most
+          invalidate the passed iterator).
+
+          If the passed key is already in the map then this raises
+          a KeyError exception and the map is left unchanged. If the
+          iterator is point
+
+          Typical use:
+            >>> from CMap import CMap
+            >>> m = CMap()
+            >>> m[10] = 'foo'
+            >>> m[8] = 'bar'
+            >>> i = m.find(10)
+            >>> m.update_key(i,7)   # i is assumed to be invalid upon return.
+            >>> del i
+            >>> [(int(x[0]),x[1]) for x in m.items()]  # reordering occurred.
+            [(7, 'foo'), (8, 'bar')]
+            >>> i = m.find(8)
+            >>> m.update_key(i,9)   # no reordering.
+            >>> del i
+            >>> [(int(x[0]),x[1]) for x in m.items()]
+            [(7, 'foo'), (9, 'bar')]
+
+          Edge cases:          
+            >>> i = m.find(7)
+            >>> i.value()
+            'foo'
+            >>> try:                # update to key already in the map.
+            ...     m.update_key(i,9)
+            ... except KeyError:
+            ...     print 'ok'
+            ...
+            ok
+            >>> m[7]
+            'foo'
+            >>> i = m.iterkeys()
+            >>> try:                # updating an iter pointing at BEGIN.
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+            >>> i = m.end()
+            >>> try:                # updating an iter pointing at end().
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+                        
+        """
+        old_key = iter.key()
+        if key == old_key: return
+        try:
+            before = copy(iter)
+            before.prev()
+            lower = before.key()
+        except StopIteration:
+            lower = old_key - 1  # arbitrarily lower.
+
+        if lower < key:
+            try:
+                iter.next()
+                higher = i.key()
+            except StopIteration:
+                higher = old_key + 1 # arbitrarily higher.            
+
+            if key < higher:     # if no reordering is necessary...
+                self._olist[iter._i].key = key  
+                del self._index[old_key]
+                self._index[key] = old_val
+                return
+
+        # else reordering is necessary so delete and reinsert.
+        del self[old_key]
+        self[key] = old_val
+
+    def append(self, k, v):
+        """Performs an insertion with the hint that it probably should
+           go at the end.
+
+           Raises KeyError if the key is already in the map.
+
+             >>> from PMap import *
+             >>> m = PMap()
+             >>> m.append(5,'foo')
+             >>> m
+             {5: 'foo'}
+             >>> m.append(10, 'bar')
+             >>> m
+             {5: 'foo', 10: 'bar'}
+             >>> m.append(3, 'coo')   # out-of-order.
+             >>> m
+             {3: 'coo', 5: 'foo', 10: 'bar'}
+             >>> try:
+             ...     m.append(10, 'blah') # append key already in map.
+             ... except KeyError:
+             ...     print 'ok'
+             ...
+             ok
+             >>> m
+             {3: 'coo', 5: 'foo', 10: 'bar'}
+
+        """
+        if self._index.has_key(k):
+            raise KeyError(_("Key is already in the map.  "
+                             "Keys must be unique."))
+                        
+        if len(self._olist) == 0 or k > self._olist[len(self._olist)-1].k:
+            self._olist.append(PMap.Item(k,v))
+        else:
+            insort_left(self._olist, PMap.Item(k,v))
+        self._index[k] = v
+
+class PIndexedMap(PMap):
+    """This is an ordered mapping, exactly like PMap except that it
+       provides a cross-index allowing O(1) searches based on value.
+       This adds  the constraint that values must be unique.
+
+       The cross-index is implemented with a dict.
+
+       Item insertion: O(n)
+       Item deletion:  O(n)
+       Key search:     average O(1)
+       Value search:   average O(1)
+       Iteration step: O(1)
+       Memory:         O(n)
+
+       This is not semantically equivalent to CIndexedMap
+       in the following ways:
+         - iterators are invalidated by insertions and deletions.
+       """
+           
+    def __init__(self, dict={} ):
+        """
+            >>> m = PIndexedMap()
+            >>> len(m)
+            0
+            >>> m[5]=2
+            >>> len(m)
+            1
+            >>> print m[5]
+            2
+
+        """
+        self._value_index = {}    # keyed on value.
+        PMap.__init__(self, dict)
+
+    def __setitem__(self, k, v ):
+        """O(n) insertion worst case. 
+
+            >>> from PMap import *
+            >>> m = PIndexedMap()
+            >>> m[6] = 'bar'
+            >>> m[6]
+            'bar'
+            >>> m.get_key_by_value('bar')
+            6
+            >>> try:
+            ...    m[7] = 'bar'
+            ... except ValueError:
+            ...    print 'value error'
+            value error
+            >>> m[6] = 'foo'  # change 6 so 7 can be mapped to 'bar'
+            >>> m[6]
+            'foo'
+            >>> m[7] = 'bar'
+            >>> m[7]
+            'bar'
+            >>> m[7] = 'bar'  # should not raise exception
+            >>> m[7] = 'goo'
+            >>> m.get_key_by_value('bar')  # should return None.
+            >>>
+            
+            """
+        # if value is already in the map then throw an error.
+        try:
+            if self._value_index[v] != k:
+                raise ValueError( _("Value is already in the map. "
+                                  "Both value and key must be unique." ))
+        except KeyError:
+            # value was not in the cross index.
+            pass
+
+        try:
+            old_val = self._index[k]
+            self._index[k] = v
+            del self._value_index[old_val]
+            self._value_index[v] = k
+            
+        except KeyError:
+            # key is not already in the map.
+            pass
+            
+        insort_left(self._olist, PIndexedMap.Item(k,v))
+        self._value_index[v] = k
+        self._index[k] = v
+
+    def __delitem__(self, k):
+        """
+            >>> from PMap import PIndexedMap
+            >>> m = PIndexedMap()
+            >>> m[6] = 'bar'
+            >>> m[6]
+            'bar'
+            >>> int(m.get_key_by_value('bar'))
+            6
+            >>> del m[6]
+            >>> if m.get_key_by_value('bar'):
+            ...     print 'found'
+            ... else:
+            ...     print 'not found.'
+            not found.
+
+        """
+        del self._index[k]       # raises KeyError if key not in index.
+        i=bisect_left(self._olist,PIndexedMap.Item(k,None))
+        if self._olist[i].k != k: raise KeyError(k)
+        v=self._olist[i].v
+        del self._value_index[v]
+        del self._olist[i]
+            
+    def get_key_by_value( self, v ):
+        """Returns the key cross-indexed from the passed unique value, or
+           returns None if the value is not in the map."""
+        k = self._value_index.get(v)
+        if k == None: return None
+        return k
+
+    def find_key_by_value( self, v ):
+        """Returns a key iterator cross-indexed from the passed unique value
+           or end() if no value found.
+
+           >>> from PMap import *
+           >>> m = PIndexedMap()
+           >>> m[6] = 'abc'
+           >>> i = m.find_key_by_value('abc')
+           >>> i.key()
+           6
+           >>> i = m.find_key_by_value('xyz')
+           >>> if i == m.end(): print 'i points at end()'
+           i points at end()
+
+        """
+        try:
+            k = self._value_index[v]  # raises KeyError if no value found.
+            i = bisect_left(self._olist,PIndexedMap.Item(k,None))
+            return PIndexedMap.KeyIterator(self,i)
+        except KeyError, e:
+            return self.end()
+                
+    def __del__(self):
+        del self._value_index
+        PMap.__del__(self)
+    
+    def clear(self):
+        """delete all entries
+
+           >>> from PMap import PIndexedMap
+           >>> m = PIndexedMap()
+           >>> m[4] = 7
+           >>> m.clear()
+           >>> print len(m)
+           0
+           
+        """
+        PMap.clear(self)
+        self._value_index = {}
+
+    def copy(self):
+        """return shallow copy"""
+        return PIndexedMap(self)
+
+    def update_key(self, iter, key):
+        """
+          Modifies the key of the item referenced by iter.  If the
+          key change is small enough that no reordering occurs then
+          this takes amortized O(1) time.  If a reordering occurs then
+          this takes O(log n).
+
+          WARNING!!! The passed iterator MUST be assumed to be invalid
+          upon return and should be deallocated.
+
+          If the passed key is already in the map then this raises
+          a KeyError exception and the map is left unchanged. If the
+          iterator is point
+
+          Typical use:
+            >>> from PMap import PIndexedMap
+            >>> m = PIndexedMap()
+            >>> m[10] = 'foo'
+            >>> m[8] = 'bar'
+            >>> i = m.find(10)
+            >>> m.update_key(i,7)   # i is assumed to be invalid upon return.
+            >>> del i
+            >>> m                   # reordering occurred.
+            {7: 'foo', 8: 'bar'}
+            >>> i = m.find(8)
+            >>> m.update_key(i,9)   # no reordering.
+            >>> del i
+            >>> m
+            {7: 'foo', 9: 'bar'}
+
+          Edge cases:          
+            >>> i = m.find(7)
+            >>> i.value()
+            'foo'
+            >>> try:                # update to key already in the map.
+            ...     m.update_key(i,9)
+            ... except KeyError:
+            ...     print 'ok'
+            ...
+            ok
+            >>> m[7]
+            'foo'
+            >>> i = m.iterkeys()
+            >>> try:                # updating an iter pointing at BEGIN.
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+            >>> i = m.end()
+            >>> try:                # updating an iter pointing at end().
+            ...    m.update_key(i,10)
+            ... except IndexError:
+            ...    print 'ok'
+            ...
+            ok
+                        
+        """
+        old_key = iter.key()
+        if key == old_key: return
+        old_val = iter.value()
+        if self._index.has_key(key): raise KeyError(key)
+        try:
+            before = copy(iter)
+            before.prev()
+            lower = before.key()
+        except StopIteration:
+            lower = old_key - 1  # arbitrarily lower.
+
+        if lower < key:
+            try:
+                iter.next()
+                higher = i.key()
+            except StopIteration:
+                higher = old_key + 1 # arbitrarily higher.            
+
+            if key < higher:     # if no reordering is necessary...
+                self._olist[iter._i].key = key  
+                del self._index[old_key]
+                self._index[key] = old_val
+                self._value_index[old_val] = key
+                return
+
+        # else reordering is necessary so delete and reinsert.
+        del self[old_key]
+        self[key] = old_val
+
+    def append(self, k, v):
+        """Performs an insertion with the hint that it probably should
+           go at the end.
+
+           Raises KeyError if the key is already in the map.
+
+             >>> from PMap import PIndexedMap
+             >>> m = PIndexedMap()
+             >>> m.append(5,'foo')
+             >>> m
+             {5: 'foo'}
+             >>> m.append(10, 'bar')
+             >>> m
+             {5: 'foo', 10: 'bar'}
+             >>> m.append(3, 'coo')   # out-of-order.
+             >>> m
+             {3: 'coo', 5: 'foo', 10: 'bar'}
+             >>> m.get_key_by_value( 'bar' )
+             10
+             >>> try:
+             ...     m.append(10, 'blah') # append key already in map.
+             ... except KeyError:
+             ...     print 'ok'
+             ...
+             ok
+             >>> m
+             {3: 'coo', 5: 'foo', 10: 'bar'}
+             >>> try:
+             ...     m.append(10, 'coo') # append value already in map.
+             ... except ValueError:
+             ...     print 'ok'
+             ...
+             ok
+
+        """
+        # if value is already in the map then throw an error.
+        try:
+            if self._value_index[v] != k:
+                raise ValueError( _("Value is already in the map. "
+                                  "Both values and keys must be unique.") )
+        except KeyError:
+            # values was not in the cross index.
+            pass
+
+        if self._index.has_key(k):
+            raise KeyError( _("Key is already in the map.  Both values and "
+                            "keys must be unique.") )
+                        
+        if len(self._olist) == 0 or k > self._olist[len(self._olist)-1].k:
+            self._olist.append(PIndexedMap.Item(k,v))
+        else:
+            insort_left(self._olist, PIndexedMap.Item(k,v))
+
+        self._value_index[v] = k
+        self._index[k] = v
+        
+if __name__ == "__main__":
+
+    import sys, doctest
+
+    ############
+    # UNIT TESTS
+    if len(sys.argv) == 1:
+        import doctest,sys
+        print "Testing module"
+        doctest.testmod(sys.modules[__name__])
+

+ 62 - 0
html/bin/clients/mainline/BTL/SaneThreadedResolver.py

@@ -0,0 +1,62 @@
+# Just like ThreadedResolver, but doesn't suck
+#
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# by Greg Hazel
+
+import socket
+import operator
+from twisted.internet import error, defer, threads
+from twisted.python import failure
+
class SaneThreadedResolver:
    """Hostname resolver that runs socket.gethostbyname() on a worker
    thread while enforcing an overall timeout from the reactor thread.

    getHostByName() returns a Deferred that fires with the address
    string, or errbacks with error.DNSLookupError on failure or timeout.
    """
    # I won't do this. Zope.interface sucks.
    #implements(IResolverSimple)

    def __init__(self, reactor):
        self.reactor = reactor
        # caller's Deferred -> DelayedCall for its pending timeout
        self._runningQueries = {}

    def _fail(self, name, err):
        # Wrap any failure text in the DNSLookupError callers expect.
        err = error.DNSLookupError("address %r not found: %s" % (name, err))
        return failure.Failure(err)

    def _checkTimeout(self, result, name, userDeferred):
        # Runs both when the lookup finishes and when the timeout fires;
        # whichever happens first wins and the other becomes a no-op.
        if userDeferred in self._runningQueries:
            cancelCall = self._runningQueries.pop(userDeferred)
            # BUG FIX: when _checkTimeout is invoked BY the timeout's own
            # DelayedCall, calling cancel() on it raises
            # twisted.internet.error.AlreadyCalled.  Only cancel a call
            # that is still pending.
            if cancelCall.active():
                cancelCall.cancel()

        if userDeferred.called:
            return

        if isinstance(result, failure.Failure):
            userDeferred.errback(self._fail(name, result.getErrorMessage()))
        else:
            userDeferred.callback(result)

    def _doGetHostByName(self, name, onStart):
        # Worker-thread body; onStart is bounced back to the reactor
        # thread so the timeout clock starts when the lookup does.
        self.reactor.callFromThread(onStart)
        return socket.gethostbyname(name)

    def getHostByName(self, name, timeout = (1, 3, 11, 45)):
        # Total budget is the sum of the IResolverSimple-style retry
        # intervals; fall back to one minute if none are given.
        if timeout:
            timeoutDelay = sum(timeout)
        else:
            timeoutDelay = 60
        userDeferred = defer.Deferred()
        def _onStart():
            cancelCall = self.reactor.callLater(
                timeoutDelay, self._checkTimeout,
                self._fail(name, "timeout error"), name, userDeferred)
            self._runningQueries[userDeferred] = cancelCall
        lookupDeferred = threads.deferToThread(self._doGetHostByName, name, _onStart)
        lookupDeferred.addBoth(self._checkTimeout, name, userDeferred)
        return userDeferred

+ 26 - 0
html/bin/clients/mainline/BTL/ThreadProxy.py

@@ -0,0 +1,26 @@
+from BTL.defer import Deferred, defer_to_thread
+
class ThreadProxy(object):
    """Proxy that turns every method call on the wrapped object into a
    deferred call executed on a worker thread.

    Plain (non-callable) attributes are returned directly; callables are
    replaced by wrappers that route through defer_to_thread using the
    two queue-task callables supplied at construction.
    """
    __slots__ = ('obj', 'local_queue_task', 'thread_queue_task')

    def __init__(self, obj, local_queue_task, thread_queue_task):
        self.obj = obj
        self.local_queue_task = local_queue_task
        self.thread_queue_task = thread_queue_task

    def __gen_call_wrapper__(self, f):
        # Build a stand-in that runs f on the worker thread and returns
        # a Deferred for its eventual result.
        def deferred_call(*args, **kwargs):
            return defer_to_thread(self.local_queue_task,
                                   self.thread_queue_task,
                                   f, *args, **kwargs)
        return deferred_call

    def __getattr__(self, attr):
        target = getattr(self.obj, attr)
        if not callable(target):
            return target
        return self.__gen_call_wrapper__(target)

    def call_with_obj(self, _f, *a, **k):
        """Run _f(obj, *a, **k) on the worker thread."""
        return self.__gen_call_wrapper__(_f)(self.obj, *a, **k)
+
+

+ 69 - 0
html/bin/clients/mainline/BTL/TimeLeftEstimator.py

@@ -0,0 +1,69 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen
+
+from BTL.platform import bttime
+
+
class TimeLeftEstimator(object):
    """Estimates seconds remaining in a transfer from a smoothed rate.

    `left` is the byte count outstanding; add_amount() feeds received
    bytes in, get_time_left() reports the current estimate (None until
    any data has arrived or while the rate is zero).
    """

    def __init__(self, left):
        self.left = left                 # bytes still outstanding
        self.start = None                # averaging-window start time
        self.last = None                 # time of last update
        self.rate = 0                    # smoothed bytes/second
        self.remaining = None            # current estimate, seconds
        self.broke = False               # window has reached full width
        self.got_anything = False
        self.when_next_expected = bttime() + 5

    def add_amount(self, amount):
        """ add number of bytes received """
        if self.got_anything:
            self.update(bttime(), amount)
            return
        # First data: backdate the clock slightly so the initial rate
        # estimate isn't wildly inflated.
        self.got_anything = True
        self.start = bttime() - 2
        self.last = self.start
        self.left -= amount

    def remove_amount(self, amount):
        self.left += amount

    def get_time_left(self):
        """ returns seconds """
        if not self.got_anything:
            return None
        now = bttime()
        if now - self.last > 15:
            # stale: decay the rate with a zero-byte update
            self.update(now, 0)
        return self.remaining

    def get_size_left(self):
        return self.left

    def update(self, t, amount):
        self.left -= amount
        if amount == 0 and t < self.when_next_expected:
            return
        try:
            self.rate = ((self.rate * (self.last - self.start)) + amount) / (t - self.start)
            self.last = t
            self.remaining = self.left / self.rate
            # keep the averaging window no wider than the estimate itself
            if self.start < self.last - self.remaining:
                self.start = self.last - self.remaining
        except ZeroDivisionError:
            self.remaining = None
        # clamp the window to 20 seconds once it has ever grown that wide
        if self.broke and self.last - self.start < 20:
            self.start = self.last - 20
        if self.last - self.start > 20:
            self.broke = True
        self.when_next_expected = t + min((amount / max(self.rate, 0.0001)), 5)

+ 21 - 0
html/bin/clients/mainline/BTL/__init__.py

@@ -0,0 +1,21 @@
+
+
+LOCALE_URL = "http://translations.bittorrent.com/"
+
class BTFailure(Exception):
    """Base exception for BitTorrent-level errors (e.g. bdecode raises it
    for malformed bencoded data)."""
    pass
+
+# this class is weak sauce
+class InfoHashType(str):
+    def __repr__(self):
+        return self.encode('hex')
+    def short(self):
+        return repr(self)[:8]
+
+# soon.
+## InfoHashType breaks bencode, so just don't do it.
+#def InfoHashType(s):
+#    return s
+
def infohash_short(s):
    # First 8 hex digits of a raw infohash string, for compact logging.
    # NOTE(review): the 'hex' codec is Python 2 only.
    return s.encode('hex')[:8]

BIN
html/bin/clients/mainline/BTL/addrmap.dat


+ 36 - 0
html/bin/clients/mainline/BTL/asyncexecutor.py

@@ -0,0 +1,36 @@
+""" An simple lightweight asynchronous executor class with nice
+    java type static methods """
+from twisted.python.threadpool import ThreadPool
+
+
+
class AsyncExecutor(object):
    """ defaults to minthreads=5, maxthreads=20 """
    # One ThreadPool shared by every user of the class-level API.
    pool = ThreadPool( name = 'AsyncExecutorPool')

    def _execute(cls, func, *args, **kwargs):
        # Lazily start the shared pool on first use, then hand the
        # callable off for asynchronous execution.
        if not cls.pool.started:
            cls.pool.start()
        cls.pool.dispatch(None, func, *args, **kwargs)

    execute = classmethod(_execute)
    stop = pool.stop
+    
+def test():
+    import random
+    import time
+
+    def test(digit):
+        print 'Testing %d' % digit
+        time.sleep(random.randint(1, 5000)/1000)
+        print '     finished with test %d' % digit
+    for i in xrange(10):
+        AsyncExecutor.execute(test, )
+    AsyncExecutor.stop() 
+
+if __name__ == '__main__':
+    test()
+    
+    
+    
+    

+ 70 - 0
html/bin/clients/mainline/BTL/atexit_threads.py

@@ -0,0 +1,70 @@
+# threads are dumb, this module is smart.
+#
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# by Greg Hazel
+
+import sys
+import time
+import atexit
+import threading
+
+def _get_non_daemons():
+    return [d for d in threading.enumerate() if not d.isDaemon() and d != threading.currentThread()]
+
def register(func, *targs, **kargs):
    """Like atexit.register, except func runs only after every
    non-daemon thread has finished."""
    def waiter():
        for th in _get_non_daemons():
            th.join()
        func(*targs, **kargs)
    atexit.register(waiter)
+
+
def megadeth():
    # Last-resort process killer: wait 10 seconds for a clean exit, then
    # hard-kill the process via wx.  Silently does nothing when wx is not
    # importable (non-GUI deployments).
    time.sleep(10)
    try:
        import wx
        wx.Kill(wx.GetProcessId(), wx.SIGKILL)
    except:
        pass
+
def register_verbose(func, *targs, **kargs):
    """Like register(), but waits at most ~4 seconds politely, then
    complains on stderr and arms a hard-kill fallback thread."""
    def verbose_waiter():
        budget = 4  # total seconds we will wait politely
        for th in _get_non_daemons():
            began = time.time()
            th.join(budget)
            budget = max(0, budget - (time.time() - began))
            if budget == 0:
                break

        # kill all the losers
        # remove this when there are no more losers
        killer = threading.Thread(target=megadeth)
        killer.setDaemon(True)
        killer.start()

        if budget == 0:
            sys.stderr.write("non-daemon threads not shutting down "
                             "in a timely fashion:\n")
            stragglers = _get_non_daemons()
            for th in stragglers:
                sys.stderr.write("  %s\n" % th)
            sys.stderr.write("You have no chance to survive make your time.\n")
            for th in stragglers:
                th.join()

        func(*targs, **kargs)

    atexit.register(verbose_waiter)
+

+ 77 - 0
html/bin/clients/mainline/BTL/auth_xmlrpc.py

@@ -0,0 +1,77 @@
+
+# Copyright 2006 BitTorrent, Inc. All Rights Reserved.
+#
+# XML-RPC that supports public key encryption and authentication.
+# Author: David Harrison
+
+from BTL.reactor_magic import reactor
+from twisted.web import xmlrpc
+from twisted.internet.ssl import SSL
+from twisted.internet import ssl
+
+debug = False
+
+## Keep these next two commented out classes.  They can be useful for
+## spying on calls.
class AuthQueryProtocol(xmlrpc.QueryProtocol):
    """QueryProtocol that prints protocol events when the module-level
    `debug` flag is set, then defers to the base implementation."""

    def connectionMade(self):
        if debug:
            print "connectionMade"
        xmlrpc.QueryProtocol.connectionMade(self)

    def handleStatus(self, version, status, message):
        # Traces the HTTP status line of the response.
        if debug:
            print "version=%s\nstats=%s\nmessage=%s" % (version,status,message)
        xmlrpc.QueryProtocol.handleStatus(self,version,status,message)

    def handleResponse(self, contents):
        # Traces the raw response body before normal XML-RPC handling.
        if debug:
            print "contents=%s" % str(contents)
        xmlrpc.QueryProtocol.handleResponse(self, contents)
+
class AuthQueryFactory(xmlrpc.QueryFactory):
    """QueryFactory wired to the tracing AuthQueryProtocol."""
    #protocol = xmlrpc.QueryProtocol
    protocol = AuthQueryProtocol

    def __init__( self, path, host, method, user=None, password=None, *args):
        # Pure pass-through; kept so calls can be spied on here if needed.
        xmlrpc.QueryFactory.__init__(self, path, host, method, user, password, *args)
+## End Comment
+
class AuthContextFactory(ssl.ClientContextFactory):
    """ClientContextFactory that presents a client certificate (and
    optionally its private key) during the SSL handshake."""

    def __init__(self, certificate_file_name, private_key_file_name):
        self.certificate_file_name = certificate_file_name
        self.private_key_file_name = private_key_file_name

    def getContext(self):
        # self.method is the class attribute inherited from
        # ClientContextFactory.
        context = SSL.Context(self.method)
        context.use_certificate_file(self.certificate_file_name)
        if self.private_key_file_name:
            context.use_privatekey_file(self.private_key_file_name)
        return context
+
class AuthProxy(xmlrpc.Proxy):
    """xmlrpc.Proxy that authenticates with an SSL client certificate.

    For https URLs the connection is made through AuthContextFactory so
    the given certificate (and optional private key) is presented to the
    server; plain http URLs fall back to an ordinary TCP connection.
    """

    def __init__(self, url, certificate_file_name, private_key_file_name = None,
                 user=None, password=None):
        xmlrpc.Proxy.__init__(self, url, user, password)
        self.certificate_file_name = certificate_file_name
        self.private_key_file_name = private_key_file_name

    def callRemote(self, method, *args):
        """Invoke `method(*args)` on the server; returns the factory's
        Deferred for the result."""
        factory = AuthQueryFactory(self.path, self.host, method,
                                   self.user, self.password, *args)
        if self.secure:
            # (removed a dead local `from twisted.internet import ssl`
            # that shadowed the module-level import and was never used)
            # Default to the standard HTTPS port when the URL named none.
            reactor.connectSSL(self.host, self.port or 443, factory,
                               AuthContextFactory(self.certificate_file_name,
                                                  self.private_key_file_name))
        else:
            reactor.connectTCP(self.host, self.port or 80, factory)
        return factory.deferred
+
+

+ 310 - 0
html/bin/clients/mainline/BTL/bdistutils.py

@@ -0,0 +1,310 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# by David Harrison
+import sys, os, shutil
+from distutils import core
+if sys.platform == "win32":
+    setup = core.setup
+    # no need to import anything further.  None of the remaining
+    # functionality is available in windows.
+else:
+    import pwd
+    from distutils.sysconfig import get_python_lib
+    import distutils.sysconfig
+    from stat import S_IMODE, S_IRUSR, S_IXUSR, S_IRGRP, S_IXGRP, S_IROTH, S_IXOTH
+    from daemon import getuid_from_username, getgid_from_username
+    from daemon import getgid_from_groupname
+    
+    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul',
+                  'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+    
    class SetupException(Exception):
        """Raised when an install step cannot proceed (e.g. a missing
        version.txt or an already-installed revision)."""
        pass
+    
+    def getuid_for_path(path):
+        return os.stat(path).st_uid
+    
+    def seteugid_to_login():
+        """set effective user id and effective group id to the user and group ids
+           of the user logged into this terminal."""
+        uid = pwd.getpwnam(os.getlogin())[2]  # search /etc/passwd for uid and
+        gid = pwd.getpwnam(os.getlogin())[3]  # gid of user logged into this
+                                              # terminal.
+        os.setegid(gid)
+        os.seteuid(uid)                       # Is there a better way? --Dave
+    
+    def get_svn_change_code():
+        """Returns the svn repository's date and revision number for the current
+           working directory.  The returned string has the format 'YYYY_MM_DD_revXXXX'
+           where XXXX is the revision number."""
+        def to_dict(lines):
+            # FRAGILE XXX
+            splitted = [l.split(':') for l in lines]
+            pairs = [(s[0].strip(), ':'.join(s[1:]).strip()) for s in splitted]
+            d = dict(pairs)
+            return d
+    
+        # returns date and revision number
+        d = to_dict(os.popen("svn info").readlines())
+        url = d["URL"]
+        revision = int(d["Last Changed Rev"])
+        date = d["Last Changed Date"]
+        date = date.split(' ')[0]         # keep only "YYYY-MM-DD"
+        date = "_".join(date.split('-'))  # replace dash with underscore
+        date_rev = "%s_rev%.4d" % (date,revision)
+        return date_rev
+    
    def get_cdv_change_code():
        """Return 'YYYY_MM_DD_HH_MM_SS_<code>' for change 1 in `cdv history`."""
    
        # cdv won't run on the dev machines as root.  nfs does not allow
        # root access to mounted drives.  --Dave
        if os.getuid() == 0 and getuid_for_path(".") != 0:
            seteugid_to_login()
    
        # fragile. XXXX
        l = os.popen("cdv history -c 1").readlines()[0].split(" ")
        if os.getuid() == 0:
            # restore root's effective uid after the lookup
            # NOTE(review): the effective gid is not restored here -- confirm
            # whether that is intentional.
            os.seteuid(0)
            #os.setegid(oldgid)
    
        l = [x.strip() for x in l if x.strip() != '']  # remove empty strings.
        # expected fields: <x> <code> <x> <x> <x> <x> <dow> <month> <dom> <time> <year>
        x,code,x,x,x,x,dow,mo,dom,t,y = l
        month = "%.2d" % (months.index(mo)+1)
        dom = "%.2d" % int(dom)    # single digit day of month like 3 becomes 03
        t = "_".join(t.split(':')) # convert ':' to underscores in time.
        return y+"_"+month+"_"+dom+"_"+t+"_"+code
+    
+    def get_install_prefix( appname ):
+        """Generates directory name /opt/appname_YYYY_MM_DD_revXXXX"""
+    
+        # fragile. XXXX
+        #change = get_cdv_change_code()
+        change = get_svn_change_code()
+        path = os.path.join("/opt", appname+"_"+change)
+        return os.path.normpath(path)
+    
    def get_unique_install_prefix( appname ):
        """Generates a directory name /opt/appname_YYYY_MM_DD_revXX or
           /opt/appname_YYYY_MM_DD_revXX_vVVV if the prior exists.
           VVV is a counter that is incremented with each install of
           the distribution with the same svn change code.
    
           Unlike get_install_prefix, this does not assume that cdv exists
           on the system, but instead assumes there is a version.txt
           file in the distribution root directory containing the cdv change
           date and code information.  This file is created in the install
           directory whenever bdistutils is run with the installdev option."""
        vfile = os.path.join(sys.path[0], "version.txt")
        if not os.path.exists(vfile):
            raise SetupException( "Cannot derive install prefix from cdv change date "
                                  "code, because there is no version.txt file in the "
                                  "root of the distribution tree." )
        cfp = open(vfile, 'r')
        change_str = cfp.readline().strip()
        prefix = os.path.join("/opt", appname+"_"+change_str)
        while os.path.exists(prefix):
            path, name = os.path.split(prefix)
            code_or_cnt = prefix.split("_")[-1]
            if code_or_cnt[0] == 'v':
                # already carries a _vNNN suffix: strip it and bump the counter
                cnt = int(code_or_cnt[1:])
                cnt += 1
                prefix = "_".join(prefix.split("_")[:-1])
            else:
                cnt = 1
            # NOTE(review): '%03.f' zero-pads a precision-0 float (e.g. '001');
            # '%03d' was probably intended, but output is identical for ints.
            prefix = "%s_v%03.f" % (prefix, cnt)
        return os.path.normpath(prefix)
+    
    def setup( **kwargs ):
        """site-specific setup.
    
           If sys.argv[1] is not installdev then this behaves
           as python's distutils.core.setup.
    
           If sys.argv[1] is installdev then this installs into a
           directory like:
    
           /opt/Mitte_2006_10_16_14_39_51_78a5
    
           The date and time is the commit time for this version in the svn repository
           and 78a5 is the code for the version in svn.
    
           Also creates a symbolic link like /opt/mitte pointing to
           /opt/Mitte_2006_10_16_14_39_51_78a5.
    
           Extra (non-distutils) keyword args: destname, username,
           groupname, symlinks -- all stripped before calling
           distutils.core.setup.
           """
    
        name = kwargs['name']
    
        # setup doesn't like kwargs it doesn't know.
        # NOTE(review): `symlinks` and `installprod` are accepted/initialized
        # but never used below -- confirm whether they are vestigial.
        destname = kwargs.get('destname', name)
        if kwargs.has_key('destname'): del kwargs['destname']
        username = kwargs.get('username',None)
        if kwargs.has_key('username'): del kwargs['username']
        groupname = kwargs.get('groupname',None)
        if kwargs.has_key('groupname'): del kwargs['groupname']
        symlinks = kwargs.get('symlinks',None)
        if kwargs.has_key('symlinks'): del kwargs['symlinks']
    
        installdev=False
        installprod = False
        old_prefix = None
    
        if len(sys.argv)>1 and sys.argv[1] == "force-installdev":
            # force install simply installs in a new directory.
            sys.prefix = get_unique_install_prefix(destname)
            distutils.sysconfig.PREFIX=sys.prefix
            print "get_unique_install_prefix returned sys.prefix=", sys.prefix
            installdev = True
            sys.argv[1] = "install"
    
            # determine old install directory.
            if os.path.exists( os.path.join("/opt/",destname) ):
                old_prefix = os.path.realpath(os.path.join("/opt/", destname))
                old_prefix = os.path.split(old_prefix)[0]
    
        elif len(sys.argv)>1 and sys.argv[1] == "installdev":
            installdev=True
            sys.argv[1] = "install"
    
            # create change code file.
            code = get_svn_change_code()
            if code:
                # may fail if root and destination is nfs mounted.
                try:
                    cfp = open(os.path.join(sys.path[0],"version.txt"), 'w')
                    cfp.write( code )
                    cfp.close()
                except IOError:
                    # try again as login username.
                    old_uid = os.geteuid()
                    seteugid_to_login()
                    cfp = open(os.path.join(sys.path[0],"version.txt"), 'w')
                    cfp.write( code )
                    cfp.close()
                    os.seteuid(old_uid)  # require root access to install into /opt or python site-packages.
    
            # determine install directory
            sys.prefix = get_install_prefix(destname)
            distutils.sysconfig.PREFIX=sys.prefix
            if os.path.exists(sys.prefix):
                raise SetupException( "This code revision has already been installed %s."
                                 "  If you want to install it again then move the "
                                 "existing directory or use force-installdev." % sys.prefix )
    
            # determine old install directory.
            if os.path.exists( os.path.join("/opt/",destname) ):
                old_prefix = os.path.realpath(os.path.join("/opt/", destname))
                old_prefix = os.path.split(old_prefix)[0]
    
        if len(sys.argv)>1 and sys.argv[1] == "install":
            # building with root privilege can fail if the destination of the
            # build is nfs mounted.
            sys.argv[1] = "build"
            try:
                # try as root if I am root.
                core.setup(**kwargs)
            except:
                # try using login username
                old_uid = os.geteuid()
                seteugid_to_login()
                core.setup(**kwargs)
                os.seteuid(old_uid)
            sys.argv[1] = "install"
    
        # the actual install (or whatever command sys.argv requested)
        try:
            core.setup(**kwargs)
        except:
            # try using login username
            old_uid = os.geteuid()
            seteugid_to_login()
            core.setup(**kwargs)
            os.seteuid(old_uid)
    
        if installdev:
            print "installdev is True."
    
            # shortened the directory path.
            #long_path = os.path.join(sys.path[0], "build", "lib", name)
            long_path = os.path.join(sys.prefix, "lib", "python2.4", "site-packages", name)
            print "long_path=",long_path
            dest = os.path.join(sys.prefix,name)
            print "dest=", dest
            if os.path.exists(long_path):
                print "copytree from ", long_path, " to ", dest
                shutil.copytree(long_path,dest)
            #shutil.rmtree(os.path.join(sys.prefix, "lib" ))
    
            # copy all files not in packages into /opt.
            for f in os.listdir('.'):
                if f == "build": continue
                if f == ".cdv": continue
                if f == ".svn": continue
                if f == "lib": continue
                if not os.path.exists( os.path.join(sys.prefix,f)):
                    if os.path.isdir(f):
                        shutil.copytree(f,os.path.join(sys.prefix,f),False)
                    else:
                        shutil.copyfile(f,os.path.join(sys.prefix,f))
    
            # create symlink from /opt/blah to /opt/blah_YYYY_MM_DD_HH:MM:SS_code
            link_to = sys.prefix
            symlnk = os.path.join( '/opt', destname )
            print "removing symlink from", symlnk
            if os.path.islink(symlnk):
                print "removing", symlnk
                os.remove(symlnk)
            print "creating symlink", symlnk, "to", link_to
            os.symlink(link_to, symlnk)
    
            # resolve uid/gid for the pid directory created below.
            if username:
                uid = getuid_from_username(username)
            else:
                uid = -1
            if groupname:
                gid = getgid_from_groupname(groupname)
            elif username:
                gid = getgid_from_username(username)
            else:
                gid = -1
    
            # recursively change owner and group name of install directory.
            ## Turns out that this is a bad idea.  The account in which the
            ## service runs should not own its own install directory, because
            ## it could modify its own code.
            #if uid != -1 or gid != -1:
            #    os.chown(sys.prefix,uid,gid)
            #    dirs = os.walk(sys.prefix)
            #    for path, dirnames, filenames in dirs:
            #        for dir in dirnames:
            #            os.chown(os.path.join(path, dir),uid,gid)
            #        for fname in filenames:
            #            os.chown(os.path.join(path, fname),uid,gid)
    
            # make world readable and make directories world cd'able (i.e., world executable)
            dirs = os.walk(sys.prefix)
            for path, dirnames, filenames in dirs:
                for dir in dirnames:
                    dir = os.path.join(path,dir)
                    mode = os.stat(dir).st_mode
                    mode = S_IMODE(mode)
                    mode |= S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH
                    os.chmod(dir,mode)
                for fname in filenames:
                    fname = os.path.join(path, fname)
                    mode = os.stat(fname).st_mode
                    mode |= S_IRUSR | S_IRGRP | S_IROTH
                    os.chmod(fname, mode)
    
            # create pid dir.
            pid_dir = os.path.join("/var/run/", name )
            if not os.path.exists(pid_dir):
                os.mkdir(pid_dir)
                os.chown(pid_dir,uid,gid)

+ 131 - 0
html/bin/clients/mainline/BTL/bencode.py

@@ -0,0 +1,131 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Petru Paler
+
+from BTL import BTFailure
+
+
def decode_int(x, f):
    """Decode a bencoded integer 'i<digits>e' starting at offset f.

    Returns (value, offset past the 'e').  Rejects '-0' and any other
    leading zero, per the bencoding rules.
    """
    start = f + 1
    end = x.index('e', start)
    value = int(x[start:end])
    lead = x[start]
    if lead == '-':
        if x[start + 1] == '0':
            raise ValueError   # negative zero is forbidden
    elif lead == '0' and end != start + 1:
        raise ValueError       # leading zeros are forbidden
    return (value, end + 1)
+
def decode_string(x, f):
    """Decode a bencoded string '<len>:<bytes>' starting at offset f.

    Returns (string, offset past the last byte).  Length may not carry
    leading zeros.
    """
    sep = x.index(':', f)
    length = int(x[f:sep])
    if x[f] == '0' and sep != f + 1:
        raise ValueError
    start = sep + 1
    return (x[start:start + length], start + length)
+
def decode_list(x, f):
    """Decode a bencoded list 'l...e' at offset f into a Python list."""
    items = []
    f += 1
    while x[f] != 'e':
        value, f = decode_func[x[f]](x, f)
        items.append(value)
    return (items, f + 1)
+
def decode_dict(x, f):
    """Decode a bencoded dict 'd...e' at offset f; keys must be strings."""
    result = {}
    f += 1
    while x[f] != 'e':
        key, f = decode_string(x, f)
        result[key], f = decode_func[x[f]](x, f)
    return (result, f + 1)
+
# Dispatch table: the first character of a bencoded value selects its
# decoder.  Any digit introduces a string ('<len>:<data>').
decode_func = {
    'l': decode_list,
    'd': decode_dict,
    'i': decode_int,
}
decode_func.update(dict.fromkeys('0123456789', decode_string))
+
def bdecode(x):
    """Decode a complete bencoded string, rejecting trailing garbage."""
    try:
        value, consumed = decode_func[x[0]](x, 0)
    except (IndexError, KeyError, ValueError):
        raise BTFailure("not a valid bencoded string")
    if consumed != len(x):
        raise BTFailure("invalid bencoded value (data after valid prefix)")
    return value
+
+from types import StringType, IntType, LongType, DictType, ListType, TupleType
+
+
class Bencached(object):
    """Wrapper marking a string as already bencoded; bencode() splices
    the wrapped bytes into the output verbatim."""

    __slots__ = ['bencoded']

    def __init__(self, s):
        # s must already be a valid bencoded string
        self.bencoded = s
+
def encode_bencached(x,r):
    # Already encoded: append the cached bytes straight to the output list.
    r.append(x.bencoded)
+
def encode_int(x, r):
    """Append the bencoding of integer x, 'i<digits>e', to list r."""
    r.append('i')
    r.append(str(x))
    r.append('e')
+
def encode_bool(x, r):
    """Booleans are bencoded as the integers 1 and 0."""
    encode_int(1 if x else 0, r)
+        
def encode_string(x, r):
    """Append the bencoding of string x, '<len>:<data>', to list r."""
    r.append(str(len(x)))
    r.append(':')
    r.append(x)
+
def encode_list(x, r):
    """Append 'l<items>e' to r, dispatching each element on its type."""
    r.append('l')
    for item in x:
        encode_func[type(item)](item, r)
    r.append('e')
+
def encode_dict(x,r):
    """Append 'd<pairs>e' to r; keys are emitted in sorted order as the
    bencoding spec requires."""
    r.append('d')
    for key, value in sorted(x.items()):
        r.append(str(len(key)))
        r.append(':')
        r.append(key)
        encode_func[type(value)](value, r)
    r.append('e')
+
# Dispatch table mapping Python types to their bencoders.
encode_func = {
    Bencached: encode_bencached,
    IntType: encode_int,
    LongType: encode_int,
    StringType: encode_string,
    ListType: encode_list,
    TupleType: encode_list,
    DictType: encode_dict,
}

# bool does not exist on very old Pythons; register it only if available.
try:
    from types import BooleanType
except ImportError:
    pass
else:
    encode_func[BooleanType] = encode_bool
+
def bencode(x):
    """Return the bencoded representation of x as a single string."""
    pieces = []
    encode_func[type(x)](x, pieces)
    return ''.join(pieces)

+ 96 - 0
html/bin/clients/mainline/BTL/bitfield.py

@@ -0,0 +1,96 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Written by Bram Cohen, Uoti Urpala, and John Hoffman
+
+from array import array
+
# counts[i] = chr(popcount(i)): a 256-entry translation table that lets
# Bitfield tally set bits a whole byte at a time via str.translate().
counts = ''.join(chr(sum((i >> j) & 1 for j in xrange(8)))
                 for i in xrange(256))
+
class Bitfield:
    """Fixed-length bit array (e.g. which pieces a peer has).

    Invariant: once every bit is set (numfalse == 0) the backing array is
    dropped and self.bits becomes None; __getitem__ then reports 1 for
    every index and tostring() synthesizes the all-ones string.
    NOTE(review): relies on Python 2 str/array semantics (chr()*n fed to
    array('B'), array.tostring()).
    """

    def __init__(self, length, bitstring=None):
        self.length = length
        rlen, extra = divmod(length, 8)
        if bitstring is None:
            # start with every bit clear
            self.numfalse = length
            if extra:
                self.bits = array('B', chr(0) * (rlen + 1))
            else:
                self.bits = array('B', chr(0) * rlen)
        else:
            if extra:
                if len(bitstring) != rlen + 1:
                    raise ValueError("%s != %s" % (len(bitstring), rlen + 1))
                # padding bits past `length` in the last byte must be zero
                if (ord(bitstring[-1]) << extra) & 0xFF != 0:
                    raise ValueError("%s != %s" %
                                     ((ord(bitstring[-1]) << extra) & 0xFF, 0))
            else:
                if len(bitstring) != rlen:
                    raise ValueError("%s != %s" % (len(bitstring), rlen))
            # count clear bits using the per-byte popcount table `counts`
            self.numfalse = length - sum(array('B',
                                               bitstring.translate(counts)))
            if self.numfalse != 0:
                self.bits = array('B', bitstring)
            else:
                self.bits = None

    def __setitem__(self, index, val):
        # only setting a bit to true is supported
        assert val
        pos = index >> 3
        mask = 128 >> (index & 7)
        if self.bits[pos] & mask:
            return
        self.bits[pos] = self.bits[pos] | mask
        self.numfalse = self.numfalse - 1
        if self.numfalse == 0:
            self.bits = None

    def __getitem__(self, index):
        bits = self.bits
        if bits is None:
            # complete bitfield: every bit reads as set
            return 1
        return bits[index >> 3] & 128 >> (index & 7)

    def __len__(self):
        return self.length

    def tostring(self):
        """Serialize to the wire format: big-endian bits, zero padding."""
        if self.bits is None:
            rlen, extra = divmod(self.length, 8)
            r = chr(0xFF) * rlen
            if extra:
                r = r + chr((0xFF << (8 - extra)) & 0xFF)
            return r
        else:
            return self.bits.tostring()

    def __getstate__(self):
        # pickle as (length, packed string) rather than the array object
        d = {}
        d['length'] = self.length
        d['s'] = self.tostring()
        return d

    def __setstate__(self, d):
        Bitfield.__init__(self, d['length'], d['s'])
+
# Prefer the C implementation when it is importable; the pure-Python
# version above stays reachable as old_Bitfield.
old_Bitfield = Bitfield
try:
    import BTL.cBitfield
    Bitfield = BTL.cBitfield.Bitfield
except ImportError:
    pass

+ 85 - 0
html/bin/clients/mainline/BTL/brpc.py

@@ -0,0 +1,85 @@
+### brpc
+
## query = bencode({'y':'q', 'q':'<method>', 'a':[<params>]})
## response = bencode({'y':'r', 'r':<return value>})
## fault = bencode({'y':'e', 'c':'<fault code>', 's':'<fault string>'})
+
+from xmlrpclib import Error, Fault
+from types import TupleType
+
+from BTL.bencode import bencode, bdecode
+
def dump_fault(code, msg):
    """Serialize a fault (code, message) as a bencoded error packet."""
    fault = {'y': 'e', 'c': code, 's': msg}
    return bencode(fault)
+
+
def dumps(params, methodname=None, methodresponse=None, encoding=None, allow_none=False):
    """Encode a B-RPC query, response, or fault.

    Mirrors xmlrpclib.dumps: a response tuple must hold exactly one
    value, and a Fault instance is encoded as an error packet.
    """
    if methodresponse and isinstance(params, TupleType):
        assert len(params) == 1, "response tuple must be a singleton"
    if methodname:
        payload = {'y': 'q', 'q': methodname, 'a': params}
    elif isinstance(params, Fault):
        payload = {'y': 'e', 'c': params.faultCode, 's': params.faultString}
    elif methodresponse:
        payload = {'y': 'r', 'r': params[0]}
    else:
        raise Error("")
    return bencode(payload)
+
def loads(data):
    """Decode a B-RPC message into (params, methodname).

    Raises Fault when the message is a server-side error packet.  The
    odd single-value-tuple return for responses mimics xmlrpclib.loads.
    """
    msg = bdecode(data)
    kind = msg['y']
    if kind == 'e':
        raise Fault(msg['c'], msg['s'])  # the server raised a fault
    if kind == 'r':
        return (msg['r'],), None
    if kind == 'q':
        return msg['a'], msg['q']
    raise ValueError
+    
+
+
class DFault(Exception):
    """Indicates a Datagram BRPC fault package."""

    # A DFault returned with tid=None from inside a function invoked via
    # twispread's TBRPC.callRemote gets its tid filled in by TBRPC.
    def __init__(self, faultCode, faultString, tid=None):
        self.faultCode, self.faultString = faultCode, faultString
        self.tid = tid
        self.args = (faultCode, faultString)

    def __repr__(self):
        return "<Fault %s: %s>" % (self.faultCode, repr(self.faultString))
+
+### datagram interface
### has transaction ID as third return value
+### slightly different API, returns a tid as third argument in query/response
def dumpd(params, methodname=None, methodresponse=None, encoding=None, allow_none=False, tid=None):
    """Encode a datagram B-RPC message; the transaction id is mandatory."""
    assert tid is not None, "need a transaction identifier"
    if methodname:
        body = {'y': 'q', 't': tid, 'q': methodname, 'a': params}
    elif isinstance(params, DFault):
        body = {'y': 'e', 't': tid, 'c': params.faultCode, 's': params.faultString}
    elif methodresponse:
        # Unlike dumps(), the response payload is used as-is rather than
        # being unwrapped from a singleton tuple.
        body = {'y': 'r', 't': tid, 'r': params}
    else:
        raise Error("")
    return bencode(body)
+
def loadd(data):
    """Decode a datagram B-RPC message into (params, methodname, tid).

    Raises DFault (carrying the tid) for error packets.
    """
    msg = bdecode(data)
    kind = msg['y']
    if kind == 'e':
        raise DFault(msg['c'], msg['s'], msg['t'])
    if kind == 'r':
        return msg['r'], None, msg['t']
    if kind == 'q':
        return msg['a'], msg['q'], msg['t']
    raise ValueError
+    
+

+ 133 - 0
html/bin/clients/mainline/BTL/brpclib.py

@@ -0,0 +1,133 @@
+# by Greg Hazel
+
+import xmlrpclib
+from xmlrpclib2 import *
+from BTL import brpc
+
old_PyCurlTransport = PyCurlTransport
class PyCurlTransport(old_PyCurlTransport):
    """PyCurl transport specialized for B-RPC: binary payloads over a
    keep-alive connection, responses parsed with brpc instead of XML."""

    def set_connection_params(self, h):
        # Header order is preserved from the original implementation.
        for header, value in (('User-Agent', "brpclib.py/1.0"),
                              ('Connection', "Keep-Alive"),
                              ('Content-Type', "application/octet-stream")):
            h.add_header(header, value)

    def _parse_response(self, response):
        # brpc.loads returns (params, methodname); only params matter here.
        return brpc.loads(response.getvalue())[0]
+
+# --------------------------------------------------------------------
+# request dispatcher
+
class _Method:
    """Callable proxy binding one (possibly dotted) method name to a sender.

    Attribute access extends the dotted name, so nested methods such as
    examples.getStateName work transparently.
    """
    def __init__(self, send, name):
        self.__send = send
        self.__name = name
    def __getattr__(self, name):
        return _Method(self.__send, "%s.%s" % (self.__name, name))
    def __call__(self, *args, **kwargs):
        # Positional and keyword arguments travel together as one 2-tuple.
        return self.__send(self.__name, (args, kwargs))
    def __repr__(self):
        # Explicit repr so that printing a _Method never turns into an RPC
        # call via __getattr__.
        return "<%s instance at 0x%08X>" % (self.__class__, id(self))
+
+
+# Double underscore is BAD!
+class BRPC_ServerProxy(xmlrpclib.ServerProxy):
+    """uri [,options] -> a logical connection to an B-RPC server
+
+    uri is the connection point on the server, given as
+    scheme://host/target.
+
+    The standard implementation always supports the "http" scheme.  If
+    SSL socket support is available (Python 2.0), it also supports
+    "https".
+
+    If the target part and the slash preceding it are both omitted,
+    "/RPC2" is assumed.
+
+    The following options can be given as keyword arguments:
+
+        transport: a transport factory
+        encoding: the request encoding (default is UTF-8)
+
+    All 8-bit strings passed to the server proxy are assumed to use
+    the given encoding.
+    """
+
+    def __init__(self, uri, transport=None, encoding=None, verbose=0,
+                 allow_none=0):
+        # establish a "logical" server connection
+
+        # get the url
+        import urllib
+        type, uri = urllib.splittype(uri)
+        if type not in ("http", "https"):
+            raise IOError, "unsupported B-RPC protocol"
+        self.__host, self.__handler = urllib.splithost(uri)
+        if not self.__handler:
+            self.__handler = "/RPC2"
+
+        if transport is None:
+            if type == "https":
+                transport = xmlrpclib.SafeTransport()
+            else:
+                transport = xmlrpclib.Transport()
+        self.__transport = transport
+
+        self.__encoding = encoding
+        self.__verbose = verbose
+        self.__allow_none = allow_none
+
+    def __request(self, methodname, params):
+        # call a method on the remote server
+
+        request = brpc.dumps(params, methodname, encoding=self.__encoding,
+                              allow_none=self.__allow_none)
+
+        response = self.__transport.request(
+            self.__host,
+            self.__handler,
+            request,
+            verbose=self.__verbose
+            )
+
+        if len(response) == 1:
+            response = response[0]
+
+        return response
+
+    def __repr__(self):
+        return (
+            "<ServerProxy for %s%s>" %
+            (self.__host, self.__handler)
+            )
+
+    __str__ = __repr__
+
+    def __getattr__(self, name):
+        # magic method dispatcher
+        return _Method(self.__request, name)
+    
def new_server_proxy(url):
    """Build a BRPC_ServerProxy talking over a cached PyCURL connection."""
    conn_cache = cache_set.get_cache(PyCURL_Cache, url)
    return BRPC_ServerProxy(url, transport=PyCurlTransport(conn_cache))

# Drop-in replacement name matching xmlrpclib's API.
ServerProxy = new_server_proxy
+
+
if __name__ == '__main__':
    # Smoke test against a live echo server: ping() round-trips arbitrary
    # positional and keyword arguments and checks they come back intact.
    s = ServerProxy('https://greg.mitte.bittorrent.com:7080/')
    def ping(*a, **kw):
        (a2, kw2) = s.ping(*a, **kw)
        # Bug fix: the failure message referenced an undefined name 'r'
        # (NameError on assertion failure); report the echoed value a2.
        assert a2 == list(a), '%s list is not %s' % (a2, list(a))
        assert kw2 == dict(kw), '%s dict is not %s' % (kw2, dict(kw))
    ping(0, 1, 1, name="potato")
    ping(0, 1, 1, name="anime")
    ping("phish", 0, 1, 1)
    ping("games", 0, 1, 1)

+ 88 - 0
html/bin/clients/mainline/BTL/btformats.py

@@ -0,0 +1,88 @@
+import re
+from BTL.translation import _
+
+from BTL import BTFailure
+
# Path components must not start with '/', '\', '.', or '~' and must not
# contain a path separator anywhere -- guards against directory traversal.
allowed_path_re = re.compile(r'^[^/\\.~][^/\\]*$')

# Integer types accepted for bencoded numeric fields (Python 2 int/long).
ints = (long, int)
+
+def check_info(info, check_paths=True):
+    if not isinstance(info, dict):
+        raise BTFailure, _("bad metainfo - not a dictionary")
+    pieces = info.get('pieces')
+    if type(pieces) != str or len(pieces) % 20 != 0 or len(pieces) == 0:
+        raise BTFailure, _("bad metainfo - bad pieces key")
+    piecelength = info.get('piece length')
+    if type(piecelength) not in ints or piecelength <= 0:
+        raise BTFailure, _("bad metainfo - illegal piece length")
+    name = info.get('name')
+    if not isinstance(name, str):
+        raise BTFailure, _("bad metainfo - bad name")
+    #if not allowed_path_re.match(name):
+    #    raise BTFailure, _("name %s disallowed for security reasons") % name
+    if info.has_key('files') == info.has_key('length'):
+        raise BTFailure, _("single/multiple file mix") 
+    if info.has_key('length'): 
+        length = info.get('length')
+        if type(length) not in ints or length < 0:
+            raise BTFailure, _("bad metainfo - bad length") 
+    else:
+        files = info.get('files')
+        if type(files) != list:
+            raise BTFailure, _('bad metainfo - "files" is not a list of files')
+        for f in files:
+            if type(f) != dict:
+                raise BTFailure, _("bad metainfo - file entry must be a dict") 
+            length = f.get('length')
+            if type(length) not in ints or length < 0:
+                raise BTFailure, _("bad metainfo - bad length")
+            path = f.get('path')
+            if type(path) != list or path == []:
+                raise BTFailure, _("bad metainfo - bad path")
+            for p in path:
+                if type(p) != str:
+                    raise BTFailure, _("bad metainfo - bad path dir")
+                if check_paths and not allowed_path_re.match(p):
+                    raise BTFailure, _("path %s disallowed for security reasons") % p
+        f = ['/'.join(x['path']) for x in files]
+        f.sort()
+        i = iter(f)
+        try:
+            name2 = i.next()
+            while True:
+                name1 = name2
+                name2 = i.next()
+                if name2.startswith(name1):
+                    if name1 == name2:
+                        raise BTFailure, _("bad metainfo - duplicate path")
+                    elif name2[len(name1)] == '/':
+                        raise BTFailure(_("bad metainfo - name used as both"
+                                          "file and subdirectory name"))
+        except StopIteration:
+            pass
+
+def check_message(message, check_paths=True):
+    if type(message) != dict:
+        raise BTFailure, _("bad metainfo - wrong object type")
+    check_info(message.get('info'), check_paths)
+    if type(message.get('announce')) != str and type(message.get('nodes')) != list:
+        raise BTFailure, _("bad metainfo - no announce URL string")
+    if message.has_key('title') and type(message.get('title')) != str:
+        raise BTFailure, _("bad metainfo - bad title - should be a string" )
+
+    if message.has_key('nodes'):
+        check_nodes(message.get('nodes'))
+
+def check_nodes(nodes):
+    ## note, these strings need changing
+    for node in nodes:
+        if type(node) != list:
+            raise BTFailure, _("bad metainfo - node is not a list")
+        if len(node) != 2:
+            raise BTFailure, _("bad metainfo - node list must have only two elements")
+        host, port = node
+        if type(host) != str:
+            raise BTFailure, _("bad metainfo - node host must be a string")
+        if type(port) != int:
+            raise BTFailure, _("bad metainfo - node port must be an integer")

+ 103 - 0
html/bin/clients/mainline/BTL/btl_string.py

@@ -0,0 +1,103 @@
+
+# author: David Harrison
+
def split( s, delimiter = ' ', quote=['"',"'"], keep_quote = True):
    """analogous to str.split() except it supports quoted strings.

       Delimiter can be any positive length string.

       A quote begins on any character in 'quote', and ends on that
       same character.  A quoted string is not split even if it
       contains the delimiter or other quote characters listed in
       the quote argument.

       Iff keep_quote is true then a quoted substring's leading and
       trailing quote characters are kept in the strings in the
       returned list."""
    assert type(s) == str
    # Bug fix: the assertion message referenced an undefined name 'c',
    # raising NameError instead of the intended AssertionError.
    assert type(delimiter) == str and len(delimiter) >= 1, \
        "delimiter='%s'" % delimiter
    l = []
    sub = []
    quoted = None   # the quote character we are currently inside, or None
    i = 0
    while i < len(s):
        c = s[i]
        if quoted:
            # Inside a quoted run only the matching quote char ends it.
            if c == quoted:
                quoted = None
                if keep_quote:
                    sub.append(c)
            else:
                sub.append(c)
        elif c in quote:
            # Start of a quoted run.
            quoted = c
            if keep_quote:
                sub.append(c)
        elif s[i:i+len(delimiter)] != delimiter:
            sub.append(c)
        else:
            # Delimiter match: emit the accumulated piece, skip past it.
            i += (len(delimiter)-1)
            l.append("".join(sub))
            sub = []
        i += 1
    l.append("".join(sub))
    return l
+
+
def remove(s, c):
    """Return s with every occurrence of the character c deleted."""
    return "".join(ch for ch in s if ch != c)
+
def printable(s):
    """Map a possibly-binary string to a fixed-width printable form.

    Printable ASCII characters are kept, except single and double quotes
    (so the result can safely be wrapped in quotes); every other byte --
    controls, non-space whitespace, DEL, high bytes -- becomes a period.
    Intended for debug output of binary strings with ASCII substrings
    (e.g. peer ids), not for canonicalization.
    """
    return "".join(
        c if (0x20 <= ord(c) < 0x7F and c not in '"\'') else '.'
        for c in s)
+
def str2(s, default = "<not str convertable>" ):
    """converts passed object to a printable string, to repr, or
       returns provided default in that order of precedence."""
    # Bug fix: the bare 'except:' clauses also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception, keeping the same
    # str -> repr -> default fallback chain.
    try:
        return printable(str(s))
    except Exception:
        try:
            return repr(s)
        except Exception:
            return default
+
+
+
if __name__ == "__main__":
    # Self-test: exercises split() quoting and multi-character delimiter
    # handling, then str2().  Each assert's second expression re-runs the
    # call so a failure prints the actual result.
    assert split( "" ) == [''], split( "" )
    assert split( "a b c" ) == ['a','b','c'], split( "a b c" )
    assert split( "a" ) == ['a'], split( "a" )
    assert split( " a", ',' ) == [' a'], split( " a", ',')
    assert split( "a,b,c", ',' ) == ['a','b','c'], split( "a,b,c", ',' )
    assert split( "a,b,", ',' ) == ['a','b',''], split( "a,b,", ',' )
    assert split( "'a',b", ',' ) == ["'a'",'b'], split( "'a',b", ',' )
    assert split( "'a,b'", ',' ) == ["'a,b'"], split( "'a,b'", ',' )
    assert split( "a,'b,\"cd\",e',f", ',', keep_quote=False) == ['a', 'b,"cd",e', 'f']
    assert split( 'a,"b,\'cd\',e",f', ',', keep_quote=False) == ['a', "b,'cd',e", 'f']
    assert split( "a - b - c", " - " ) == ['a','b','c'], split( "a - b - c", " - " )
    s = "Aug 19 06:26:29 tracker-01 hypertracker.event - 6140 - INFO - ihash=ed25f"
    assert split( s, ' - ' ) == ['Aug 19 06:26:29 tracker-01 hypertracker.event',
                                 '6140', 'INFO', 'ihash=ed25f'], split( s, ' - ')

    assert str2('foo') == 'foo'
    assert str2(u'foo') == 'foo'
    assert str2(None) == "None"

    print "passed all tests"

+ 180 - 0
html/bin/clients/mainline/BTL/buffer.py

@@ -0,0 +1,180 @@
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+ctypes = None
+try:
+    import ctypes
+except ImportError:
+    pass
+
+
+from cStringIO import StringIO
+
class cStringIO_Buffer(object):
    """Growable byte buffer backed by a cStringIO object.

    Adds list-like indexing/slicing, len(), and drop() on top of the
    file-like API (write/seek/tell/...), which is re-exported directly.
    """

    def __init__(self):
        self.buffer = StringIO()
        # Copy the file-object methods into the instance dict so hot-path
        # calls (write, seek, ...) bypass __getattr__.
        for attr in dir(self.buffer):
            self.__dict__[attr] = getattr(self.buffer, attr)

    def __getattr__(self, attr):
        # Fallback for anything not copied in __init__.
        return getattr(self.buffer, attr)

    def _slice_to_a_b(self, i):
        """Normalize an index or slice to (saved_pos, start, count).

        saved_pos is the current stream position so callers can restore
        it; start is the first byte of the span; count is its length.
        """
        if not isinstance(i, slice):
            if i >= len(self):
                raise IndexError("buffer index out of range")
            i = slice(i, i+1)
        o = self.tell()
        if i.start is None:
            a = 0
        else:
            a = i.start
        if i.stop is None:
            b = o
        else:
            b = i.stop
        if b < 0:
            b = o + b
        # Bug fix: this was max(a, b - a), which returned a span length of
        # 'a' whenever a > b - a, so e.g. buf[2] read two bytes instead of
        # one.  The length of [a, b) is b - a, clamped at zero.
        b = max(0, b - a)
        return o, a, b

    def __setitem__(self, i, d):
        # Overwrite in place, then restore the original stream position.
        o, a, b = self._slice_to_a_b(i)
        self.seek(a)
        self.write(d)
        self.seek(o)

    def __getitem__(self, i):
        o, a, b = self._slice_to_a_b(i)
        self.seek(a)
        d = self.read(b)
        self.seek(o)
        return d

    def drop(self, size):
        """Discard the first *size* bytes of the buffer."""
        v = self.getvalue()
        self.truncate(0)
        self.write(buffer(v, size))

    def __len__(self):
        # Seek to EOF to measure, then restore the position.
        o = self.tell()
        self.seek(0, 2)
        x = self.tell()
        self.seek(o)
        return x

    def __str__(self):
        return self.getvalue()

Buffer = cStringIO_Buffer
+
+# slow, has dependencies
if False: # ctypes:
    # Disabled alternative Buffer implementation backed by a ctypes string
    # buffer.  Kept for reference only: the literal-False guard means none
    # of this ever executes (the header comment above notes it was slow
    # and had dependencies).
    
    class ctypes_Buffer(object):

        def __init__(self):
            # Capacity doubles on demand (see _oversize); 'written' is the
            # logical length, 'offset' the current write position.
            self.length = 32
            self.data = ctypes.create_string_buffer(self.length)
            self.written = 0
            self.offset = 0

        def __setitem__(self, i, y):
            if isinstance(i, slice):
                return self.data.__setslice__(i.start, i.stop, y)
            else:
                return self.data.__setitem__(i, y)

        # TODO: call PyBuffer_FromMemory!
        def __getitem__(self, i):
            if isinstance(i, slice):
                if i.stop < 0:
                    i = slice(i.start, self.written + i.stop)
                return self.data.__getslice__(i.start or 0, i.stop or self.written)
            else:
                return self.data.__getitem__(i)
            
        def __getattr__(self, attr):
            return getattr(self.data, attr)

        def __str__(self):
            return self.data[:self.written]

        def __len__(self):
            return self.written

        def _oversize(self, l):
            # Grow capacity geometrically until it covers l bytes.
            o = self.length
            while l > self.length:
                self.length *= 2
            if self.length > o:
                d = self.data
                self.data = ctypes.create_string_buffer(self.length)
                # which is faster?
                #self.data[0:self.written] = d[:self.written]
                self.data[0:o] = d

        def write(self, s):
            l = len(s)
            self._oversize(self.offset + l)
            self.data[self.offset:self.offset + l] = s
            self.offset += l
            self.written = max(self.written, self.offset)
            return l
        
        def seek(self, offset):
            self.offset = min(self.written - 1, max(0, offset))

        def truncate(self, size=None):
            if size is None:
                size = self.offset
            self.written = size
            self.offset = min(size, self.offset)

        def drop(self, size):
            # Shift the surviving bytes to the front of the buffer.
            if size < 0:
                raise ValueError("cannot discard negative bytes")
            size = min(size, self.written)
            new_written = self.written - size
            # ow
            try:
                self.data[:new_written] = self.data[size:self.written]
            except ValueError:
                print new_written, size, self.written
            self.written = new_written
            self.offset = min(self.written, self.offset)

    Buffer = ctypes_Buffer
+
+    
# Module self-test: exercises write/seek/drop/indexing on whichever Buffer
# implementation is active.  NOTE(review): this runs at import time and
# relies on the Python 2 buffer() builtin.
b = Buffer()
b.write("ghello")
b.seek(0)
b.write(buffer("ghell"))
b.drop(1)
b[2:3] = 'b'
assert str(b) == "heblo"
assert b[0] == "h"
#print repr(b[:-1])
assert b[:-1] == "hebl"
#assert len(b) <= b.length
assert len(b) == len(str(b))
b.drop(1)
b.seek(0)
b.write('foo')
assert b[0] == 'f'
try:
    b[100]
except IndexError:
    pass
else:
    assert False
+ 76 - 0
html/bin/clients/mainline/BTL/cache.py

@@ -0,0 +1,76 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+from BTL.platform import bttime as time
+from collections import deque
+
+# The main drawback of this cache is that it only supports one time out 
+# delay.  If you experience heavy load of cache inserts at time t then
+# at t + expiration time, you will experience a heavy load due to
+# expirations.  An alternative is to randomize the timeouts.  With 
+# exponentially distributed timeouts we would expect load roughly obeying
# a Poisson process.  However, one can do better if the randomness is a
+# function of load such that excess arrivals are redistributed evenly over the
+# interval.
+
class Cache:
    """Fixed-TTL cache: every entry is assumed to stay valid for the same
    duration, so insertion order equals expiry order and a deque suffices."""

    def __init__(self, touch_on_access = False):
        self.data = {}    # key -> (insert_time, value)
        self.q = deque()  # (insert_time, key, value), newest on the left
        self.touch = touch_on_access

    def __getitem__(self, key):
        if self.touch:
            # Re-insert to refresh the timestamp; the stale queue record is
            # left behind and skipped by expire().
            self[key] = self.data[key][1]
        return self.data[key][1]

    def __setitem__(self, key, value):
        now = time()
        self.data[key] = (now, value)
        self.q.appendleft((now, key, value))

    def __delitem__(self, key):
        # The queue record stays; expire() ignores records whose
        # (time, value) no longer match self.data.
        del(self.data[key])

    def has_key(self, key):
        return self.data.has_key(key)

    def keys(self):
        return self.data.keys()

    def expire(self, expire_time):
        """Drop every entry inserted before expire_time."""
        while True:
            try:
                stamp, key, value = self.q[-1]
            except IndexError:
                break                   # queue exhausted
            if stamp >= expire_time:
                break                   # everything older has been handled
            self.q.pop()
            try:
                t, v = self.data[key]
            except KeyError:
                continue                # entry already deleted
            if v == value and t == stamp:
                # Removes only this reference.  A touched entry persists
                # until its newest queue record expires, so frequent
                # touching with long timeouts can grow the cache; this
                # class works best without touch_on_access or with short
                # expirations.
                del(self.data[key])
+        
+        

+ 370 - 0
html/bin/clients/mainline/BTL/cache_map.py

@@ -0,0 +1,370 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# author: David Harrison
+
+if __name__ == "__main__":
+    import sys
+    sys.path.append("..")
+
+import sys #DEBUG
+from BTL.platform import bttime as time
+from BTL.reactor_magic import reactor
+from BTL.Map import IndexedMultiMap
+#from refcnt import getrc
+
RECENT_SIZE = 10   # number of items in the recently accessed set.
LEAK_TEST = False  # NOTE(review): not referenced in the code visible here.
+
class CacheMap:
    """this cache class allows caching with arbitrary expiration times.  This is different
       from BTL.Cache which assumes all items placed in the cache remain valid for the same
       duration.

       Like a BTL.Map, there can only be one instance of any given key in
       the cache at a time.  Subsequent inserts (__setitem__ or set)
       with the same key, will update the ttl and value for that
       key.

       Unlike a BTL.Map, CacheMap does not perform in-order iteration based on key, and
       key lookups (__getitem__) take average O(1) time.

       The map also has the option to have bounded size in which case it imposes the 
       following replacement algorithm: remove the oldest entries first unless
       those entries are in the recent access set.  Here 'old' refers to duration
       in the cache.  Recent set has bounded size.
       """

    # BTL.Cache places the cache entries in a queue.  We instead maintain an
    # IndexedMultiMap ordered based on expiration times.  The index allows nodes in the
    # map to be looked up in O(1) time based on value.
    def __init__(self, default_ttl = None, expire_interval = 60, touch_on_access = False,
                 max_items = None, recent_items = RECENT_SIZE ):
        """
           @param default_ttl: time to live when using __setitem__ rather than set.
           @param expire_interval:  time between removals of expired items in seconds. Otherwise,
               expired items are removed lazily.
           @param touch_on_access: refresh item expire time by ttl when item is accessed.
           @param max_items: maximum size of cache.  (see replacement algorithm above)
        """
        self._exp = IndexedMultiMap()  # expiration times.  Multiple items can have the same expiration
                                       # times, but there can only be one instance of any one key
                                       # in the CacheMap.
        # key -> (ttl, value).  ttl is the entry's duration, not an
        # absolute expiration time; the absolute time lives in self._exp.
        self._data = {}
        self._ttl = default_ttl
        self._touch = touch_on_access
        self._max_items = max_items
        self._expire_interval = expire_interval
        if max_items is not None:
            self._recent = _BoundedCacheSet(int(min(recent_items,max_items)))
        else:
            self._recent = None
        # Periodic sweep; _expire reschedules itself on the reactor.
        reactor.callLater(self._expire_interval, self._expire)
        
    def __getitem__(self, key):  # O(1) if not touch and not newly expired, else O(log n)
        """Raises KeyError if the key is not in the cache.
           This can happen if the entry was deleted or expired."""
        ttl,v = self._data[key]  # ttl is duration, not absolute time.
        i = self._exp.find_key_by_value(key)     # O(1). Key in exp is time. 'key' variable
                                                 # is exp's value. :-)
        if i.at_end():
            raise KeyError()
        t = time()
        if i.key() < t:                          # expired.
            del self[key]                        # O(log n)
            raise KeyError()
        if self._recent:
            self._recent.add(key)
        if self._touch:
            self._exp.update_key(i,t+ttl)  # O(1) if no reordering else O(log n)
        return v

    def __setitem__(self, key, value): # O(log n).  actually O(log n + RECENT_SIZE)
        # Insert with the default TTL (or the key's existing TTL if it is
        # already cached), then enforce the max_items bound.
        assert self._ttl > 0, "no default TTL defined.  Perhaps the caller should call set " \
               "rather than __setitem__."
        t = time()
        if self._data.has_key(key):
            ttl,_ = self._data[key]
        else:
            ttl = self._ttl
        self.set(key,value,ttl)
        if self._recent:
            self._recent.add(key)

        # perform cache replacement if necessary.
        if self._max_items is not None and len(self._data) > self._max_items:
            # Evict oldest-expiring entries that are not in the recent set.
            to_remove = []
            for t,k in self._exp.iteritems():
                                   # worst case is O(RECENT_SIZE), but it is highly unlikely
                                   # that all members of the recent access set are the oldest
                                   # in the cache.
                if k not in self._recent:
                    to_remove.append(k)
                if len(to_remove) >= len(self._data) - self._max_items:
                    break
            for k in to_remove:
                del self[k]                    

    def set(self, key, value, ttl):
        """Set using non-default TTL.  ttl is a duration, not an absolute
           time."""
        t = time()
        self._data[key] = (ttl, value)
        i = self._exp.find_key_by_value(key)
        if i.at_end():
            self._exp[t+ttl] = key
        else:
            # Key already present: refresh its expiration time in place.
            assert i.value() == key
            self._exp.update_key(i,t+ttl)

    def __delitem__(self, key): # O(log n)
        del self._data[key]
        i = self._exp.find_key_by_value(key)
        if not i.at_end():  # No KeyError is generated if item is not in
                            # Cache because it could've been expired.
            self._exp.erase(i)

    def __len__(self):
        """Returns number of entries in the cache.  Includes any
           expired entries that haven't been removed yet.
           Takes O(1) time."""
        return len(self._data)

    def num_unexpired(self):
        """Returns number of unexpired entries in the cache.
           Any expired entries are removed before computing the length.
           Takes worst case O(n) time where n = the number of expired
           entries in the cache when this is called."""
        self._expire2()
        return len(self._data)
    
    def has_key(self, key):
        return self._data.has_key(key)

    def __contains__(self, key):
        return self._data.has_key(key)
    
    def keys(self):
        return self._data.keys()

    def _expire(self):
        # Reactor callback: sweep, then reschedule the next sweep.
        self._expire2()              
        reactor.callLater(self._expire_interval, self._expire)

    def _expire2(self):
        # Remove every entry whose expiration time has passed.  _exp is
        # ordered by expiration time, so we pop from the front until we
        # reach an unexpired entry.
        t = time()
        #try:
        while True:
          i = self._exp.begin()
          if i.at_end():
              break
          if i.key() < t:
              key = i.value()
              self._exp.erase(i)
              del self._data[key]
          else:
              break
        assert len(self._data) == len(self._exp)
        #except:
        #    pass  # for example if an iterator is invalidated
                  # while expiring.
+
class _BoundedCacheSet:
    """LRU set with a fixed capacity.

    Backed by an IndexedMultiMap keyed on last-access time, so the
    least-recently-added/refreshed key can be evicted in O(log n).
    (A plain set with random eviction would also work, as the original
    author noted.)
    """
    def __init__(self, max_items):
        assert max_items > 1
        self._max_items = max_items
        self._data = IndexedMultiMap()         # access time -> key

    def add(self, key): # O(log n)
        """Insert key or refresh its access time; evict oldest on overflow."""
        it = self._data.find_key_by_value(key)
        now = time()
        if it.at_end():
            self._data[now] = key
        else:
            self._data.update_key(it, now)
        while len(self._data) > self._max_items:
            oldest = self._data.begin()
            assert not oldest.at_end()
            self._data.erase(oldest)

    def __contains__(self, key):
        return not self._data.find_key_by_value(key).at_end()

    def remove(self, key):
        """Remove key; raises KeyError when it is not present."""
        it = self._data.find_key_by_value(key)
        if it.at_end():
            raise KeyError()
        self._data.erase(it)

    def __str__(self):
        return str(self._data)
+                         
+
if __name__ == "__main__":
    # Self-test / leak-test harness for CacheMap and _BoundedCacheSet,
    # driven as a reactor coroutine because expiration needs wall-clock
    # time to pass between assertions.
    from defer import Deferred
    import random
    from yielddefer import launch_coroutine, wrap_task
    def coro(f, *args, **kwargs):
        # run f as a generator-based coroutine stepped by the reactor
        return launch_coroutine(wrap_task(reactor.callLater), f, *args, **kwargs)

    def run():
        coro(_run)

    def _run():
        TTL = 1
        SET_TTL = 2      # TTL used when explicitly setting TTL using "def set."
        EXPIRE_INTERVAL = .3   # NOTE(review): unused below; caches are built with EPSILON
        EPSILON = .5

        ###
        # BoundedCacheSet correctness tests.
        c = _BoundedCacheSet(2)
        c.add(10)
        assert 10 in c
        c.add(15)
        assert 15 in c
        c.add(16)
        assert 16 in c
        assert 10 not in c   # 10 was least recently used and must be evicted
        assert 15 in c
        c.remove(15)
        assert 15 not in c
        try:
            c.remove(23)
            assert False
        except KeyError:
            pass

        ###
        # basic CacheMap correctness tests.
        c = CacheMap(default_ttl=TTL,expire_interval=EPSILON)
        # K/V carry a large payload so the leak test below is visible in top.
        class K(object):
            def __init__(self):
                self.x = range(10000)
        class V(object):
            def __init__(self):
                self.x = range(10000)

        k = K()
        v = V()
        t = time()
        c.set(k, v, SET_TTL)
        assert len(c) == 1
        assert c.num_unexpired() == 1
        assert c._exp.begin().key() < t + SET_TTL + EPSILON and \
               c._exp.begin().key() > t + SET_TTL - EPSILON, \
               "First item in c._exp should have expiration time that is close to the " \
               "current time + SET_TTL which is %s, but the expiration time is %s." \
               % (t+SET_TTL, c._exp.begin().key())
        assert c.has_key(k)
        assert not c.has_key( "blah" )
        assert c[k] == v
        c._expire2()  # should not expire anything because little time has passed.
        assert len(c) == 1
        assert c.num_unexpired() == 1
        try:
            y = c[10]
            assert False, "should've raised KeyError."
        except KeyError:
            pass
        v2 = V()
        c[k] = v2
        assert c._exp.begin().key() < t + SET_TTL + EPSILON and \
               c._exp.begin().key() > t + SET_TTL - EPSILON, \
               "First item in c._exp should have expiration time that is close to the " \
               "current time + SET_TTL, but the expiration time is %s." % c._exp.begin().key()
        assert not c[k] == v
        assert c[k] == v2
        assert len(c) == 1
        assert c.num_unexpired() == 1
        k2 = K()
        t = time()
        c[k2] = v2
        assert c._exp.begin().key() < t + TTL + EPSILON and \
               c._exp.begin().key() > t + TTL - EPSILON, \
               "First item in c._exp should have expiration time that is close to the " \
               "current time + TTL, but the expiration time is %s." % c._exp.begin().key()
        assert c[k2] == v2
        assert not c[k] == v  # shouldn't be a problem with two items having the same value.
        assert len(c) == 2
        assert c.num_unexpired() == 2

        # wait long enough for the cache entries to expire.
        df = Deferred()
        reactor.callLater(SET_TTL+EPSILON, df.callback, None)
        yield df
        df.getResult()

        assert c.num_unexpired() == 0, "Should have expired all entries, but there are %d " \
               "unexpired items and %d items in c._data. " % (c.num_unexpired(), len(c._data))
        assert len(c) == 0
        assert len(c._exp) == 0
        assert len(c._data) == 0
        assert k not in c
        assert k2 not in c

        # basic correctness of bounded-size cache map.
        c = CacheMap(default_ttl=TTL,expire_interval=1000,max_items = 2)
        c[k] = v
        assert len(c) == 1
        assert c[k] == v
        c[k2] = v2
        assert len(c) == 2
        assert c[k2] == v2
        c[10] = 15
        assert len(c) == 2
        assert c[10] == 15
        assert c[k2] == v2   # order from most recent access is now [(k2,v2), (10,15), (k,v)].
        try:
            a = c[k]
            assert False, "when cache with size bound of 2 exceeded 2 elements, " \
                   "the oldest should've been removed."
        except KeyError:
            pass
        c[56] = 1          # order from most recent access ...
        assert len(c) == 2
        assert 56 in c
        assert 10 not in c


        ###
        # test expirations and for memory leaks.
        # Watch memory consumption (e.g., using top) and see if it grows.
        # LEAK_TEST is presumably a module-level flag defined above this
        # chunk -- TODO confirm.
        if LEAK_TEST:
            c = CacheMap(default_ttl=TTL,expire_interval=EPSILON)
            i = 0
            while True:
                for x in xrange(100):
                    i += 1
                    if i % 20 == 0:
                        print len(c)
                    c[i] = K()
                    if i % 5 == 0:
                        try:
                            l = len(c)
                            del c[i]
                            assert len(c) == l-1
                        except KeyError:
                            pass

                # allow time for expirations.
                df = Deferred()
                reactor.callLater(TTL+EPSILON,df.callback,None)
                yield df
                df.getResult()


    reactor.callLater(0,run)
    reactor.run()

+ 196 - 0
html/bin/clients/mainline/BTL/circular_list.py

@@ -0,0 +1,196 @@
+# circular doubly linked list
+#
+# by Greg Hazel
+
+import random
+
+
class Link(object):
    """Node of a circular doubly linked list.

    A freshly constructed Link is a one-element cycle: both neighbor
    pointers refer back to the node itself until list insertion
    rewires them.
    """

    __slots__ = ['prev', 'data', 'next']

    def __init__(self, data):
        # start as a self-loop
        self.prev = self
        self.data = data
        self.next = self

    def __str__(self):
        return 'link:(%s, (%s, %s), %s)' % (id(self.prev),
                                            id(self),
                                            self.data,
                                            id(self.next))
+
+
class CircularList(object):
    """Circular doubly linked list with a persistent cursor.

    self.iter points at the "current" link; iteration yields data
    values round-robin forever (it only stops if the list empties).
    Each data value may appear at most once and must be hashable,
    because link_refs maps data -> Link for O(1) removal.
    """

    def __init__(self):
        self.iter = None        # current Link, or None when empty
        self.link_refs = {} # data: link

    def prepend(self, data):
        # insert immediately before the cursor
        link = Link(data)
        assert data not in self.link_refs
        self.link_refs[data] = link
        if not self.iter:
            self.iter = link
        else:
            self._insert_before(self.iter, link)

    def append(self, data):
        # insert immediately after the cursor
        link = Link(data)
        assert data not in self.link_refs
        self.link_refs[data] = link
        if not self.iter:
            self.iter = link
        else:
            self._insert_after(self.iter, link)

    def remove(self, data):
        # raises KeyError (from dict.pop) if data is not present
        link = self.link_refs.pop(data)
        if len(self.link_refs) == 0:
            self.iter = None
            return
        prev = link.prev
        next = link.next
        assert next is not None and prev is not None
        prev.next = next
        next.prev = prev
        if link == self.iter:
            # keep the cursor valid by advancing it past the removed link
            self.iter = next

    ## stuff I consider to be link-related
    ########
    def _double_link(self, link1, link2):
        # was a single item loop, move to a double
        assert link1.prev == link1 and link1.next == link1
        link1.prev = link2
        link1.next = link2
        link2.next = link1
        link2.prev = link1

    def _insert_after(self, link1, link2):
        # splice link2 into the cycle directly after link1
        assert link1 != link2
        if link1.next == link1:
            self._double_link(link1, link2)
        else:
            link2.next = link1.next
            link2.prev = link1
            link1.next.prev = link2
            link1.next = link2

    def _insert_before(self, link1, link2):
        # splice link2 into the cycle directly before link1
        assert link1 != link2
        if link1.prev == link1:
            self._double_link(link1, link2)
        else:
            link2.prev = link1.prev
            link2.next = link1
            link1.prev.next = link2
            link1.prev = link2
    ########

    def iterator(self):
        # generator wrapper around __iter__ (shares the same cursor)
        for i in iter(self):
            yield i

    def __iter__(self):
        # Yields forever while items remain, advancing the shared cursor.
        if not self.iter:
            return
        while True:
            yield self.iter.data
            # someone could remove an item during iteration
            if not self.iter:
                return
            self.iter = self.iter.next

    def __len__(self):
        return len(self.link_refs)

    def __str__(self):
        n = len(self.link_refs)   # NOTE(review): n is unused
        a = []
        # don't interrupt iteration for a print
        first = self.iter
        next = first
        while next:
            a.append(str(next))
            next = next.next
            # data values are unique (see link_refs assert), so data
            # equality here is equivalent to reaching the first link again
            if next.data == first.data:
                break
        items = '\n'.join(a)
        return "iter: %s \n[\n%s\n]" % (self.iter, items)
+    
+
if __name__ == '__main__':
    # Benchmark harness comparing CircularList against a plain list and
    # a QList-based deque (Python 2; uses print statements and xrange).
    import time

    length = 80000
    class ltype(list):
        def prepend(self, i):
            self.insert(0, i)
    from BTL.Lists import QList
    class qtype(QList):
        def prepend(self, i):
            self.append(i)
        def iterator(self):
            # round-robin iteration by rotating the deque
            if len(self) == 0:
                return
            while True:
                yield self[0]
                if len(self) == 0:
                    return
                self.append(self.popleft())

    # uncomment one of these to benchmark the alternative containers
    #CircularList = ltype
    #CircularList = qtype
    print CircularList

    s = time.clock()
    l = CircularList()
    for i in xrange(length):
        l.append(i)
    #print l
    print 'append ', time.clock() - s

    s = time.clock()
    l = CircularList()
    for i in xrange(length):
        l.prepend(i)
    #print l
    print 'prepend', time.clock() - s

    s = time.clock()
    l = CircularList()
    for i in xrange(length):
        if i % 2 == 0:
            l.prepend(i)
        else:
            l.append(i)
    #print l
    print 'sort   ', time.clock() - s

    # interleave insertion with iteration through a live iterator
    #fair = {}
    s = time.clock()
    l = CircularList()
    it = l.iterator()
    for i in xrange(length):
        l.prepend(i)
        #fair[i] = 0
        x = it.next()
        #print x, i
        #fair[x] += 1
        #assert x == i, '%s %s' % (x, i)
    #print l
    print 'iter   ', time.clock() - s
    #for k in fair:
    #    print k, fair[k]

    # smoke-test add/remove down to empty
    l = CircularList()
    print l
    l.prepend(0)
    print l
    l.prepend(1)
    print l
    l.remove(1)
    print l
    l.remove(0)
    print l

+ 80 - 0
html/bin/clients/mainline/BTL/connection_cache.py

@@ -0,0 +1,80 @@
+import sys
+import time
+import random
+import Queue
+import traceback
+from LIFOQueue import LIFOQueue
+import pycurllib
+
# Tunables shared by the connection caches below.
max_wait = 5                     # seconds to block waiting for a pooled connection
max_connections = 1              # advisory per-cache bound (see ConnectionCache.get_connection)
inf_wait_max_connections = 1000  # past this many connections, block indefinitely instead of growing
+
class ConnectionCache(object):
    """Bounded pool of reusable connections backed by a LIFO queue.

    The bound is advisory: concurrent callers may push the connection
    count slightly past ``max`` (see get_connection).  Subclasses must
    provide _make_connection().
    """

    def __init__(self, max=15):
        self.size = 0                               # connections created so far
        self.max = max
        self.cache = LIFOQueue(maxsize = self.max)

    def _new_connection(self):
        # account for the connection before creating it
        self.size += 1
        return self._make_connection()

    def get_connection(self):
        """Return a pooled connection, creating one when allowed.

        Blocks up to max_wait seconds when the pool is exhausted, and
        only blocks indefinitely once inf_wait_max_connections have
        been created.
        """
        if self.size > max_connections:
            # ERROR: Should log this!
            #sys.stderr.write("ConnectionCache queue exceeds %d (%d)\n" %
            #                 (max_connections, self.cache.qsize()))
            pass

        try:
            return self.cache.get_nowait()
        except Queue.Empty:
            pass

        # Deliberately unlocked: max is advisory.  If two threads
        # eagerly await a connection near max, allow them both to
        # create one.
        if self.size < self.max:
            return self._new_connection()

        try:
            return self.cache.get(True, max_wait)
        except Queue.Empty:
            # ERROR: Should log this!
            #sys.stderr.write("ConnectionCache waited more than "
            #                 "%d seconds for one of %d connections!\n" %
            #                 (max_wait, self.size))
            pass

        if self.size > inf_wait_max_connections:
            return self.cache.get()

        return self._new_connection()

    def put_connection(self, c):
        """Return connection c to the pool."""
        self.cache.put(c)
+
+
class PyCURL_Cache(ConnectionCache):
    """ConnectionCache whose connections are pycurllib Requests for one URI."""

    def __init__(self, uri, max):
        self.uri = uri
        super(PyCURL_Cache, self).__init__(max)

    def _make_connection(self):
        """Create a fresh Request for the cached URI."""
        request = pycurllib.Request(self.uri)
        #request.set_timeout(20)
        return request
+
class CacheSet(object):
    """Registry mapping url -> connection cache, created on demand."""

    def __init__(self, max_per_cache = max_connections):
        self.cache = {}                 # url -> cache instance
        self.max_per_cache = max_per_cache

    def get_cache(self, cachetype, url):
        """Return the cache for url, creating a cachetype instance if new.

        NOTE(review): caches are keyed by url only, so the cachetype of
        the first call wins for a given url.
        """
        try:
            return self.cache[url]
        except KeyError:
            self.cache[url] = cachetype(url, max=self.max_per_cache)
            return self.cache[url]

# module-wide shared registry
cache_set = CacheSet()
+

+ 79 - 0
html/bin/clients/mainline/BTL/coro.py

@@ -0,0 +1,79 @@
+# coroutine convenience functions
+
+from BTL.reactor_magic import reactor
+from BTL.yielddefer import launch_coroutine
+from BTL.defer import wrap_task, Deferred
+
def coro(f, *args, **kwargs):
    """Launch f(*args, **kwargs) as a coroutine stepped by the reactor."""
    scheduler = wrap_task(reactor.callLater)
    return launch_coroutine(scheduler, f, *args, **kwargs)
+
def coroutine(_f):
    """Use the following as a decorator. Ex:

          @coroutine
          def mycoro():
             ...
             yield df
             df.getResult()

          ...
          df = mycoro()
          ...

       Unlike the coroutine decorator in greenlet_yielddefer, this works without
       greenlets.  This is also typically cleaner than using coro().
       """
    from functools import wraps

    # wraps preserves _f's __name__/__doc__ on the wrapper, so the
    # decorated coroutine remains introspectable and log messages keep
    # the original function name.
    @wraps(_f)
    def replacement(*a, **kw):
        return launch_coroutine(wrap_task(reactor.callLater), _f, *a, **kw)
    return replacement
+
def wait(n):
    """Return a Deferred whose callback fires with 0 after n seconds."""
    result = Deferred()
    reactor.callLater(n, result.callback, 0)
    return result
+
+
@coroutine
def init_yield(clss, *args, **kwargs):
    """Instantiate an object of type clss and then call its asynchronous initializer
       (__dfinit__). The __dfinit__ returns a Deferred.  When the deferred's callback is called
       execution resumes at init_yield and the fully initialized object is returned."""
    # The magic kwarg is consumed by the use_init_yield guard (if the
    # class uses it) to prove construction went through init_yield.
    kwargs['__magic_init_yield'] = True
    obj = clss(*args, **kwargs)       # synchronous initialization.
    df = obj.__dfinit__()                # asynchronous (deferred) initialization.
    yield df
    df.getResult()   # re-raises here if __dfinit__ failed
    yield obj        # final yield: the coroutine's result value
+
def use_init_yield( init ):
    """Use this as a decorator to any class's __init__ to require that
       class be initialized using init_yield.  This guarantees that the
       asynchronous initializer __dfinit__ gets called.  Ex:

           class Foo(object):
               @use_init_yield
               def __init__( self, a, b, c):
                   ...
                   some_synchronous_initialization()
                   ...
               def __dfinit__( self ):
                   ...
                   df = some_initialization()
                   return df
                   ...

       Now to instantiate an object of type Foo, we use init_yield:

           df = init_yield(Foo,a,b,c)
           yield df
           foo = df.getResult()

       If we try to instantiate Foo directly, we get an exception:

           foo = Foo(a,b,c)  # causes an AssertionException.
       """
    def look_for_magic( *a, **kw ):
        # the marker is only present when called via init_yield
        assert '__magic_init_yield' in kw, "Instantiate using init_yield"
        kw.pop('__magic_init_yield')
        init(*a, **kw)   # __init__ returns nothing.
    return look_for_magic

+ 157 - 0
html/bin/clients/mainline/BTL/cpu_meter.py

@@ -0,0 +1,157 @@
+# Author: David Harrison
+# Multi-cpu and Windows version: Greg Hazel
+
+import os
+if os.name == "nt":
+    import win32pdh
+    import win32api
+
class CPUMeterBase(object):
    """Base class for CPU utilization meters.

    Subclasses implement update() to refresh self._util (overall
    utilization, 0..1) and self._util_each (one value per CPU).
    Sampling reschedules itself on the twisted reactor every
    update_interval seconds.
    """

    def __init__(self, update_interval = 2):
        from twisted.internet import reactor
        self.reactor = reactor
        self._util = 0.0        # most recent overall utilization
        self._util_each = []    # most recent per-CPU utilizations
        self._interval = update_interval
        self.reactor.callLater(self._interval, self._update)

    def _update(self):
        # refresh the readings, then reschedule this sampler
        self.update()
        self.reactor.callLater(self._interval, self._update)

    def update(self):
        """Refresh _util and _util_each; must be overridden."""
        raise NotImplementedError

    def get_interval(self):
        """Seconds between samples."""
        return self._interval

    def get_utilization(self):
        """Overall utilization (0..1) from the last sample."""
        return self._util

    def get_utilization_each(self):
        """Per-CPU utilization list from the last sample."""
        return self._util_each
+
+
+class CPUMeterUnix(CPUMeterBase):
+    """Averages CPU utilization over an update_interval."""
+    
+    def __init__(self, update_interval = 2):
+        self._old_stats = self._get_stats()
+        CPUMeterBase.__init__(self, update_interval)
+
+    def _get_stats(self):
+        fp = open("/proc/stat")
+        ln = fp.readline()
+        stats = ln[4:].strip().split()[:4]
+        total = [long(x) for x in stats]
+        cpus = []
+        ln = fp.readline()
+        while ln.startswith("cpu"):
+            stats = ln[4:].strip().split()[:4]
+            cpu = [long(x) for x in stats]
+            cpus.append(cpu)
+            ln = fp.readline()
+        return total, cpus
+
+    def _get_util(self, oldl, newl):
+        old_user, old_nice, old_sys, old_idle = oldl
+        user, nice, sys, idle = newl
+        user -= old_user
+        nice -= old_nice
+        sys -= old_sys
+        idle -= old_idle
+        total = user + nice + sys + idle
+        return float((user + nice + sys)) / total        
+        
+    def update(self):
+        old_total, old_cpus = self._old_stats
+        total, cpus = self._old_stats = self._get_stats()
+        self._util = self._get_util(old_total, total)
+        self._util_each = []
+        for old_cpu, cpu in zip(old_cpus, cpus):
+            self._util_each.append(self._get_util(old_cpu, cpu))
+
+
class CPUMeterWin32(CPUMeterBase):
    """Averages CPU utilization over an update_interval (via win32pdh).

    Query/counter index 0 tracks the "_Total" processor counter;
    indices 1..n track the individual CPUs.
    """

    def __init__(self, update_interval = 2):
        self.format = win32pdh.PDH_FMT_DOUBLE
        self.hcs = []   # PDH counter handles
        self.hqs = []   # PDH query handles
        self._setup_query("_Total")
        num_cpus = win32api.GetSystemInfo()[4]
        for cpu_index in xrange(num_cpus):
            self._setup_query(cpu_index)
        CPUMeterBase.__init__(self, update_interval)

    def __del__(self):
        self.close()

    def _setup_query(self, which):
        """Open a PDH query for '% Processor Time' of Processor(which)."""
        counter_path = win32pdh.MakeCounterPath(
            (None, "Processor(%s)" % which, None, None, -1,
             "% Processor Time"))
        query = win32pdh.OpenQuery()
        self.hqs.append(query)
        try:
            self.hcs.append(win32pdh.AddCounter(query, counter_path))
        except:
            # release any handles opened so far, then re-raise
            self.close()
            raise

    def close(self):
        """Best-effort release of all counter and query handles."""
        for counter in self.hcs:
            if not counter:
                continue
            try:
                win32pdh.RemoveCounter(counter)
            except:
                pass
        self.hcs = []
        for query in self.hqs:
            if not query:
                continue
            try:
                win32pdh.CloseQuery(query)
            except:
                pass
        self.hqs = []

    def _get_util(self, i):
        """Collect and return utilization (0..1) for counter i."""
        win32pdh.CollectQueryData(self.hqs[i])
        kind, value = win32pdh.GetFormattedCounterValue(self.hcs[i], self.format)
        return value / 100.0

    def update(self):
        self._util = self._get_util(0)   # the "_Total" counter
        self._util_each = []
        for i in xrange(1, len(self.hcs)):
            self._util_each.append(self._get_util(i))
+
+
# Pick the platform-appropriate implementation at import time.
if os.name == "nt":
    CPUMeter = CPUMeterWin32
else:
    CPUMeter = CPUMeterUnix

if __name__ == "__main__":
    # Demo: print overall and per-CPU utilization once per second.
    from twisted.internet import reactor
    cpu = CPUMeter(1)

    def print_util():
        print cpu.get_utilization()
        print cpu.get_utilization_each()
        reactor.callLater(1, print_util)

    reactor.callLater(1, print_util)
    reactor.run()
+

+ 157 - 0
html/bin/clients/mainline/BTL/crypto_message.py

@@ -0,0 +1,157 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# by Benjamin C. Wiley Sittler
+
+import Crypto.Cipher.AES as _AES
+import sha as _sha
+import os as _os
+import hmac as _hmac
+import string as _string
+
+_urlbase64 = _string.maketrans('+/-_', '-_+/')
+
def pad(data, length):
    '''
    PKCS #7-style padding with the given block length (1..255).

    Always appends at least one byte; when len(data) is already a
    multiple of length, a full extra block of padding is added.
    '''
    assert length < 256
    assert length > 0
    n = length - len(data) % length
    assert n <= length
    assert n > 0
    return data + chr(n) * n
+
def unpad(data, length):
    '''
    PKCS #7-style unpadding with the given block length (1..255).

    Asserts that every padding byte matches the pad count before
    stripping it.
    '''
    assert length < 256
    assert length > 0
    n = ord(data[-1])
    assert n <= length
    assert n > 0
    assert data[-n:] == chr(n) * n
    return data[:-n]
+
def ascii(data):
    '''
    Encode data as URL-safe variant of Base64.
    '''
    # NOTE: shadows the builtin ascii(); kept for API compatibility.
    encoded = data.encode('base64')
    # swap +/ for -_ and drop the newlines the base64 codec inserts
    return encoded.translate(_urlbase64, '\r\n')
+
def unascii(data):
    '''
    Decode data from URL-safe variant of Base64.
    '''
    standard = data.translate(_urlbase64)
    decoded = standard.decode('base64')
    # round-trip check rejects malformed / non-canonical input
    assert ascii(decoded) == data
    return decoded
+
def encode(data, secret, salt = None):
    '''
    Encode and return the data as a random-IV-prefixed AES-encrypted
    HMAC-SHA1-authenticated padded message corresponding to the given
    data string and secret, which should be at least 36 randomly
    chosen bytes agreed upon by the encoding and decoding parties.
    '''
    assert len(secret) >= 36
    if salt is None:
        salt = _os.urandom(16)   # random IV
    # secret[:16] keys AES-CBC, secret[16:] keys the HMAC
    cipher = _AES.new(secret[:16], _AES.MODE_CBC, salt)
    # pad as if the 20-byte MAC were already prepended, then strip it
    padded_data = pad(20 * '\0' + data, 16)[20:]
    digest = _hmac.HMAC(key = secret[16:], msg = padded_data,
                        digestmod = _sha).digest()
    return salt + cipher.encrypt(digest + padded_data)
+
def decode(data, secret):
    '''
    Decode and return the data from random-IV-prefixed AES-encrypted
    HMAC-SHA1-authenticated padded message corresponding to the given
    data string and secret, which should be at least 36 randomly
    chosen bytes agreed upon by the encoding and decoding parties.
    '''
    assert len(secret) >= 36
    salt, body = data[:16], data[16:]
    cipher = _AES.new(secret[:16], _AES.MODE_CBC, salt)
    plaintext = cipher.decrypt(body)
    mac, padded_data = plaintext[:20], plaintext[20:]
    expected = _hmac.HMAC(key = secret[16:], msg = padded_data,
                          digestmod = _sha).digest()
    # NOTE(review): '==' is not a constant-time comparison.
    assert mac == expected
    return unpad(20 * '\0' + padded_data, 16)[20:]
+
def test():
    '''
    Trivial smoke test to make sure this module works.
    '''
    # Known-answer vectors: each tuple is (plaintext, expected message
    # under `secret`, expected message under `secret2`), all produced
    # with the fixed salt below.
    secret = unascii('D_4j_P5Fh-UWUuH2U3IYw2erxRab5QX0zOR7eYlucT0GfuuwxgoGcfKI_rnyStbllZTPBbCESbKv0kMsUB9tOnLvAU2k7bCcMy7ylUqFwgc=')
    secret2 = unascii('e3YUIIA3APP66cMJrKNRAHVm0nd7BRAxZqyiYadTML78v2yS')
    salt = unascii('yRja3Cj5qc2xhYoSJtCBSw==')
    for data, message, message2 in (
        ('Hello, world!',
         'yRja3Cj5qc2xhYoSJtCBSxqHihP8mZ8TNuiLv_i41uaHM8jUu4N2cpU_XmlH0raoq-6FLOHE3ScV9aPnQ9Ulsg==',
         'yRja3Cj5qc2xhYoSJtCBS8MPPvak9ZDXydyMlACoQ7WSlM7X4PunKhJa775itirxJPD1eFgSnWHjAjmZn_8bvg==',
         
         ),
        ('',
         'yRja3Cj5qc2xhYoSJtCBS6vWZ3nvvsp3gM2-G-co6fVCvkLv6pRrfLQg2vm1yNzr',
         'yRja3Cj5qc2xhYoSJtCBSy9XX0E8Re0XumS1wMMEJFwSkTIQBGqbWGH4_GPMwdrR',
         ),
        ('\0',
         'yRja3Cj5qc2xhYoSJtCBSyEz2FFkaC3bRhMV03csag5MMIrVaWeWK2J1IXIaK_UQ',
         'yRja3Cj5qc2xhYoSJtCBS-05SxrZqgT9XhcEWp0eTLCrdQpnzBGKLL8qvIsc6nx6',
         ),
        ('Hi there!',
         'yRja3Cj5qc2xhYoSJtCBS8oy34UlBkk3v__LUHTa557U04HT_-M80DunhcKbFh-q',
         'yRja3Cj5qc2xhYoSJtCBS-6M4ylGA0jmaPjWRiEoBy3j1R1o17_KbsAH_0CiZRhx',
         ),
        ):
        # round-trip and known-answer checks for both secrets
        assert unascii(ascii(data)) == data
        assert ascii(unascii(message)) == message
        assert len(pad(data, 16)) % 16 == 0
        assert unpad(pad(data, 16), 16) == data
        assert message == ascii(encode(data, secret, salt))
        assert decode(unascii(message), secret) == data
        assert decode(encode(data, secret), secret) == data
        assert message2 == ascii(encode(data, secret2, salt))
        assert decode(unascii(message2), secret2) == data
        assert decode(encode(data, secret2), secret2) == data

# The smoke test runs at import time; importing this module fails if
# the crypto round-trip is broken.
test()
+
def main(sys):
    """Command-line filter: for each stdin line, try to decrypt it; if
    decoding fails for any reason, encrypt the line instead.  The
    secret (and optional salt) come from argv, or a fresh secret is
    generated and printed to stderr."""
    progname = sys.argv[0]
    secret = _os.urandom(36)
    salt = None
    if len(sys.argv) < 2:
        # no secret given: generate one and tell the user
        sys.stderr.write('%s: secret is %s\n' % (progname, ascii(secret)))
        sys.stderr.flush()
    elif len(sys.argv) < 3:
        progname, secret = sys.argv
        secret = unascii(secret)
    else:
        progname, secret, salt = sys.argv
        secret = unascii(secret)
        salt = unascii(salt)
    while True:
        line = sys.stdin.readline()
        if not line:
            break
        try:
            sys.stdout.write('%s' % decode(unascii(line.rstrip('\r\n')), secret))
        except:
            # not a valid encrypted message under this secret: encrypt it
            sys.stdout.write('%s\n' % ascii(encode(line, secret, salt)))
        sys.stdout.flush()

if __name__ == '__main__':
    import sys
    main(sys)

+ 222 - 0
html/bin/clients/mainline/BTL/daemon.py

@@ -0,0 +1,222 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# by David Harrison
+
+import errno
+import os
+import sys
+import errno
+import pwd
+import grp
+import stat
+import logging
+from twisted.python.util import switchUID
+#from twisted.scripts import twistd
+#try:
+#  from twisted.scripts.twistd import checkPID
+#except:
+#  from twisted.scripts._twistd_unix import checkPID
+
+from BTL.log import injectLogger, ERROR, INFO, DEBUG
+from BTL.platform import app_name
+log = logging.getLogger("daemon")
+
+noisy = False
def getuid_from_username(username):
    """Return the uid of the given account name (KeyError if unknown)."""
    return pwd.getpwnam(username).pw_uid
+
def getgid_from_username(username):
    """Return the primary gid of the given account name."""
    return pwd.getpwnam(username).pw_gid
+
def getgid_from_groupname(groupname):
    """Return the gid of the given group name (KeyError if unknown)."""
    return grp.getgrnam(groupname).gr_gid
+
+def daemon(
+    username = None, groupname = None,
+    use_syslog = None, log_file = None, logfile = None, # HACK for backwards compat.
+    verbose = False,
+    capture_output = True,
+    twisted_error_log_level = ERROR,
+    twisted_info_log_level = INFO,
+    capture_stderr_log_level = ERROR,
+    capture_stdout_log_level = INFO,
+    capture_stderr_name = 'stderr',
+    capture_stdout_name = 'stdout',
+    log_level = DEBUG,
+    log_twisted = True,
+    pidfile = None,
+    use_localtime=False):
+    """When this function returns, you are a daemon.
+
+       If use_syslog or a log_file is specified then this installs a logger.
+
+       Iff capture_output is specified then stdout and stderr
+       are also directed to syslog.
+
+       If use_syslog is None then it defaults to True if no log_file
+       is provided and the platform is not Darwin.
+
+       The following arguments are passed through to BTL.log.injectLogger.
+
+        use_syslog, log_file, verbose,
+        capture_output, twisted_error_log_level,
+        twisted_info_log_level, capture_stderr_log_level,
+        capture_stdout_log_level, capture_stderr_name,
+        capture_stdout_name, log_level, log_twisted
+
+       daemon no longer removes pid file.  Ex: If
+       a monitor sees that a pidfile exists and the process is not
+       running then the monitor restarts the process.
+       If you want the process to REALLY die then the
+       pid file should be removed external to the program,
+       e.g., by an init.d script that is passed "stop".
+    """
+    assert log_file is None or logfile is None, "logfile was provided for backwards " \
+           "compatibility.  You cannot specify both log_file and logfile."
+    if log_file is None:
+        log_file = logfile
+
+    try:
+        if os.name == 'mac':
+            raise NotImplementedError( "Daemonization doesn't work on macs." )
+
+        if noisy:
+            print "in daemon"
+
+        uid = os.getuid()
+        gid = os.getgid()
+        if uid == 0 and username is None:
+            raise Exception( "If you start with root privileges you need to "
+                "provide a username argument so that daemon() can shed those "
+                "privileges before returning." )
+        if username:
+            uid = getuid_from_username(username)
+            if noisy:
+                print "setting username to uid of '%s', which is %d." % ( username, uid )
+            if uid != os.getuid() and os.getuid() != 0:
+                raise Exception( "When specifying a uid other than your own "
+                   "you must be running as root for setuid to work. "
+                   "Your uid is %d, while the specified user '%s' has uid %d."
+                   % ( os.getuid(), username, uid ) )
+            gid = getgid_from_username(username) # uses this user's group
+        if groupname:
+            if noisy:
+                print "setting groupname to gid of '%s', which is %d." % (groupname,gid)
+            gid = getgid_from_groupname(groupname)
+
+        pid_dir = os.path.split(pidfile)[0]
+        if pid_dir and not os.path.exists(pid_dir):
+            os.mkdir(pid_dir)
+            os.chown(pid_dir,uid,gid)
+        checkPID(pidfile)
+        if use_syslog is None:
+            use_syslog = sys.platform != 'darwin' and not log_file
+        if log_file:
+            if use_syslog:
+                raise Exception( "You have specified both a log_file and "
+                    "that the daemon should use_syslog.  Specify one or "
+                    "the other." )
+            print "Calling injectLogger"
+            injectLogger(use_syslog=False, log_file = log_file, log_level = log_level,
+                         capture_output = capture_output, verbose = verbose,
+                         capture_stdout_name = capture_stdout_name,
+                         capture_stderr_name = capture_stderr_name,
+                         twisted_info_log_level = twisted_info_log_level,
+                         twisted_error_log_level = twisted_error_log_level,
+                         capture_stdout_log_level = capture_stdout_log_level,
+                         capture_stderr_log_level = capture_stderr_log_level,
+                         use_localtime = use_localtime )
+        elif use_syslog:
+            injectLogger(use_syslog=True, log_level = log_level, verbose = verbose,
+                         capture_output = capture_output,
+                         capture_stdout_name = capture_stdout_name,
+                         capture_stderr_name = capture_stderr_name,
+                         twisted_info_log_level = twisted_info_log_level,
+                         twisted_error_log_level = twisted_error_log_level,
+                         capture_stdout_log_level = capture_stdout_log_level,
+                         capture_stderr_log_level = capture_stderr_log_level )
+        else:
+            raise Exception( "You are attempting to daemonize without a log file,"
+                             "and with use_syslog set to false.  A daemon must "
+                             "output to syslog, a logfile, or both." )
+        if pidfile is None:
+            pid_dir = os.path.join("/var/run/", app_name )
+            pidfile = os.path.join( pid_dir, app_name + ".pid")
+        daemonize()  # forks, moves into its own process group, forks again,
+                     # middle process exits with status 0.  Redirects stdout,
+                     # stderr to /dev/null.
+
+        # I should now be a daemon.
+
+        open(pidfile,'wb').write(str(os.getpid()))
+        if not os.path.exists(pidfile):
+            raise Exception( "pidfile %s does not exist" % pidfile )
+        os.chmod(pidfile, stat.S_IRUSR|stat.S_IWUSR|stat.S_IROTH|stat.S_IRGRP)
+
+        if os.getuid() == 0:
+            if uid is not None or gid is not None:
+                switchUID(uid, gid)
+        if os.getuid() != uid:
+            raise Exception( "failed to setuid to uid %d" % uid )
+        if os.getgid() != gid:
+            raise Exception( "failed to setgid to gid %d" % gid )
+    except:
+        log.exception("daemonizing may have failed")
+        import traceback
+        traceback.print_exc()
+        raise
+
# Copied from twistd.... see daemonize for reason.
# (Python 2 `except OSError, why` syntax, kept verbatim from twistd.)
def checkPID(pidfile):
    """Exit via sys.exit() if pidfile names a live process; remove the
    pidfile if it is stale (the recorded pid no longer exists)."""
    if not pidfile:
        return
    if os.path.exists(pidfile):
        try:
            pid = int(open(pidfile).read())
        except ValueError:
            sys.exit('Pidfile %s contains non-numeric value' % pidfile)
        try:
            # signal 0 only probes for process existence
            os.kill(pid, 0)
        except OSError, why:
            if why[0] == errno.ESRCH:
                # The pid doesnt exists.
                log.warning('Removing stale pidfile %s' % pidfile)
                os.remove(pidfile)
            else:
                sys.exit("Can't check status of PID %s from pidfile %s: %s" %
                         (pid, pidfile, why[1]))
        else:
            sys.exit("""\
Another twistd server is running, PID %s\n
This could either be a previously started instance of your application or a
different application entirely. To start a new one, either run it in some other
directory, or use the --pidfile and --logfile parameters to avoid clashes.
""" %  pid)
+
# Copied from twistd.  twistd considers this an internal function
# and across versions it got moved.  To prevent future breakage,
# I just assume incorporate daemonize directly.
def daemonize():
    """Classic double-fork daemonization: detach from the controlling
    terminal and redirect fds 0-2 to /dev/null."""
    # See http://www.erlenstar.demon.co.uk/unix/faq_toc.html#TOC16
    if os.fork():   # launch child and...
        os._exit(0) # kill off parent
    os.setsid()     # become session leader, detached from the terminal
    if os.fork():   # launch child and...
        os._exit(0) # kill off parent again.
    os.umask(077)   # Python 2 octal literal: new files are rw for owner only
    null=os.open('/dev/null', os.O_RDWR)
    for i in range(3):
        # point stdin/stdout/stderr at /dev/null
        try:
            os.dup2(null, i)
        except OSError, e:
            if e.errno != errno.EBADF:
                raise
    os.close(null)

+ 19 - 0
html/bin/clients/mainline/BTL/decorate.py

@@ -0,0 +1,19 @@
+# usage:
+#
+# o.method = decorate_func(somefunc, o.method)
+#
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+# Return a wrapper that calls `new` (for its side effects, return value
+# discarded) and then `old`, returning old's result.  Both receive the
+# same arguments.
+def decorate_func(new, old):
+    def runner(*a, **kw):
+        new(*a, **kw)
+        return old(*a, **kw)
+    return runner

+ 177 - 0
html/bin/clients/mainline/BTL/defer.py

@@ -0,0 +1,177 @@
+# The contents of this file are subject to the Python Software Foundation
+# License Version 2.3 (the License).  You may not copy or use this file, in
+# either source code or executable form, except in compliance with the License.
+# You may obtain a copy of the License at http://www.python.org/license.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+import sys
+import weakref
+import traceback
+import BTL.stackthreading as threading
+from twisted.internet import defer
+from twisted.python import failure
+debug = False
+
+# Keep a reference to the original twisted Failure before we replace it below.
+tp_Failure = failure.Failure
+
+# used to emulate sys.exc_info()
+def exc_info(self):
+    return self.type, self.value, self.tb
+tp_Failure.exc_info = exc_info
+
+# Maybe Dangerous. If you're having memory leaks, look here.
+# used to prevent traceback stripping for standard re-raises
+#old_cleanFailure = tp_Failure.cleanFailure
+#def cleanFailure(self):
+#    self.tb2 = self.tb
+#    old_cleanFailure(self)
+#    self.tb = self.tb2
+#tp_Failure.cleanFailure = cleanFailure
+
+# Subclass that splices the stack of a previously-recorded failure into
+# this one, so re-raising an exception preserves the original frames.
+class Failure(tp_Failure):
+    def __init__(self, *a, **kw):
+        tp_Failure.__init__(self, *a, **kw)
+        # magic to allow re-raise of failures to do proper stack appending
+        if hasattr(self.value, 'failure'):
+            # Drop our two innermost frames and graft on the older failure's.
+            self.stack = self.stack[:-2] + self.value.failure.stack
+            self.frames = self.frames[:-2] + self.value.failure.frames
+        # Remember this failure on the exception for future re-raises.
+        self.value.failure = self
+# Monkeypatch: all of twisted now uses our Failure subclass.
+failure.Failure = Failure
+
+# Re-export the common twisted.internet.defer helpers under this module.
+fail = defer.fail
+succeed = defer.succeed
+execute = defer.execute
+maybeDeferred = defer.maybeDeferred
+timeout = defer.timeout
+
+DeferredQueue = defer.DeferredQueue
+Deferred = defer.Deferred
+
+# Synchronously extract a fired Deferred's result; re-raises if the
+# result is a Failure (after silencing the unhandled-error path).
+def getResult(self):
+    if isinstance(self.result, tp_Failure):
+        r = self.result
+        # Swallow the failure so twisted doesn't log it as unhandled.
+        self.addErrback(lambda fuckoff: None)
+        r.raiseException()
+    return self.result
+Deferred.getResult = getResult
+
+Deferred_errback = Deferred.errback
+# Replacement errback that coerces raw Exceptions into our Failure
+# subclass (instead of whatever failure.Failure was at import time).
+def errback(self, fail):
+    assert isinstance(fail, (tp_Failure, Exception)), repr(fail)
+    # this can check the wrong failure type if the imports occur in the
+    # wrong order.
+    #Deferred_errback(self, fail)
+    if not isinstance(fail, tp_Failure):
+        fail = Failure(fail)
+    self._startRunCallbacks(fail)
+errback.__doc__ = Deferred_errback.__doc__
+Deferred.errback = errback
+
+# Convenience: log a failure through `logger` (a callable or an object
+# with .error) with full traceback info, as an errback.
+def addLogback(self, logger, logmsg):
+    if not callable(logger):
+        logger = logger.error
+    def logback(failure):
+        logger(logmsg, exc_info=failure.exc_info())
+    return self.addErrback(logback)
+Deferred.addLogback = addLogback
+
+# not totally safe, but a start.
+# This lets you call callback/errback from any thread.
+# The next step would be for addCallbak and addErrback to be safe.
+class ThreadableDeferred(Deferred):
+    # queue_func(f, *a) must schedule f(*a) onto the reactor/main thread.
+    def __init__(self, queue_func):
+        assert callable(queue_func)
+        self.queue_func = queue_func
+        Deferred.__init__(self)
+
+    # Both firing methods marshal through queue_func rather than firing
+    # directly, so they are safe to invoke from worker threads.
+    def callback(self, result):
+        self.queue_func(Deferred.callback, self, result)
+
+    def errback(self, result):
+        self.queue_func(Deferred.errback, self, result)
+
+
+# go ahead and forget to call start()!
+# Deferred that runs f(*args, **kwargs) in its own thread and fires
+# (via queue_func, back on the scheduling thread) with the result.
+class ThreadedDeferred(Deferred):
+
+    def __init__(self, queue_func, f, *args, **kwargs):
+        Deferred.__init__(self)
+        # 'daemon' is popped from kwargs so it is not passed through to f.
+        daemon = False
+        if 'daemon' in kwargs:
+            daemon = kwargs.pop('daemon')
+        self.f = f
+        start = True
+        if queue_func is None:
+            # No queue supplied: fire callbacks directly from the worker
+            # thread, and leave it to the caller to invoke start().
+            start = False
+            queue_func = lambda f, *a, **kw : f(*a, **kw)
+        self.queue_func = queue_func
+        self.args = args
+        self.kwargs = kwargs
+        self.t = threading.Thread(target=self.run)
+        self.t.setDaemon(daemon)
+        if start:
+            self.start()
+
+    def start(self):
+        self.t.start()
+
+    def run(self):
+        try:
+            r = self.f(*self.args, **self.kwargs)
+            self.queue_func(self.callback, r)
+        except:
+            # Capture the in-flight exception as a Failure and deliver it.
+            self.queue_func(self.errback, Failure())
+
+
+# A threading Event that also fires as a Deferred when set.
+# NOTE(review): set() calls callback(None) unconditionally, so calling
+# set() twice raises AlreadyCalledError -- confirm callers set it once.
+class DeferredEvent(Deferred, threading._Event):
+    def __init__(self, *a, **kw):
+        threading._Event.__init__(self)
+        Deferred.__init__(self, *a, **kw)
+
+    def set(self):
+        threading._Event.set(self)
+        self.callback(None) # hmm, None?
+
+
+# Run f and route its outcome into df: callback on success, errback
+# (with the captured Failure) on any exception.  Returns df.
+def run_deferred(df, f, *a, **kw):
+    try:
+        v = f(*a, **kw)
+    except:
+        df.errback(Failure())
+    else:
+        df.callback(v)
+    return df
+
+
+# Like run_deferred, but delivery happens via queue_task (typically back
+# on the reactor thread).  If f itself returns a Deferred, chain to it.
+def run_deferred_and_queue(df, queue_task, f, *args, **kwargs):
+    try:
+        v = f(*args, **kwargs)
+    except:
+        queue_task(df.errback, Failure())
+        del df
+    else:
+        if isinstance(v, Deferred):
+            # v is owned by the caller, so add the callback
+            # now, but the task itself should queue.
+            # lambda over df here would break 'del df' above
+            # so do it with a local function.
+            def make_queueback(func):
+                return lambda r : queue_task(func, r)
+            v.addCallback(make_queueback(df.callback))
+            v.addErrback(make_queueback(df.errback))
+        else:
+            queue_task(df.callback, v)
+
+
+# Schedule f on a worker queue; its result is delivered back through
+# local_queue_task onto the returned Deferred.
+def defer_to_thread(local_queue_task, thread_queue_task, f, *args, **kwargs):
+    df = Deferred()
+    thread_queue_task(run_deferred_and_queue, df, local_queue_task,
+                      f, *args, **kwargs)
+    return df
+
+# Adapt an add_task(delay, f, ...) scheduler into a queue_func(f, ...)
+# that always schedules with zero delay.
+def wrap_task(add_task):
+    return lambda _f, *args, **kwargs : add_task(0, _f, *args, **kwargs)

+ 219 - 0
html/bin/clients/mainline/BTL/dlock.py

@@ -0,0 +1,219 @@
+#!/usr/bin/python
+#
+# Copyright 2006-2007 BitTorrent, Inc. All Rights Reserved.
+#
+# Written by Ben Teitelbaum
+
+import os
+import sys
+import socket
+
+from time import asctime, gmtime, time, sleep
+
+from twisted.internet import reactor, task
+
+class dlock(object):
+
+    def __init__(self, deadlockfile, update_period=300, myhost=None, debug=None):
+        if myhost == None: myhost = socket.gethostname()
+        self.host = myhost
+        self.pid   = os.getpid()
+        self.deadlockfile  = deadlockfile
+        self.refresher = task.LoopingCall(self.refresh)
+        self.update_period = update_period
+        self.debug = debug
+
+    # Block until lock is acquired, then refresh the lock file every
+    # update_period seconds.
+    #
+    # Nota Bene: while blocked on acquiring the lock, this sleeps the
+    # whole process; once the lock is acquired, an event-driven model
+    # (twisted reactor) is presumed.  The intended use (see test at
+    # bottom) is to block on acquire before running the Twisted
+    # reactor.
+    #
+    def acquire(self):
+        while True:
+            while self.islocked():
+                if self.debug:
+                    lock = self._readlock()
+                    print '%s locked by %s' % (self.deadlockfile, self._lockdict2string(lock))
+                sleep(self.update_period)
+            try:
+                # Use link count hack to work around NFS's broken
+                # file locking.
+                tempfile = '.' + str(self.pid) + self.host + str(time()) + '.tmp'
+                lockfile = self.deadlockfile + '.lock'
+
+                # Create temp lock file
+                fh = open(tempfile, "w")
+                fh.close()
+
+                # Atomicallly create lockfile as a hard link
+                try:
+                    os.link(tempfile, lockfile)
+                except:
+                    if self.debug:
+                        print "tempfile: " + tempfile
+                        print "lockfile: " + lockfile
+                    raise
+
+                # Check the number of links
+                if os.stat(tempfile)[3] == os.stat(lockfile)[3]:
+                    # Hooray, I have the write lock on the deadlock file!
+                    self._timestamp_deadlockfile(time())
+                    if self.debug:
+                        lock = self._readlock()
+                        print '%s acquired by %s' % (self.deadlockfile, self._lockdict2string(lock))
+                        self.refresher.start(self.update_period)
+                    # Release the lock
+                    os.unlink(tempfile)
+                    os.unlink(lockfile)
+
+                    return self
+                else:
+                    # Failed to grab write lock on deadlock file, keep looping
+                    if self.debug:
+                        print '%d failed to grab write lock on deadlock file: %s (will retry)' % (self.pid, self.deadlockfile)
+            except:
+                if self.debug:
+                    print 'File Lock Error: %s@%s could not acquire %s' % (self.pid, self.host, self.deadlockfile)
+                raise
+
+    def refresh(self):
+        assert self.ownlock()
+        # No need to grab a write lock on the deadlock file, since it's not stale
+        self._timestamp_deadlockfile(time())
+
+    def _timestamp_deadlockfile(self, ts):
+        try:
+            fh = open(self.deadlockfile, 'w')
+            fh.write(self._lockstr(ts))
+            fh.close()
+            os.chmod(self.deadlockfile, 0644)
+        except:
+            if self.debug:
+                print 'File Lock Error: %s@%s could not write %s' % (self.pid, self.host, self.deadlockfile)
+            raise
+
+    def release(self):
+        if self.ownlock():
+            try:
+                self.refresher.stop()
+                self._timestamp_deadlockfile(0)
+                if self.debug:
+                    print '%s@%s released lock %s' % (self.pid, self.host, self.deadlockfile)
+            except:
+                if self.debug:
+                    print 'File Lock Error: %s@%s could not release %s' % (self.pid, self.host, self.deadlockfile)
+                raise
+        return self
+
+    def islocked(self):
+        try:
+            if self._isstale():
+                # Lock seems stale, wait for one more update period and check again
+                sleep(self.update_period)
+                return not self._isstale()
+            else:
+                return True
+        except:
+            if self.debug:
+                print "islocked exception"
+            return False
+
+    def _isstale(self):
+        lock = self._readlock()
+        if time() - lock['timestamp'] > self.update_period:
+            return True
+        else:
+            return False
+
+    def _readlock(self):
+        try:
+            lock = {}
+            fh   = open(self.deadlockfile)
+            data = fh.read().split()
+            fh.close()
+            assert len(data) == 3
+            lock['pid'] = int(data[0])
+            lock['host'] = data[1]
+            lock['timestamp'] = float(data[2])
+            return lock
+        except:
+            if self.debug:
+                print 'File Lock Error: %s@%s reading %s' % (self.pid, self.host, self.deadlockfile)
+            raise
+
+    # Public method to read a lockfile.
+    @classmethod
+    def readlock(cls, lockfile):
+        lock = cls(deadlockfile=lockfile, myhost='dummy')
+        return lock._readlock()
+
+    def _lockdict2string(self, lock):
+        return '%s@%s at %s' % (lock['pid'], lock['host'], asctime(gmtime(lock['timestamp'])))
+
+    def _lockstr(self, ts):
+        return '%d %s %f'%(self.pid, self.host, ts)
+
+    def ownlock(self):
+        lock = self._readlock()
+        return (self.host == lock['host'] and
+                self.pid == lock['pid'])
+
+    def __del__(self):
+        self.release()
+
+
+# Tests
+#
+# Run several in parallel on multiple machines, but have at most one
+# whack the deadlock file on initialization.
+#
+def run_tests(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    deadlockfile = './dlock_test'
+    # 5-second update period so staleness/refresh is quick to observe.
+    l = dlock(deadlockfile, 5, debug=True)
+
+    # Stupid argv handling; just grab first arg and run that test
+    if len(argv) > 1:
+        if argv[1] == 'none':
+            print "Removing deadlock file."
+            os.unlink(deadlockfile)
+        elif argv[1] == 'old':
+            print "Creating stale deadlock file owned by no one."
+            fh = open(l.deadlockfile, 'w')
+            fh.write('%d %s %f'%(0, 0, 0))
+            fh.close()
+        elif argv[1] == 'new':
+            print "Creating fresh deadlock file owned by no one."
+            fh = open(l.deadlockfile, 'w')
+            fh.write('%d %s %f'%(0, 0, time()))
+            fh.close()
+        else:
+            print "Un-known arg--starting with old deadlock file."
+    else:
+            print "Starting with old deadlock file."
+
+    # Tease for a while, then release the lock
+    # Prints once a second for n ticks while asserting ownership.
+    def tease(l, n):
+        if n > 0:
+            assert l.ownlock()
+            print 'I (%d) have the lock--ha, ha ha!'%os.getpid()
+            reactor.callLater(1, tease, l, n - 1)
+        else:
+            l.release()
+
+    # Start teasing once reactor is run
+    reactor.callLater(1, tease, l, 20)
+
+    # But first, grab the lock (this blocks)
+    l.acquire()
+
+    reactor.run()
+
+if __name__ == "__main__":
+    sys.exit(run_tests())

+ 367 - 0
html/bin/clients/mainline/BTL/ebencode.py

@@ -0,0 +1,367 @@
+# The contents of this file are subject to the BitTorrent Open Source License
+# Version 1.1 (the License).  You may not copy or use this file, in either
+# source code or executable form, except in compliance with the License.  You
+# may obtain a copy of the License at http://www.bittorrent.com/license/.
+#
+# Software distributed under the License is distributed on an AS IS basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied.  See the License
+# for the specific language governing rights and limitations under the
+# License.
+
+from binascii import a2b_hex, b2a_hex
+import types
+import decimal
+
+class EBError(ValueError):
+    pass
+
+# Abstract typed accessor interface over a decoded EB value.  Each
+# getter raises EBError by default; the unreachable returns document
+# the expected return type for each accessor.
+class EBObject(object):
+    def __init__(self):
+        pass
+    
+    def get_int(self):
+        raise EBError
+        return 0
+
+    def get_string(self):
+        raise EBError
+        return ''
+
+    def get_ustring(self):
+        raise EBError
+        return u''
+
+    def get_list(self):
+        raise EBError
+        return [EBObject()]
+
+    def get_dict(self):
+        raise EBError
+        return {u'': EBObject()}
+
+# Concrete wrappers: each overrides exactly one accessor of EBObject.
+class IntEBObject(EBObject):
+    def __init__(self, i):
+        self.v = i
+
+    def get_int(self):
+        return self.v
+
+class StringEBObject(EBObject):
+    def __init__(self, s):
+        self.v = s
+
+    def get_string(self):
+        return self.v
+
+class UStringEBObject(EBObject):
+    def __init__(self, u):
+        self.v = u
+
+    def get_ustring(self):
+        return self.v
+
+class ListEBObject(EBObject):
+    def __init__(self, l):
+        self.v = l
+
+    def get_list(self):
+        return self.v
+
+class DictEBObject(EBObject):
+    def __init__(self, d):
+        self.v = d
+
+    def get_dict(self):
+        return self.v
+
+# Interpret a byte string as a big-endian unsigned integer.
+def toint(s):
+    return int(b2a_hex(s), 16)
+
+# Inverse of toint: big-endian byte string for a non-negative integer
+# (empty string for zero).
+def tostr(i):
+    if i == 0:
+        return ''
+    h = hex(i)[2:]
+    # Strip the 'L' suffix Python 2 appends to long literals.
+    if h[-1] == 'L':
+        h = h[:-1]
+    # Pad to an even number of hex digits for a2b_hex.
+    if len(h) & 1 == 1:
+        h = '0' + h
+    return a2b_hex(h)
+
+# Decode the variable-length integer at s[pos]; returns (value, newpos).
+# Encoding (see make_int): first byte < 0x80 is the value itself;
+# 0x80|n means the next n bytes are the value; 0xC0|m means the next m
+# bytes give a byte-count z, and the z bytes after that are the value.
+def read_int(s, pos):
+    y = ord(s[pos])
+    pos += 1
+    if not y & 0x80:
+        return y, pos
+    elif not y & 0x40:
+        y = y & 0x7f
+        return toint(s[pos:pos + y]), pos + y
+    else:
+        y = y & 0x3F
+        z = toint(s[pos:pos + y])
+        pos += y
+        return toint(s[pos:pos + z]), pos + z
+
+# Each decode_* takes (s, pos) just past the type character and returns
+# (value, position after the encoded value).
+def decode_none(s, pos):
+    return None, pos
+
+def decode_int(s, pos):
+    i, pos = read_int(s, pos)
+    return i, pos
+
+def decode_decimal(s, pos):
+    # Length-prefixed decimal string.
+    i, pos = read_int(s, pos)
+    r = s[pos:pos + i]
+    return decimal.Decimal(r), pos + i
+
+def decode_bool(s, pos):
+    i, pos = read_int(s, pos)
+    return bool(i), pos
+
+def decode_negative_int(s, pos):
+    # Magnitude is stored; sign comes from the '-' type character.
+    i, pos = read_int(s, pos)
+    return -i, pos
+
+def decode_string(s, pos):
+    # Length-prefixed raw byte string.
+    i, pos = read_int(s, pos)
+    r = s[pos:pos + i]
+    return r, pos + i
+
+def decode_float(s, pos):
+    # Floats are stored as their repr() string.
+    r, newpos = decode_string(s, pos)
+    f = float(r)
+    return f, newpos
+
+def decode_ustring(s, pos):
+    # Length-prefixed UTF-8 bytes decoded to unicode.
+    i, pos = read_int(s, pos)
+    r = s[pos:pos + i].decode('utf-8')
+    return r, pos + i
+
+def decode_list(s, pos):
+    # Elements until the ']' terminator.
+    r = []
+    while s[pos] != ']':
+        next, pos = decode_obj(s, pos)
+        r.append(next)
+    return r, pos + 1
+
+def decode_dict(s, pos):
+    # Alternating key/value objects until the '}' terminator.
+    r = {}
+    while s[pos] != '}':
+        key, pos = decode_obj(s, pos)
+        val, pos = decode_obj(s, pos)
+        r[key] = val
+    return r, pos + 1
+
+# Dispatch on the single type character at s[pos].
+def decode_obj(s, pos):
+    c = s[pos]
+    pos += 1
+    if c == 'n':
+        return decode_none(s, pos)
+    elif c == 'i':
+        return decode_int(s, pos)
+    elif c == 'd':
+        return decode_decimal(s, pos)
+    elif c == 'b':
+        return decode_bool(s, pos)
+    elif c == '-':
+        return decode_negative_int(s, pos)
+    elif c == 'f':
+        return decode_float(s, pos)
+    elif c == 's':
+        return decode_string(s, pos)
+    elif c == 'u':
+        return decode_ustring(s, pos)
+    elif c == '[':
+        return decode_list(s, pos)
+    elif c == '{':
+        return decode_dict(s, pos)
+    else:
+        raise EBError('invalid type character: %s' % str(c))
+
+# Raised for truncated input; inherits IndexError so existing callers
+# catching either type keep working.
+class EBIndexError(IndexError, EBError):
+    pass
+
+# Decode one complete EB-encoded value from x; rejects trailing bytes.
+def ebdecode(x):
+    try:
+        r, pos = decode_obj(x, 0)
+    except IndexError:
+        raise EBIndexError('apparently truncated string')
+    except UnicodeDecodeError:
+        raise EBError('invalid utf-8')
+    if pos != len(x):
+        raise EBError('excess data after valid prefix')
+    return r
+
+# Wrapper marking a string as already EB-encoded, so ebencode emits it
+# verbatim instead of re-encoding.
+class EBencached(object):
+
+    __slots__ = ['bencoded']
+
+    def __init__(self, s):
+        self.bencoded = s
+
+# Each encode_* appends string fragments for value x onto list r;
+# ebencode() joins the fragments at the end.
+def encode_bencached(x,r):
+    r.append(x.bencoded)
+
+# Encode a non-negative integer in the variable-length format that
+# read_int() decodes (1-byte literal / length-prefixed / doubly
+# length-prefixed).
+def make_int(i):
+    if i < 0x80:
+        return chr(i)
+    s = tostr(i)
+    if len(s) < 0x40:
+        return chr(0x80 | len(s)) + s
+    s2 = tostr(len(s))
+    return chr(0xC0 | len(s2)) + s2 + s
+
+def encode_none(v, r):
+    r.extend(('n', ''))
+
+def encode_int(i, r):
+    # Negative values use the '-' type character with the magnitude.
+    if i >= 0:
+        r.extend(('i', make_int(i)))
+    else:
+        r.extend(('-', make_int(-i)))
+
+def encode_decimal(d, r):
+    s = str(d)
+    r.extend(('d', make_int(len(s)), str(s)))
+
+def encode_bool(b, r):
+    r.extend(('b', make_int(int(bool(b)))))
+
+def encode_float(f, r):
+    # repr() round-trips the float exactly through decode_float.
+    s = repr(f)
+    r.extend(('f', make_int(len(s)), s))
+        
+def encode_string(s, r):
+    r.extend(('s', make_int(len(s)), s))
+
+def encode_unicode_string(u, r):
+    s = u.encode('utf-8')
+    r.extend(('u', make_int(len(s)), s))
+
+def encode_list(x, r):
+    r.append('[')
+    for i in x:
+        encode_func[type(i)](i, r)
+    r.append(']')
+
+def encode_dict(x, r):
+    # Keys are emitted in sorted order so encoding is canonical.
+    r.append('{')
+    ilist = x.items()
+    ilist.sort()
+    for k, v in ilist:
+        encode_func[type(k)](k, r)
+        encode_func[type(v)](v, r)
+    r.append('}')
+
+# Type -> encoder dispatch table (exact-type lookup, not isinstance).
+encode_func = {}
+encode_func[EBencached] = encode_bencached
+encode_func[types.NoneType] = encode_none
+encode_func[int] = encode_int
+encode_func[long] = encode_int
+encode_func[decimal.Decimal] = encode_decimal
+encode_func[bool] = encode_bool
+encode_func[float] = encode_float
+encode_func[str] = encode_string
+encode_func[unicode] = encode_unicode_string
+encode_func[list] = encode_list
+encode_func[tuple] = encode_list
+encode_func[dict] = encode_dict
+
+# Wrapped EBObject values encode as their underlying .v payload.
+def encode_wrapped(x, r):
+    encode_func[type(x.v)](x.v, r)
+
+encode_func[IntEBObject] = encode_wrapped
+encode_func[StringEBObject] = encode_wrapped
+encode_func[UStringEBObject] = encode_wrapped
+encode_func[ListEBObject] = encode_wrapped
+encode_func[DictEBObject] = encode_wrapped
+
+# Serialize x (and nested values) to a single EB-encoded string.
+def ebencode(x):
+    r = []
+    encode_func[type(x)](x, r)
+    return ''.join(r)
+
+# Round-trip sanity check: encode, decode, and verify value, type, and
+# byte-for-byte re-encoding.
+def c(v):
+    s = ebencode(v)
+    r = ebdecode(s)
+    assert v == r
+    if isinstance(v, bool):
+        assert isinstance(r, bool)
+    elif isinstance(v, (int, long)) and isinstance(r, (int, long)):
+        # assume it's right
+        pass
+    else:
+        assert type(v) == type(r), '%s is not %s' % (type(v), type(r))
+    assert ebencode(r) == s
+
+# NOTE(review): these self-tests run at import time as a side effect of
+# loading the module.
+c(None)
+c(0)
+c(3)
+c(3l)
+c(500)
+c(-4)
+c(True)
+c(False)
+c(4.0)
+c(-4.0)
+c(2 ** 5000 + 27)
+c('abc')
+c(decimal.Decimal('4.5'))
+c(u'pqr')
+c([1, 2])
+c([2, 'abc', u'pqr'])
+c({})
+c([[]])
+c({u'a': 2})
+c({u'abc': 2, u'pqr': 4})
+c([[1, 2], ['abc', 'pqr']])
+
+##class StreamEbdecode:
+##    def __init__(self):
+##        self.buf = ''
+##        self.bufint = None
+##        self.returns = []
+##
+##    def add(self, stuff):
+##        self.buf += stuff
+##        try:
+##            while True:
+##                if self.bufint is None:
+##                    mylength, pos = read_int(self.buf, 0)
+##                else:
+##                    mylength, pos = self.bufint, 0
+##                if pos + mylength > len(self.buf):
+##                    self.bufint = mylength
+##                    self.buf = self.buf[pos:]
+##                    break
+##                mything = ebdecode(self.buf[pos:pos + mylength])
+##                self.returns.append(mything)
+##                self.buf = self.buf[pos + mylength:]
+##                self.bufint = None
+##        except IndexError:
+##            pass
+##
+##    def next(self):
+##        return self.returns.pop(0)
+##
+##def streamwrap(thing):
+##    x = ebencode(thing)
+##    return make_int(len(x)) + x
+##
+##def c2(v):
+##    b = ''
+##    for i in v:
+##        b += streamwrap(i)
+##    r = []
+##    mystream = StreamEbdecode()
+##    for i in xrange(0, len(b), 11):
+##        mystream.add(b[i:min(i + 11, len(b))])
+##        try:
+##            while True:
+##                r.append(mystream.next())
+##        except IndexError:
+##            pass
+##    assert r == v
+##
+##c2(['a'])
+##c2(range(5000))
+##c2([''.join(str(i) for i in xrange(j)) for j in xrange(300)])

+ 85 - 0
html/bin/clients/mainline/BTL/ebrpc.py

@@ -0,0 +1,85 @@
+### ebrpc
+
+## query = ebencode({'y':'q', 'q':'<method>', 'a':[<params>]})
+## response = ebencode({'y':'r', 'r':<return value>})
+## fault = ebencode({'y':'e', 'c':'<fault code>', 's':'<fault string>'})
+
+from xmlrpclib import Error, Fault
+from types import TupleType
+
+from BTL.ebencode import ebencode, ebdecode
+
+# Serialize a fault message directly from a code and string.
+def dump_fault(code, msg):
+    return ebencode({'y':'e', 'c':code, 's':msg})
+
+
+# xmlrpclib.dumps-compatible signature; encoding/allow_none are accepted
+# but unused by the EB wire format.
+def dumps(params, methodname=None, methodresponse=None, encoding=None, allow_none=False):
+    if methodresponse and isinstance(params, TupleType):
+        assert len(params) == 1, "response tuple must be a singleton"
+    if methodname:
+        out = ebencode({'y':'q', 'q':methodname, 'a':params})
+    elif isinstance(params, Fault):
+        out = ebencode({'y':'e', 'c':params.faultCode, 's':params.faultString})
+    elif methodresponse:
+        out = ebencode({'y':'r', 'r':params[0]})
+    else:
+        raise Error("")
+    return out
+
+# Parse a message; raises Fault for fault packets, otherwise returns
+# (params, methodname) with methodname None for responses.
+def loads(data):
+    d = ebdecode(data)
+    if d['y'] == 'e':
+        raise Fault(d['c'], d['s']) # the server raised a fault
+    elif d['y'] == 'r':
+        # why is this return value so weird?
+        # because it's the way that loads works in xmlrpclib
+        return (d['r'],), None
+    elif d['y'] == 'q':
+        return d['a'], d['q']
+    raise ValueError
+    
+
+
+class DFault(Exception):
+    """Indicates an Datagram EBRPC fault package."""
+
+    # If you return a DFault with tid=None from within a function called via
+    # twispread's TEBRPC.callRemote then TEBRPC will insert the tid for the call.
+    def __init__(self, faultCode, faultString, tid=None):
+        self.faultCode = faultCode
+        self.faultString = faultString
+        # tid: datagram transaction identifier (see dumpd/loadd).
+        self.tid = tid
+        self.args = (faultCode, faultString)
+        
+    def __repr__(self):
+        return (
+            "<Fault %s: %s>" %
+            (self.faultCode, repr(self.faultString))
+            )
+
+### datagram interface
+### has transaction ID as third return valuebt
+### slightly different API, returns a tid as third argument in query/response
+# Datagram variant of dumps(): every packet carries transaction id 't'.
+# Note responses encode params directly (not params[0] as in dumps).
+def dumpd(params, methodname=None, methodresponse=None, encoding=None, allow_none=False, tid=None):
+    assert tid is not None, "need a transaction identifier"
+    if methodname:
+        out = ebencode({'y':'q', 't':tid, 'q':methodname, 'a':params})
+    elif isinstance(params, DFault):
+        out = ebencode({'y':'e', 't':tid, 'c':params.faultCode, 's':params.faultString})
+    elif methodresponse:
+        out = ebencode({'y':'r', 't':tid, 'r':params})
+    else:
+        raise Error("")
+    return out
+
+# Datagram variant of loads(): returns (params, methodname, tid);
+# raises DFault (carrying the tid) for fault packets.
+def loadd(data):
+    d = ebdecode(data)
+    if d['y'] == 'e':
+        raise DFault(d['c'], d['s'], d['t'])
+    elif d['y'] == 'r':
+        return d['r'], None, d['t']
+    elif d['y'] == 'q':
+        return d['a'], d['q'], d['t']
+    raise ValueError
+    
+

+ 133 - 0
html/bin/clients/mainline/BTL/ebrpclib.py

@@ -0,0 +1,133 @@
+# by Greg Hazel
+
+import xmlrpclib
+from xmlrpclib2 import *
+from BTL import ebrpc
+
+old_PyCurlTransport = PyCurlTransport
+# Shadow the xmlrpclib2 transport: same HTTP machinery, but responses
+# are EB-RPC (octet-stream) rather than XML.
+class PyCurlTransport(old_PyCurlTransport):
+
+    def set_connection_params(self, h):
+        h.add_header('User-Agent', "ebrpclib.py/1.0")
+        h.add_header('Connection', "Keep-Alive")
+        h.add_header('Content-Type', "application/octet-stream")
+    
+    def _parse_response(self, response):
+        # read response from input file/socket, and parse it
+        # ebrpc.loads returns (params, methodname); keep only params.
+        return ebrpc.loads(response.getvalue())[0]
+
+# --------------------------------------------------------------------
+# request dispatcher
+
+class _Method:
+    # some magic to bind an EB-RPC method to an RPC server.
+    # supports "nested" methods (e.g. examples.getStateName)
+    def __init__(self, send, name):
+        self.__send = send
+        self.__name = name
+    def __getattr__(self, name):
+        # Attribute access builds the dotted method path lazily.
+        return _Method(self.__send, "%s.%s" % (self.__name, name))
+    def __call__(self, *args, **kwargs):
+        # EB-RPC passes (positional-args, keyword-args) as a 2-tuple.
+        args = (args, kwargs)
+        return self.__send(self.__name, args)
+    # ARG! prevent repr(_Method()) from submiting an RPC call!
+    def __repr__(self):
+        return "<%s instance at 0x%08X>" % (self.__class__, id(self))
+
+
+# Double underscore is BAD!
+class EBRPC_ServerProxy(xmlrpclib.ServerProxy):
+    """uri [,options] -> a logical connection to an EB-RPC server
+
+    uri is the connection point on the server, given as
+    scheme://host/target.
+
+    The standard implementation always supports the "http" scheme.  If
+    SSL socket support is available (Python 2.0), it also supports
+    "https".
+
+    If the target part and the slash preceding it are both omitted,
+    "/RPC2" is assumed.
+
+    The following options can be given as keyword arguments:
+
+        transport: a transport factory
+        encoding: the request encoding (default is UTF-8)
+
+    All 8-bit strings passed to the server proxy are assumed to use
+    the given encoding.
+    """
+
+    def __init__(self, uri, transport=None, encoding=None, verbose=0,
+                 allow_none=0):
+        # establish a "logical" server connection
+
+        # get the url
+        import urllib
+        type, uri = urllib.splittype(uri)
+        if type not in ("http", "https"):
+            raise IOError, "unsupported EB-RPC protocol"
+        self.__host, self.__handler = urllib.splithost(uri)
+        if not self.__handler:
+            self.__handler = "/RPC2"
+
+        if transport is None:
+            if type == "https":
+                transport = xmlrpclib.SafeTransport()
+            else:
+                transport = xmlrpclib.Transport()
+        self.__transport = transport
+
+        self.__encoding = encoding
+        self.__verbose = verbose
+        self.__allow_none = allow_none
+
+    def __request(self, methodname, params):
+        # call a method on the remote server
+
+        # Encode with ebrpc instead of xmlrpclib; everything else mirrors
+        # xmlrpclib.ServerProxy.
+        request = ebrpc.dumps(params, methodname, encoding=self.__encoding,
+                              allow_none=self.__allow_none)
+
+        response = self.__transport.request(
+            self.__host,
+            self.__handler,
+            request,
+            verbose=self.__verbose
+            )
+
+        # Unwrap single-value responses for convenience.
+        if len(response) == 1:
+            response = response[0]
+
+        return response
+
+    def __repr__(self):
+        return (
+            "<ServerProxy for %s%s>" %
+            (self.__host, self.__handler)
+            )
+
+    __str__ = __repr__
+
+    def __getattr__(self, name):
+        # magic method dispatcher
+        return _Method(self.__request, name)
+    
+# Build a proxy backed by a cached PyCURL connection for the url.
+def new_server_proxy(url):
+    c = cache_set.get_cache(PyCURL_Cache, url)
+    t = PyCurlTransport(c)
+    return EBRPC_ServerProxy(url, transport=t)
+
+# Drop-in replacement name matching xmlrpclib's API.
+ServerProxy = new_server_proxy
+
+
+if __name__ == '__main__':
+    s = ServerProxy('https://greg.mitte.bittorrent.com:7080/')
+    def ping(*a, **kw):
+        (a2, kw2) = s.ping(*a, **kw)
+        assert a2 == list(a), '%s list is not %s' % (r, list(a))
+        assert kw2 == dict(kw), '%s dict is not %s' % (kw2, dict(kw))
+    ping(0, 1, 1, name="potato")
+    ping(0, 1, 1, name="anime")
+    ping("phish", 0, 1, 1)
+    ping("games", 0, 1, 1)
+    

+ 223 - 0
html/bin/clients/mainline/BTL/epollreactor.py

@@ -0,0 +1,223 @@
+# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+"""A epoll() based implementation of the twisted main loop.
+This is just like pollreactor, but it uses epoll.
+
+To install the event loop (and you should do this before any connections,
+listeners or connectors are added)::
+
+    from BTL import epollreactor
+    epollreactor.install()
+
+API Stability: stable
+
+Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
+"""
+
+# System imports
+##try:
+##    import epoll as select
+##except ImportError:
+##    import select
+# do or do not, there is no try
+import epoll as select
+
+########################################################
+## http://twistedmatrix.com/trac/ticket/1953#comment:20
+
+from twisted.python import log, failure
+from twisted.internet.tcp import BaseClient
+
+def failIfNotConnected(self, err):
+    # Monkey-patch replacement for BaseClient.failIfNotConnected (installed
+    # below; see the Twisted ticket URL referenced earlier in this file).
+    # Reports a failed connection attempt to the connector and tears down
+    # the half-open client.  No-op if the client already connected or
+    # disconnected, or has no connector to notify.
+    if (self.connected or self.disconnected or 
+        not hasattr(self, "connector")):
+        return
+
+    self.connector.connectionFailed(failure.Failure(err))
+    if hasattr(self, "reactor"):
+        # this doesn't happen if we failed in __init__
+        self.stopReading()
+        self.stopWriting()
+        del self.connector
+    try:
+        # _closeSocket may not exist if the socket was never set up.
+        self._closeSocket()
+    except AttributeError:
+        pass
+    else:
+        del self.socket, self.fileno
+
+# Install the patched method on Twisted's BaseClient.
+BaseClient.failIfNotConnected = failIfNotConnected
+
+########################################################
+
+
+import errno, sys
+
+from zope.interface import implements
+
+# Twisted imports
+from twisted.python import log, threadable, failure
+from twisted.internet import main, posixbase, error
+from twisted.internet.interfaces import IReactorFDSet
+
+# Module-level reactor state shared by all PollReactor methods (and bound
+# as default arguments below so lookups are fast locals):
+#   reads / writes: fd -> 1 flags marking read/write interest
+#   selectables:    fd -> the selectable object registered for that fd
+reads = {}
+writes = {}
+selectables = {}
+poller = select.poll()
+
+# Event bits that mean the peer/fd is gone (hang-up, error, invalid fd).
+POLL_DISCONNECTED = (select.POLLHUP | select.POLLERR | select.POLLNVAL)
+
+
+class PollReactor(posixbase.PosixReactorBase):
+    """A reactor that uses poll(2)."""
+    implements(IReactorFDSet)
+    
+    def _updateRegistration(self, fd):
+        """Register/unregister an fd with the poller."""
+        # Always drop any stale registration first; poll() rejects double
+        # registration and the interest mask may have changed.
+        try:
+            poller.unregister(fd)
+        except KeyError:
+            pass
+
+        # Rebuild the interest mask from the module-level dicts.
+        mask = 0
+        if reads.has_key(fd): mask = mask | select.POLLIN
+        if writes.has_key(fd): mask = mask | select.POLLOUT
+        if mask != 0:
+            poller.register(fd, mask)
+        else:
+            # No interest left at all -- forget the selectable entirely.
+            if selectables.has_key(fd): del selectables[fd]
+
+    def _dictRemove(self, selectable, mdict):
+        # Remove *selectable* from mdict (reads or writes), locating its fd
+        # even when fileno() is no longer usable.
+        try:
+            # the easy way
+            fd = selectable.fileno()
+            # make sure the fd is actually real.  In some situations we can get
+            # -1 here.
+            mdict[fd]
+        except:
+            # the hard way: necessary because fileno() may disappear at any
+            # moment, thanks to python's underlying sockets impl
+            for fd, fdes in selectables.items():
+                if selectable is fdes:
+                    break
+            else:
+                # Hmm, maybe not the right course of action?  This method can't
+                # fail, because it happens inside error detection...
+                return
+        if mdict.has_key(fd):
+            del mdict[fd]
+            self._updateRegistration(fd)
+
+    def addReader(self, reader):
+        """Add a FileDescriptor for notification of data available to read.
+        """
+        fd = reader.fileno()
+        if not reads.has_key(fd):
+            selectables[fd] = reader
+            reads[fd] =  1
+            self._updateRegistration(fd)
+
+    # NOTE: the dict default arguments here and below bind the module-level
+    # dicts as locals for faster lookup; callers never pass them.
+    def addWriter(self, writer, writes=writes, selectables=selectables):
+        """Add a FileDescriptor for notification of data available to write.
+        """
+        fd = writer.fileno()
+        if not writes.has_key(fd):
+            selectables[fd] = writer
+            writes[fd] =  1
+            self._updateRegistration(fd)
+
+    def removeReader(self, reader, reads=reads):
+        """Remove a Selectable for notification of data available to read.
+        """
+        return self._dictRemove(reader, reads)
+
+    def removeWriter(self, writer, writes=writes):
+        """Remove a Selectable for notification of data available to write.
+        """
+        return self._dictRemove(writer, writes)
+
+    def removeAll(self, reads=reads, writes=writes, selectables=selectables):
+        """Remove all selectables, and return a list of them."""
+        # The waker is reactor-internal; detach it first and re-add after,
+        # so it is never reported to (or torn down by) the caller.
+        if self.waker is not None:
+            self.removeReader(self.waker)
+        result = selectables.values()
+        fds = selectables.keys()
+        reads.clear()
+        writes.clear()
+        selectables.clear()
+        for fd in fds:
+            poller.unregister(fd)
+            
+        if self.waker is not None:
+            self.addReader(self.waker)
+        return result
+
+    def doPoll(self, timeout,
+               reads=reads,
+               writes=writes,
+               selectables=selectables,
+               select=select,
+               log=log,
+               POLLIN=select.POLLIN,
+               POLLOUT=select.POLLOUT):
+        """Poll the poller for new events."""
+        if timeout is not None:
+            timeout = int(timeout * 1000) # convert seconds to milliseconds
+
+        try:
+            l = poller.poll(timeout)
+        except select.error, e:
+            # EINTR (signal during poll) is benign: just return and let the
+            # main loop iterate again.  Anything else is a real error.
+            if e[0] == errno.EINTR:
+                return
+            else:
+                raise
+        _drdw = self._doReadOrWrite
+        for fd, event in l:
+            try:
+                selectable = selectables[fd]
+            except KeyError:
+                # Handles the infrequent case where one selectable's
+                # handler disconnects another.
+                continue
+            log.callWithLogger(selectable, _drdw, selectable, fd, event, POLLIN, POLLOUT, log)
+
+    doIteration = doPoll
+
+    # faildict is a shared, read-only default: pre-built Failure instances
+    # for the common disconnect reasons (avoids rebuilding them per event).
+    def _doReadOrWrite(self, selectable, fd, event, POLLIN, POLLOUT, log, 
+        faildict={
+            error.ConnectionDone: failure.Failure(error.ConnectionDone()),
+            error.ConnectionLost: failure.Failure(error.ConnectionLost())
+        }):
+        # Dispatch one poll event to the selectable's doRead/doWrite and
+        # disconnect it if either returns a reason (or raises).
+        why = None
+        inRead = False
+        if event & POLL_DISCONNECTED and not (event & POLLIN):
+            why = main.CONNECTION_LOST
+        else:
+            try:
+                if event & POLLIN:
+                    why = selectable.doRead()
+                    inRead = True
+                if not why and event & POLLOUT:
+                    why = selectable.doWrite()
+                    inRead = False
+                if not selectable.fileno() == fd:
+                    # The selectable's fd changed under us -- treat as gone.
+                    why = error.ConnectionFdescWentAway('Filedescriptor went away')
+                    inRead = False
+            except:
+                log.deferr()
+                why = sys.exc_info()[1]
+        if why:
+            self._disconnectSelectable(selectable, why, inRead)
+
+
+def install():
+    """Install the poll() reactor."""
+    # Must run before any connections/listeners are added (see module doc).
+    p = PollReactor()
+    from twisted.internet import main
+    main.installReactor(p)
+
+
+__all__ = ["PollReactor", "install"]

Alguns arquivos não foram mostrados porque muitos arquivos mudaram nesse diff