diff --git a/.gitignore b/.gitignore index 5c293461..c6483dbc 100644 --- a/.gitignore +++ b/.gitignore @@ -4,11 +4,12 @@ /wren # Intermediate files. -build/ -deps/ -obj/ +/build +/obj /.sass-cache *.pyc +/util/vs2017/build +/util/vs2017/obj # I leave a temporary Wren script at the top level so that I can quickly test # stuff. diff --git a/Makefile b/Makefile index 17f8dbe1..f952c283 100644 --- a/Makefile +++ b/Makefile @@ -11,9 +11,10 @@ endif # Executables are built to bin/. Libraries are built to lib/. # A normal, optimized release build for the current CPU architecture. +# For convenience, also copies the interpreter to the top level. release: $(V) $(MAKE) -f util/wren.mk - $(V) cp bin/wren wren # For convenience, copy the interpreter to the top level. + $(V) cp bin/wren wren # A debug build for the current architecture. debug: @@ -74,10 +75,6 @@ clean: $(V) rm -rf build $(V) rm -rf lib -# Remove all build outputs, intermediate files, and downloaded dependencies. -cleanall: clean - $(V) rm -rf deps - # Run the tests against the debug build of Wren. test: debug $(V) $(MAKE) -f util/wren.mk MODE=debug test diff --git a/deps/libuv/.gitignore b/deps/libuv/.gitignore new file mode 100644 index 00000000..bbe00b89 --- /dev/null +++ b/deps/libuv/.gitignore @@ -0,0 +1,78 @@ +*.swp +*.[oa] +*.l[oa] +*.opensdf +*.orig +*.pyc +*.sdf +*.suo +.vs/ +*.VC.db +*.VC.opendb +core +vgcore.* +.buildstamp +.dirstamp +.deps/ +/.libs/ +/aclocal.m4 +/ar-lib +/autom4te.cache/ +/compile +/config.guess +/config.log +/config.status +/config.sub +/configure +/depcomp +/install-sh +/libtool +/libuv.a +/libuv.dylib +/libuv.pc +/libuv.so +/ltmain.sh +/missing +/test-driver +Makefile +Makefile.in + +# Generated by gyp for android +*.target.mk + +/out/ +# /build/gyp (We do want to commit GYP in Wren's repo) + +/test/.libs/ +/test/run-tests +/test/run-tests.exe +/test/run-tests.dSYM +/test/run-benchmarks +/test/run-benchmarks.exe +/test/run-benchmarks.dSYM + +*.sln +*.sln.cache +*.ncb +*.vcproj +*.vcproj*.user +*.vcxproj +*.vcxproj.filters +*.vcxproj.user +_UpgradeReport_Files/ +UpgradeLog*.XML +Debug +Release +ipch + +# sphinx generated files +/docs/build/ + +# Clion / IntelliJ project files +/.idea/ + +*.xcodeproj +*.xcworkspace + +# make dist output +libuv-*.tar.* diff --git a/deps/libuv/.mailmap b/deps/libuv/.mailmap new file mode 100644 index 00000000..4966caf5 --- /dev/null +++ b/deps/libuv/.mailmap @@ -0,0 +1,40 @@ +Aaron Bieber +Alan Gutierrez +Andrius Bentkus +Bert Belder +Bert Belder +Brandon Philips +Brian White +Brian White +Caleb James DeLisle +Christoph Iserlohn +Devchandra Meetei Leishangthem +Fedor Indutny +Frank Denis +Imran Iqbal +Isaac Z. Schlueter +Jason Williams +Justin Venus +Keno Fischer +Keno Fischer +Leith Bade +Leonard Hecker +Maciej Małecki +Marc Schlaich +Michael +Michael Neumann +Nicholas Vavilov +Rasmus Christian Pedersen +Rasmus Christian Pedersen +Robert Mustacchi +Ryan Dahl +Ryan Emery +Sam Roberts +San-Tai Hsu +Santiago Gimeno +Saúl Ibarra Corretgé +Shigeki Ohtsu +Timothy J. Fontaine +Yasuhiro Matsumoto +Yazhong Liu +Yuki Okumura diff --git a/deps/libuv/AUTHORS b/deps/libuv/AUTHORS new file mode 100644 index 00000000..67aa1762 --- /dev/null +++ b/deps/libuv/AUTHORS @@ -0,0 +1,279 @@ +# Authors ordered by first contribution. 
+Ryan Dahl +Bert Belder +Josh Roesslein +Alan Gutierrez +Joshua Peek +Igor Zinkovsky +San-Tai Hsu +Ben Noordhuis +Henry Rawas +Robert Mustacchi +Matt Stevens +Paul Querna +Shigeki Ohtsu +Tom Hughes +Peter Bright +Jeroen Janssen +Andrea Lattuada +Augusto Henrique Hentz +Clifford Heath +Jorge Chamorro Bieling +Luis Lavena +Matthew Sporleder +Erick Tryzelaar +Isaac Z. Schlueter +Pieter Noordhuis +Marek Jelen +Fedor Indutny +Saúl Ibarra Corretgé +Felix Geisendörfer +Yuki Okumura +Roman Shtylman +Frank Denis +Carter Allen +Tj Holowaychuk +Shimon Doodkin +Ryan Emery +Bruce Mitchener +Maciej Małecki +Yasuhiro Matsumoto +Daisuke Murase +Paddy Byers +Dan VerWeire +Brandon Benvie +Brandon Philips +Nathan Rajlich +Charlie McConnell +Vladimir Dronnikov +Aaron Bieber +Bulat Shakirzyanov +Brian White +Erik Dubbelboer +Keno Fischer +Ira Cooper +Andrius Bentkus +Iñaki Baz Castillo +Mark Cavage +George Yohng +Xidorn Quan +Roman Neuhauser +Shuhei Tanuma +Bryan Cantrill +Trond Norbye +Tim Holy +Prancesco Pertugio +Leonard Hecker +Andrew Paprocki +Luigi Grilli +Shannen Saez +Artur Adib +Hiroaki Nakamura +Ting-Yu Lin +Stephen Gallagher +Shane Holloway +Andrew Shaffer +Vlad Tudose +Ben Leslie +Tim Bradshaw +Timothy J. Fontaine +Marc Schlaich +Brian Mazza +Elliot Saba +Ben Kelly +Nils Maier +Nicholas Vavilov +Miroslav Bajtoš +Sean Silva +Wynn Wilkes +Andrei Sedoi +Alex Crichton +Brent Cook +Brian Kaisner +Luca Bruno +Reini Urban +Maks Naumov +Sean Farrell +Chris Bank +Geert Jansen +Christoph Iserlohn +Steven Kabbes +Alex Gaynor +huxingyi +Tenor Biel +Andrej Manduch +Joshua Neuheisel +Alexis Campailla +Yazhong Liu +Sam Roberts +River Tarnell +Nathan Sweet +Trevor Norris +Oguz Bastemur +Dylan Cali +Austin Foxley +Benjamin Saunders +Geoffry Song +Rasmus Christian Pedersen +William Light +Oleg Efimov +Lars Gierth +Rasmus Christian Pedersen +Justin Venus +Kristian Evensen +Linus Mårtensson +Navaneeth Kedaram Nambiathan +Yorkie +StarWing +thierry-FreeBSD +Isaiah Norton +Raul Martins +David Capello +Paul Tan +Javier Hernández +Tonis Tiigi +Norio Kobota +李港平 +Chernyshev Viacheslav +Stephen von Takach +JD Ballard +Luka Perkov +Ryan Cole +HungMingWu +Jay Satiro +Leith Bade +Peter Atashian +Tim Cooper +Caleb James DeLisle +Jameson Nash +Graham Lee +Andrew Low +Pavel Platto +Tony Kelman +John Firebaugh +lilohuang +Paul Goldsmith +Julien Gilli +Michael Hudson-Doyle +Recep ASLANTAS +Rob Adams +Zachary Newman +Robin Hahling +Jeff Widman +cjihrig +Tomasz Kołodziejski +Unknown W. Brackets +Emmanuel Odeke +Mikhail Mukovnikov +Thorsten Lorenz +Yuri D'Elia +Manos Nikolaidis +Elijah Andrews +Michael Ira Krufky +Helge Deller +Joey Geralnik +Tim Caswell +Logan Rosen +Kenneth Perry +John Marino +Alexey Melnichuk +Johan Bergström +Alex Mo +Luis Martinez de Bartolome +Michael Penick +Michael +Massimiliano Torromeo +TomCrypto +Brett Vickers +Ole André Vadla Ravnås +Kazuho Oku +Ryan Phillips +Brian Green +Devchandra Meetei Leishangthem +Corey Farrell +Per Nilsson +Alan Rogers +Daryl Haresign +Rui Abreu Ferreira +João Reis +farblue68 +Jason Williams +Igor Soarez +Miodrag Milanovic +Cheng Zhao +Michael Neumann +Stefano Cristiano +heshamsafi +A. Hauptmann +John McNamee +Yosuke Furukawa +Santiago Gimeno +guworks +RossBencina +Roger A. 
Light +chenttuuvv +Richard Lau +ronkorving +Corbin Simpson +Zachary Hamm +Karl Skomski +Jeremy Whitlock +Willem Thiart +Ben Trask +Jianghua Yang +Colin Snover +Sakthipriyan Vairamani +Eli Skeggs +nmushell +Gireesh Punathil +Ryan Johnston +Adam Stylinski +Nathan Corvino +Wink Saville +Angel Leon +Louis DeJardin +Imran Iqbal +Petka Antonov +Ian Kronquist +kkdaemon +Yuval Brik +Joran Dirk Greef +Andrey Mazo +sztomi +Martin Bark +Dave +Alexis Murzeau +Didiet +Nan Xiang <514580344@qq.com> +Samuel Lorétan +Nándor István Krácser +Katsutoshi Horie +Lukasz Jagiello +Robert Chiras +Kári Tristan Helgason +Krishnaraj Bhat +Enno Boland +Michael Fero +Robert Jefe Lindstaedt +Myles Borins +Tony Theodore +Jason Ginchereau +Nicolas Cavallari +Pierre-Marie de Rodat +Brian Maher +neevek +John Barboza +liuxiaobo +Michele Caini +Bartosz Sosnowski +Matej Knopp +sunjin.lee +Matt Clarkson +Jeffrey Clark +Bart Robinson +Vit Gottwald +Vladimír Čunát +Alex Hultman +Brad King +Philippe Laferriere +Will Speak diff --git a/deps/libuv/CONTRIBUTING.md b/deps/libuv/CONTRIBUTING.md new file mode 100644 index 00000000..aa97506d --- /dev/null +++ b/deps/libuv/CONTRIBUTING.md @@ -0,0 +1,169 @@ +# CONTRIBUTING + +The libuv project welcomes new contributors. This document will guide you +through the process. + + +### FORK + +Fork the project [on GitHub](https://github.com/libuv/libuv) and check out +your copy. + +``` +$ git clone https://github.com/username/libuv.git +$ cd libuv +$ git remote add upstream https://github.com/libuv/libuv.git +``` + +Now decide if you want your feature or bug fix to go into the master branch +or the stable branch. As a rule of thumb, bug fixes go into the stable branch +while new features go into the master branch. + +The stable branch is effectively frozen; patches that change the libuv +API/ABI or affect the run-time behavior of applications get rejected. + +In case of doubt, open an issue in the [issue tracker][], post your question +to the [libuv mailing list], or contact one of [project maintainers][] on [IRC][]. + +Especially do so if you plan to work on something big. Nothing is more +frustrating than seeing your hard work go to waste because your vision +does not align with that of a project maintainers. + + +### BRANCH + +Okay, so you have decided on the proper branch. Create a feature branch +and start hacking: + +``` +$ git checkout -b my-feature-branch -t origin/v1.x +``` + +(Where v1.x is the latest stable branch as of this writing.) + +### CODE + +Please adhere to libuv's code style. In general it follows the conventions from +the [Google C/C++ style guide]. Some of the key points, as well as some +additional guidelines, are enumerated below. + +* Code that is specific to unix-y platforms should be placed in `src/unix`, and + declarations go into `include/uv-unix.h`. + +* Source code that is Windows-specific goes into `src/win`, and related + publicly exported types, functions and macro declarations should generally + be declared in `include/uv-win.h`. + +* Names should be descriptive and concise. + +* All the symbols and types that libuv makes available publicly should be + prefixed with `uv_` (or `UV_` in case of macros). + +* Internal, non-static functions should be prefixed with `uv__`. + +* Use two spaces and no tabs. + +* Lines should be wrapped at 80 characters. + +* Ensure that lines have no trailing whitespace, and use unix-style (LF) line + endings. + +* Use C89-compliant syntax. 
In other words, variables can only be declared at + the top of a scope (function, if/for/while-block). + +* When writing comments, use properly constructed sentences, including + punctuation. + +* When documenting APIs and/or source code, don't make assumptions or make + implications about race, gender, religion, political orientation or anything + else that isn't relevant to the project. + +* Remember that source code usually gets written once and read often: ensure + the reader doesn't have to make guesses. Make sure that the purpose and inner + logic are either obvious to a reasonably skilled professional, or add a + comment that explains it. + + +### COMMIT + +Make sure git knows your name and email address: + +``` +$ git config --global user.name "J. Random User" +$ git config --global user.email "j.random.user@example.com" +``` + +Writing good commit logs is important. A commit log should describe what +changed and why. Follow these guidelines when writing one: + +1. The first line should be 50 characters or less and contain a short + description of the change prefixed with the name of the changed + subsystem (e.g. "net: add localAddress and localPort to Socket"). +2. Keep the second line blank. +3. Wrap all other lines at 72 columns. + +A good commit log looks like this: + +``` +subsystem: explaining the commit in one line + +Body of commit message is a few lines of text, explaining things +in more detail, possibly giving some background about the issue +being fixed, etc etc. + +The body of the commit message can be several paragraphs, and +please do proper word-wrap and keep columns shorter than about +72 characters or so. That way `git log` will show things +nicely even when it is indented. +``` + +The header line should be meaningful; it is what other people see when they +run `git shortlog` or `git log --oneline`. + +Check the output of `git log --oneline files_that_you_changed` to find out +what subsystem (or subsystems) your changes touch. + + +### REBASE + +Use `git rebase` (not `git merge`) to sync your work from time to time. + +``` +$ git fetch upstream +$ git rebase upstream/v1.x # or upstream/master +``` + + +### TEST + +Bug fixes and features should come with tests. Add your tests in the +`test/` directory. Each new test needs to be registered in `test/test-list.h`. If you add a new test file, it needs to be registered in two places: +- `Makefile.am`: add the file's name to the `test_run_tests_SOURCES` list. +- `uv.gyp`: add the file's name to the `sources` list in the `run-tests` target. + +Look at other tests to see how they should be structured (license boilerplate, +the way entry points are declared, etc.). + +Check README.md file to find out how to run the test suite and make sure that +there are no test regressions. + +### PUSH + +``` +$ git push origin my-feature-branch +``` + +Go to https://github.com/username/libuv and select your feature branch. Click +the 'Pull Request' button and fill out the form. + +Pull requests are usually reviewed within a few days. If there are comments +to address, apply your changes in a separate commit and push that to your +feature branch. Post a comment in the pull request afterwards; GitHub does +not send out notifications when you add commits. 
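To make the style rules in the CODE section above concrete, here is a minimal illustrative sketch. The names `uv_example_strlen` and `uv__example_count` are hypothetical, invented for this example only, and are not part of the libuv API:

```c
#include <stddef.h>

/* Internal, non-static helpers carry the uv__ prefix. */
size_t uv__example_count(const char* s) {
  size_t n;  /* C89: declare variables at the top of the scope. */

  n = 0;
  while (s != NULL && s[n] != '\0')
    n++;

  return n;
}

/* Publicly visible symbols carry the uv_ prefix (UV_ for macros);
 * indentation is two spaces and lines stay within 80 columns. */
size_t uv_example_strlen(const char* s) {
  return uv__example_count(s);
}
```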
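The TEST section above asks for each new test to be registered in `test/test-list.h`. As a rough sketch of that shape, assuming libuv's existing `TEST_IMPL`, `ASSERT`, `TEST_DECLARE`, and `TEST_ENTRY` test-harness macros, and using a hypothetical test named `example_noop`:

```c
/* test/test-example-noop.c -- hypothetical new test file */
#include "uv.h"
#include "task.h"

TEST_IMPL(example_noop) {
  uv_loop_t loop;

  ASSERT(0 == uv_loop_init(&loop));
  ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT));
  ASSERT(0 == uv_loop_close(&loop));
  return 0;
}
```

The corresponding registration would then be added next to the existing entries:

```c
/* test/test-list.h -- declare the test and list it in the task list */
TEST_DECLARE   (example_noop)

/* ...and inside the TASK_LIST_START / TASK_LIST_END block: */
  TEST_ENTRY  (example_noop)
```

Remember to also add the new file to `test_run_tests_SOURCES` in `Makefile.am` and to the `run-tests` sources in `uv.gyp`, as described above.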
+ + +[issue tracker]: https://github.com/libuv/libuv/issues +[libuv mailing list]: http://groups.google.com/group/libuv +[IRC]: http://webchat.freelibuv.net/?channels=libuv +[Google C/C++ style guide]: https://google.github.io/styleguide/cppguide.html +[project maintainers]: https://github.com/libuv/libuv/blob/master/MAINTAINERS.md diff --git a/deps/libuv/ChangeLog b/deps/libuv/ChangeLog new file mode 100644 index 00000000..8c667596 --- /dev/null +++ b/deps/libuv/ChangeLog @@ -0,0 +1,2818 @@ +2016.10.25, Version 1.10.0 (Stable) + +Changes since version 1.9.1: + +* Now working on version 1.9.2 (Saúl Ibarra Corretgé) + +* doc: add cjihrig GPG ID (cjihrig) + +* win,build: fix compilation on old Windows / MSVC (Saúl Ibarra Corretgé) + +* darwin: fix setting fd to non-blocking in select(() trick (Saúl Ibarra + Corretgé) + +* unix: allow nesting of kqueue fds in uv_poll_start (Ben Noordhuis) + +* doc: fix generation the first time livehtml runs (Saúl Ibarra Corretgé) + +* test: fix test_close_accept flakiness on Centos5 (Santiago Gimeno) + +* license: libuv is no longer a Node project (Saúl Ibarra Corretgé) + +* license: add license text we've been using for a while (Saúl Ibarra Corretgé) + +* doc: add licensing information to README (Saúl Ibarra Corretgé) + +* win,pipe: fixed formatting, DWORD is long unsigned (Miodrag Milanovic) + +* win: support sub-second precision in uv_fs_futimes() (Jason Ginchereau) + +* unix: ignore EINPROGRESS in uv__close (Saúl Ibarra Corretgé) + +* doc: add Imran Iqbal (iWuzHere) to maintainers (Imran Iqbal) + +* doc: update docs with AIX related information (Imran Iqbal) + +* test: silence build warnings (Kári Tristan Helgason) + +* doc: add iWuzHere GPG ID (Imran Iqbal) + +* linux-core: fix uv_get_total/free_memory on uclibc (Nicolas Cavallari) + +* build: fix build on DragonFly (Michael Neumann) + +* unix: correctly detect named pipes on DragonFly (Michael Neumann) + +* test: make tap output the default (Ben Noordhuis) + +* test: don't dump output for skipped tests (Ben Noordhuis) + +* test: improve formatting of diagnostic messages (Ben Noordhuis) + +* test: remove unused RETURN_TODO macro (Ben Noordhuis) + +* doc: fix stream typos (Pierre-Marie de Rodat) + +* doc: update coding style link (Imran Iqbal) + +* unix,fs: use uint64_t instead of unsigned long (Imran Iqbal) + +* build: check for warnings for -fvisibility=hidden (Imran Iqbal) + +* unix: remove unneeded TODO note (Saúl Ibarra Corretgé) + +* test: skip tty_pty test if pty is not available (Luca Bruno) + +* sunos: set phys_addr of interface_address using ARP (Brian Maher) + +* doc: clarify callbacks won't be called in error case (Saúl Ibarra Corretgé) + +* unix: don't convert stat buffer when syscall fails (Ben Noordhuis) + +* win: compare entire filename in watch events (cjihrig) + +* doc: add a note on safe reuse of uv_write_t (neevek) + +* linux: fix potential event loop stall (Ben Noordhuis) + +* unix,win: make uv_get_process_title() stricter (cjihrig) + +* test: close server before initiating new connection (John Barboza) + +* test: account for multiple handles in one ipc read (John Barboza) + +* unix: fix errno and retval conflict (liuxiaobo) + +* doc: add missing entry in uv_fs_type enum (Michele Caini) + +* unix: preserve loop->data across loop init/done (Ben Noordhuis) + +* win: return UV_EINVAL on bad uv_tty_mode mode arg (Ben Noordhuis) + +* win: simplify memory copy logic in fs.c (Ben Noordhuis) + +* win: fix compilation on mingw (Bartosz Sosnowski) + +* win: ensure 32-bit printf precision 
(Matej Knopp) + +* darwin: handle EINTR in /dev/tty workaround (Ben Noordhuis) + +* test: fix OOB buffer access (Saúl Ibarra Corretgé) + +* test: don't close CRT fd handed off to uv_pipe_t (Saúl Ibarra Corretgé) + +* test: fix android build error. (sunjin.lee) + +* win: evaluate timers when system wakes up (Bartosz Sosnowski) + +* doc: add supported platforms description (Saúl Ibarra Corretgé) + +* win: fix lstat reparse point without link data (Jason Ginchereau) + +* unix,win: make on_alloc_cb failures more resilient (Saúl Ibarra Corretgé) + +* zos: add support for new platform (John Barboza) + +* test: make tcp_close_while_connecting more resilient (Saúl Ibarra Corretgé) + +* build: use '${prefix}' for pkg-config 'exec_prefix' (Matt Clarkson) + +* build: GNU/kFreeBSD support (Jeffrey Clark) + +* zos: use PLO instruction for atomic operations (John Barboza) + +* zos: use pthread helper functions (John Barboza) + +* zos: implement uv__fs_futime (John Barboza) + +* unix: expand range of values for usleep (John Barboza) + +* zos: track unbound handles and bind before listen (John Barboza) + +* test: improve tap output on test failures (Santiago Gimeno) + +* test: refactor fs_event_close_in_callback (Julien Gilli) + +* zos: implement uv__io_check_fd (John Barboza) + +* unix: unneccessary use const qualifier in container_of (John Barboza) + +* win,tty: add support for ANSI codes in win10 v1511 (Imran Iqbal) + +* doc: add santigimeno to maintainers (Santiago Gimeno) + +* win: fix typo in type name (Saúl Ibarra Corretgé) + +* unix: always define pthread barrier fallback pad (Saúl Ibarra Corretgé) + +* test: use RETURN_SKIP in spawn_setuid_setgid test (Santiago Gimeno) + +* win: add disk read/write count to uv_getrusage (Imran Iqbal) + +* doc: document uv_fs_realpath caveats (Saúl Ibarra Corretgé) + +* test: improve spawn_setuid_setgid test (Santiago Gimeno) + +* test: fix building pty test on Android (Saúl Ibarra Corretgé) + +* doc: uv_buf_t members are not readonly (Saúl Ibarra Corretgé) + +* doc: improve documentation on uv_alloc_cb (Saúl Ibarra Corretgé) + +* fs: fix uv_fs_fstat on platforms using musl libc (Santiago Gimeno) + +* doc: update supported fields for uv_rusage_t (Imran Iqbal) + +* test: fix test-tcp-writealot flakiness on arm (Santiago Gimeno) + +* test: fix fs_event_watch_dir flakiness on arm (Santiago Gimeno) + +* unix: don't use alphasort in uv_fs_scandir() (Ben Noordhuis) + +* doc: fix confusing doc of uv_tcp_nodelay (Bart Robinson) + +* build,osx: fix warnings on tests compilation with gyp (Santiago Gimeno) + +* doc: add ABI tracker link to README (Saúl Ibarra Corretgé) + +* win,tty: fix uv_tty_set_mode race conditions (Bartosz Sosnowski) + +* test: fix fs_fstat on Android (Vit Gottwald) + +* win, test: fix fs_event_watch_dir_recursive (Bartosz Sosnowski) + +* doc: add description of uv_handle_type (Vit Gottwald) + +* build: use -pthreads for tests with autotools (Julien Gilli) + +* win: fix leaky fs request buffer (Jason Ginchereau) + +* doc: note buffer lifetime requirements in uv_write (Vladimír Čunát) + +* doc: add reference to uv_update_time on uv_timer_start (Alex Hultman) + +* win: fix winapi function pointer typedef syntax (Brad King) + +* test: fix tcp_close_while_connecting CI failures (Ben Noordhuis) + +* test: make threadpool_cancel_single deterministic (Ben Noordhuis) + +* test: make threadpool saturation reliable (Ben Noordhuis) + +* unix: don't malloc in uv_thread_create() (Ben Noordhuis) + +* unix: don't include CoreServices globally on macOS (Brad King) + +* 
unix,win: add uv_translate_sys_error() public API (Philippe Laferriere) + +* win: remove unused static variables (Ben Noordhuis) + +* win: silence -Wmaybe-uninitialized warning (Ben Noordhuis) + +* signal: replace pthread_once with uv_once (Santiago Gimeno) + +* test: fix sign-compare warning (Will Speak) + +* common: fix unused variable warning (Brad King) + + +2016.05.17, Version 1.9.1 (Stable), d989902ac658b4323a4f4020446e6f4dc449e25c + +Changes since version 1.9.0: + +* test: handle root home directories (cjihrig) + +* unix: implement uv__fs_futime for AIX 7.1 (Imran Iqbal) + +* test: skip early bind tests if no IPv6 is supported (Saúl Ibarra Corretgé) + +* win: fix var declaration to be C89 compliant (Michael Fero) + +* unix: use POLL{IN,OUT,etc} constants directly (Ben Noordhuis) + +* doc: add ability to live reload and regenerate HTML (Saúl Ibarra Corretgé) + +* Revert "win,build: remove unused build defines" (cjihrig) + +* linux: fix fd leaks in uv_cpu_info() error paths (Ben Noordhuis) + +* linux: don't abort on malformed /proc/stat (Ben Noordhuis) + +* linux: fix long lines in linux-core.c (Ben Noordhuis) + +* test: fix fs_event_watch_file_current_dir for AIX (Imran Iqbal) + +* unix,fs: code cleanup of uv_fs_event_start for AIX (Imran Iqbal) + +* unix: delay signal handling until after normal i/o (Ben Noordhuis) + +* android: pthread_sigmask() does not set errno (Oguz Bastemur) + +* win: work around sharepoint scandir bug (Ben Noordhuis) + +* unix: guard against clobbering errno in uv__free() (Ben Noordhuis) + +* unix: remove unneeded SAVE_ERRNO wrappers (Ben Noordhuis) + +* test: skip fs_event_close_in_callback on AIX (Imran Iqbal) + +* win: add maxrss, pagefaults to uv_getrusage() (Robert Jefe Lindstaedt) + +* test: set a big send buffer size for tcp_write_queue_order (Andrius Bentkus) + +* unix: error on realpath if PATH_MAX is undefined (Myles Borins) + +* unix: fix bug in barrier fallback implementation (Kári Tristan Helgason) + +* build: bump android ndk version (Kári Tristan Helgason) + +* build: always compile with -fvisibility=hidden (Ben Noordhuis) + +* test: fix -Wformat warnings in platform test (Ben Noordhuis) + +* win: clarify fsevents handling code (Saúl Ibarra Corretgé) + +* test: fix POLLHDRUP related failures for AIX (Imran Iqbal) + +* build, mingw: set LIBS in configure.ac (Tony Theodore) + +* win: improve uv__convert_utf16_to_utf8 (Saúl Ibarra Corretgé) + +* win: simplified UTF16 -> UTF8 conversions (Saúl Ibarra Corretgé) + +* win: remove unneeded condition (Saúl Ibarra Corretgé) + +* darwin: work around condition variable kernel bug (Ben Noordhuis) + +* darwin: make thread stack multiple of page size (Ben Noordhuis) + +* build,win: rename platform to msbuild_platform (João Reis) + +* gitignore: ignore VS temporary database files (João Reis) + +* test: skip emfile on AIX (Imran Iqbal) + +* unix: use system allocator for scandir() (cjihrig) + +* common: release uv_fs_scandir() array (cjihrig) + +* win: call uv__fs_scandir_cleanup() (cjihrig) + +* win,tty: fix read stop in line mode (João Reis) + +* win,tty: don't duplicate handle for line reads (João Reis) + +* win,tty: restore cursor after canceling line read (Alexis Campailla) + + +2016.04.08, Version 1.9.0 (Stable), 229b3a4cc150aebd6561e6bd43076eafa7a03756 + +Changes since version 1.8.0: + +* win: wait for full timeout duration (João Reis) + +* unix: fix support for uClibc-ng (Martin Bark) + +* doc: indicate where new test files need to be added (Dave) + +* test,unix: fix logic error in test runner (Ben 
Noordhuis) + +* fs: don't nullify req->bufs on EINTR (Dave) + +* osx: set the default thread stack size to RLIMIT_STACK (Saúl Ibarra Corretgé) + +* build: invoke libtoolize with --copy (Ben Noordhuis) + +* test: fixup eintr_handling (Saúl Ibarra Corretgé) + +* osx: avoid compilation warning with Clang (Saúl Ibarra Corretgé) + +* test,win: fix compilation with shared lib (Alexis Murzeau) + +* test: fix race condition in pipe-close-stdout (Imran Iqbal) + +* unix,win: add uv_os_tmpdir() (cjihrig) + +* ios: fix undefined PTHREAD_STACK_MIN (Didiet) + +* test: fix threadpool_multiple_event_loops for AIX (Imran Iqbal) + +* unix: report errors for unpollable fds (Ben Noordhuis) + +* win: fix watching root files (Nicholas Vavilov) + +* build,win: print the Visual Studio version in use (Saúl Ibarra Corretgé) + +* build,win: remove unneeded condition from GYP file (Saúl Ibarra Corretgé) + +* test,win: fix compilation warning (Saúl Ibarra Corretgé) + +* test: use uv_loop_close and assert its result (Nan Xiang) + +* build: map 'AMD64' host arch to 'x64' (Ben Noordhuis) + +* osx: protected use of potentially undefined macro (Samuel Lorétan) + +* linux: fix compilation with musl (Saúl Ibarra Corretgé) + +* doc: describe how to make release builds on Unix (Saúl Ibarra Corretgé) + +* doc: add missing link in README (Saúl Ibarra Corretgé) + +* build: python 2.x/3.x consistent print usage (Rasmus Christian Pedersen) + +* test: assume no IPv6 if interfaces cannot be listed (Nan Xiang) + +* darwin: replace F_FULLFSYNC with fdatasync syscall (Saúl Ibarra Corretgé) + +* doc: add missing write callback to example (Nándor István Krácser) + +* build: compile with -D_THREAD_SAFE on AIX (Imran Iqbal) + +* test: fix threadpool_multiple_event_loops on PPC (Imran Iqbal) + +* test: reduce timeout in tcp_close_while_connecting (Imran Iqbal) + +* unix, win: consistently null-terminate buffers (Saúl Ibarra Corretgé) + +* unix, win: count null byte on UV_ENOBUFS (Saúl Ibarra Corretgé) + +* test: fix deadlocks in uv_cond_wait (Katsutoshi Horie) + +* linux: fix cpu count (Lukasz Jagiello) + +* unix: fix uv__handle_type for AIX (Imran Iqbal) + +* linux: call fclose(), fix fdopen() memory leak (Ben Noordhuis) + +* win: remove unneeded condition (Saúl Ibarra Corretgé) + +* unix: fix compile error in Android using bionic (Robert Chiras) + +* linux: add braces to multi-statement if (Kári Tristan Helgason) + +* doc: add @cjihrig as a maintainer (Saúl Ibarra Corretgé) + +* unix: add fork-safe open file function (Kári Tristan Helgason) + +* linux: replace calls to fopen with uv__open_file (Kári Tristan Helgason) + +* linux: remove redundant call to rewind() (Krishnaraj Bhat) + +* win: remove duplicated code when processing fsevents (Saúl Ibarra Corretgé) + +* test: fix poll_bad_fdtype for AIX (Imran Iqbal) + +* linux: fix error checking in uv__open_file (Saúl Ibarra Corretgé) + +* poll: add UV_DISCONNECT event (Santiago Gimeno) + +* fs: realpath: fix string size before converting (Yuval Brik) + +* win: use native APIs for UTF conversions (cjihrig) + +* doc: clarify uv_loop_close() (Ben Noordhuis) + +* unix: retry ioctl(TIOCGWINSZ) on EINTR (Ben Noordhuis) + +* win,build: remove unused build defines (Saúl Ibarra Corretgé) + +* win: fix buffer overflow in fs events (Joran Dirk Greef) + +* win: fix uv_relative_path and remove dead branch (Joran Dirk Greef) + +* unix: use open(2) with O_CLOEXEC on OS X (Kári Tristan Helgason) + +* test: add missing copyright header (cjihrig) + +* aix: fix 'POLLRDHUP undeclared' build error (Ben Noordhuis) 
+ +* unix,win: add uv_get_passwd() (cjihrig) + +* process: fix uv_spawn edge-case (Santiago Gimeno) + +* test: use %ld for printing uid/gid (Ben Noordhuis) + +* aix: fix ahafs implementation (Imran Iqbal) + +* aix: do not store absolute path to ahafs (Imran Iqbal) + +* process: close process pipes safely (Santiago Gimeno) + +* unix: open ttyname instead of /dev/tty (Enno Boland) + +* unix: remove outdated comment (Kári Tristan Helgason) + + +2015.12.15, Version 1.8.0 (Stable), 5467299450ecf61635657557b6e01aaaf6c3fdf4 + +Changes since version 1.7.5: + +* unix: fix memory leak in uv_interface_addresses (Jianghua Yang) + +* unix: make uv_guess_handle work properly for AIX (Gireesh Punathil) + +* fs: undo uv__req_init when uv__malloc failed (Jianghua Yang) + +* build: remove unused 'component' GYP option (Saúl Ibarra Corretgé) + +* include: remove duplicate extern declaration (Jianghua Yang) + +* win: use the MSVC provided snprintf where possible (Jason Williams) + +* win, test: fix compilation warning (Saúl Ibarra Corretgé) + +* win: fix compilation with VS < 2012 (Ryan Johnston) + +* stream: support empty uv_try_write on unix (Fedor Indutny) + +* unix: fix request handle leak in uv__udp_send (Jianghua Yang) + +* src: replace QUEUE_SPLIT with QUEUE_MOVE (Ben Noordhuis) + +* unix: use QUEUE_MOVE when iterating over lists (Ben Noordhuis) + +* unix: squelch harmless valgrind warning (Ben Noordhuis) + +* test: don't abort on setrlimit() failure (Ben Noordhuis) + +* unix: only undo fs req registration in async mode (Ben Noordhuis) + +* unix: fix uv__getiovmax return value (HungMingWu) + +* unix: make work with Solaris Studio. (Adam Stylinski) + +* test: fix fs_event_watch_file_currentdir flakiness (Santiago Gimeno) + +* unix: skip prohibited syscalls on tvOS and watchOS (Nathan Corvino) + +* test: use FQDN in getaddrinfo_fail test (Wink Saville) + +* docs: clarify documentation of uv_tcp_init_ex (Andrius Bentkus) + +* win: fix comment (Miodrag Milanovic) + +* doc: fix typo in README (Angel Leon) + +* darwin: abort() if (un)locking fs mutex fails (Ben Noordhuis) + +* pipe: enable inprocess uv_write2 on Windows (Louis DeJardin) + +* win: properly return UV_EBADF when _close() fails (Nicholas Vavilov) + +* test: skip process_title for AIX (Imran Iqbal) + +* misc: expose handle print APIs (Petka Antonov) + +* include: add stdio.h to uv.h (Saúl Ibarra Corretgé) + +* misc: remove unnecessary null pointer checks (Ian Kronquist) + +* test,freebsd: skip udp_dual_stack if not supported (Santiago Gimeno) + +* linux: don't retry dup2/dup3 on EINTR (Ben Noordhuis) + +* unix: don't retry dup2/dup3 on EINTR (Ben Noordhuis) + +* test: fix -Wtautological-pointer-compare warnings (Saúl Ibarra Corretgé) + +* win: map ERROR_BAD_PATHNAME to UV_ENOENT (Tony Kelman) + +* test: fix test/test-tty.c for AIX (Imran Iqbal) + +* android: support api level less than 21 (kkdaemon) + +* fsevents: fix race on simultaneous init+close (Fedor Indutny) + +* linux,fs: fix p{read,write}v with a 64bit offset (Saúl Ibarra Corretgé) + +* fs: add uv_fs_realpath() (Yuval Brik) + +* win: fix path for removed and renamed fs events (Joran Dirk Greef) + +* win: do not read more from stream than available (Jeremy Whitlock) + +* test: test that uv_close() doesn't corrupt QUEUE (Andrey Mazo) + +* unix: fix uv_fs_event_stop() from fs_event_cb (Andrey Mazo) + +* test: fix self-deadlocks in thread_rwlock_trylock (Ben Noordhuis) + +* src: remove non ascii character (sztomi) + +* test: fix test udp_multicast_join6 for AIX (Imran Iqbal) + + +2015.09.23, 
Version 1.7.5 (Stable), a8c1136de2cabf25b143021488cbaab05834daa8 + +Changes since version 1.7.4: + +* unix: Support atomic compare & swap xlC on AIX (nmushell) + +* unix: Fix including uv-aix.h on AIX (nmushell) + +* unix: consolidate rwlock tryrdlock trywrlock errors (Saúl Ibarra Corretgé) + +* unix, win: consolidate mutex trylock errors (Saúl Ibarra Corretgé) + +* darwin: fix memory leak in uv_cpu_info (Jianghua Yang) + +* test: add tests for the uv_rwlock implementation (Bert Belder) + +* win: redo/fix the uv_rwlock APIs (Bert Belder) + +* win: don't fetch function pointers to SRWLock APIs (Bert Belder) + + +2015.09.12, Version 1.7.4 (Stable), a7ad4f52189d89cfcba35f78bfc5ff3b1f4105c4 + +Changes since version 1.7.3: + +* doc: uv_read_start and uv_read_cb clarifications (Ben Trask) + +* freebsd: obtain true uptime through clock_gettime() (Jianghua Yang) + +* win, tty: do not convert \r to \r\n (Colin Snover) + +* build,gyp: add DragonFly to the list of OSes (Michael Neumann) + +* fs: fix bug in sendfile for DragonFly (Michael Neumann) + +* doc: add uv_dlsym() return type (Brian White) + +* tests: fix fs tests run w/o full getdents support (Jeremy Whitlock) + +* doc: fix typo (Devchandra Meetei Leishangthem) + +* doc: fix uv-unix.h location (Sakthipriyan Vairamani) + +* unix: fix error check when closing process pipe fd (Ben Noordhuis) + +* test,freebsd: fix ipc_listen_xx_write tests (Santiago Gimeno) + +* win: fix unsavory rwlock fallback implementation (Bert Belder) + +* doc: clarify repeat timer behavior (Eli Skeggs) + + +2015.08.28, Version 1.7.3 (Stable), 93877b11c8b86e0a6befcda83a54555c1e36e4f0 + +Changes since version 1.7.2: + +* threadpool: fix thread starvation bug (Ben Noordhuis) + + +2015.08.25, Version 1.7.2 (Stable), 4d13a013fcfa72311f0102751fdc7951873f466c + +Changes since version 1.7.1: + +* unix, win: make uv_loop_init return on error (Willem Thiart) + +* win: reset pipe handle for pipe servers (Saúl Ibarra Corretgé) + +* win: fix replacing pipe handle for pipe servers (Saúl Ibarra Corretgé) + +* win: fix setting pipe pending instances after bind (Saúl Ibarra Corretgé) + + +2015.08.20, Version 1.7.1 (Stable), 44f4b6bd82d8ae4583ccc4768a83af778ef69f85 + +Changes since version 1.7.0: + +* doc: document the procedure for verifying releases (Saúl Ibarra Corretgé) + +* doc: add note about Windows binaries to the README (Saúl Ibarra Corretgé) + +* doc: use long GPG IDs in MAINTAINERS.md (Saúl Ibarra Corretgé) + +* Revert "stream: squelch ECONNRESET error if already closed" (Saúl Ibarra + Corretgé) + +* doc: clarify uv_read_stop() is idempotent (Corbin Simpson) + +* unix: OpenBSD's setsockopt needs an unsigned char for multicast (Zachary + Hamm) + +* test: Fix two memory leaks (Karl Skomski) + +* unix,win: return EINVAL on nullptr args in uv_fs_{read,write} (Karl Skomski) + +* win: set accepted TCP sockets as non-inheritable (Saúl Ibarra Corretgé) + +* unix: remove superfluous parentheses in fs macros (Ben Noordhuis) + +* unix: don't copy arguments for sync fs requests (Ben Noordhuis) + +* test: plug small memory leak in unix test runner (Ben Noordhuis) + +* unix,windows: allow NULL loop for sync fs requests (Ben Noordhuis) + +* unix,windows: don't assert on unknown error code (Ben Noordhuis) + +* stream: retry write on EPROTOTYPE on OSX (Brian White) + +* common: fix use of snprintf on Windows (Saúl Ibarra Corretgé) + +* tests: refactored fs watch_dir tests for stability (Jeremy Whitlock) + + +2015.08.06, Version 1.7.0 (Stable), 415a865d6365ba58d02b92b89d46ba5d7744ec8b + +Changes 
since version 1.6.1: + +* win,stream: add slot to remember CRT fd (Bert Belder) + +* win,pipe: properly close when created from CRT fd (Bert Belder) + +* win,pipe: don't close fd 0-2 (Bert Belder) + +* win,tty: convert fd -> handle safely (Bert Belder) + +* win,tty: properly close when created from CRT fd (Bert Belder) + +* win,tty: don't close fd 0-2 (Bert Belder) + +* win,fs: don't close fd 0-2 (Bert Belder) + +* win: include "malloc.h" (Cheng Zhao) + +* windows: MSVC 2015 has C99 inline (Jason Williams) + +* dragonflybsd: fixes for nonblocking and cloexec (Michael Neumann) + +* dragonflybsd: use sendfile(2) for uv_fs_sendfile (Michael Neumann) + +* dragonflybsd: fix uv_exepath (Michael Neumann) + +* win,fs: Fixes align(8) directive on mingw (Stefano Cristiano) + +* unix, win: prevent replacing fd in uv_{udp,tcp,pipe}_t (Saúl Ibarra Corretgé) + +* win: move logic to set socket non-inheritable to uv_tcp_set_socket (Saúl + Ibarra Corretgé) + +* unix, win: add ability to create tcp/udp sockets early (Saúl Ibarra Corretgé) + +* test: retry select() on EINTR, honor milliseconds (Ben Noordhuis) + +* unix: consolidate tcp and udp bind error (Saúl Ibarra Corretgé) + +* test: conditionally skip udp_ipv6_multicast_join6 (heshamsafi) + +* core: add UV_VERSION_HEX macro (Saúl Ibarra Corretgé) + +* doc: add section with version-checking macros and functions (Saúl Ibarra + Corretgé) + +* tty: cleanup handle if uv_tty_init fails (Saúl Ibarra Corretgé) + +* darwin: save a fd when FSEvents is used (Saúl Ibarra Corretgé) + +* win: fix returning thread id in uv_thread_self (Saúl Ibarra Corretgé) + +* common: use offsetof for QUEUE_DATA (Saúl Ibarra Corretgé) + +* win: remove UV_HANDLE_CONNECTED (A. Hauptmann) + +* docs: add Windows specific note for uv_fs_open (Saúl Ibarra Corretgé) + +* doc: add note about uv_fs_scandir (Saúl Ibarra Corretgé) + +* test,unix: reduce stack size of watchdog threads (Ben Noordhuis) + +* win: add support for recursive file watching (Saúl Ibarra Corretgé) + +* win,tty: support consoles with non-default colors (John McNamee) + +* doc: add missing variable name (Yosuke Furukawa) + +* stream: squelch ECONNRESET error if already closed (Santiago Gimeno) + +* build: remove ancient condition from common.gypi (Saúl Ibarra Corretgé) + +* tests: skip some tests when network is unreachable (Luca Bruno) + +* build: proper support for android cross compilation (guworks) + +* android: add missing include to pthread-fixes.c (RossBencina) + +* test: fix compilation warning (Saúl Ibarra Corretgé) + +* doc: add a note about uv_dirent_t.type (Saúl Ibarra Corretgé) + +* win,test: fix shared library build (Saúl Ibarra Corretgé) + +* test: fix compilation warning (Santiago Gimeno) + +* build: add experimental Windows installer (Roger A. 
Light) + +* threadpool: send signal only when queue is empty (chenttuuvv) + +* aix: fix uv_exepath with relative paths (Richard Lau) + +* build: fix version syntax in AppVeyor file (Saúl Ibarra Corretgé) + +* unix: allow nbufs > IOV_MAX in uv_fs_{read,write} (ronkorving) + + +2015.06.06, Version 1.6.1 (Stable), 30c8be07bb78a66fdee5141626bf53a49a17094a + +Changes since version 1.6.0: + +* unix: handle invalid _SC_GETPW_R_SIZE_MAX values (cjihrig) + + +2015.06.04, Version 1.6.0 (Stable), adfccad76456061dfcf79b8df8e7dbfee51791d7 + +Changes since version 1.5.0: + +* aix: fix setsockopt for multicast options (Michael) + +* unix: don't block for io if any io handle is primed (Saúl Ibarra Corretgé) + +* windows: MSVC 2015 has snprintf() (Rui Abreu Ferreira) + +* windows: Add VS2015 support to vcbuild.bat (Jason Williams) + +* doc: fix typo in tcp.rst (Igor Soarez) + +* linux: work around epoll bug in kernels < 2.6.37 (Ben Noordhuis) + +* unix,win: add uv_os_homedir() (cjihrig) + +* stream: fix `select()` race condition (Fedor Indutny) + +* unix: prevent infinite loop in uv__run_pending (Saúl Ibarra Corretgé) + +* unix: make sure UDP send callbacks are asynchronous (Saúl Ibarra Corretgé) + +* test: fix `platform_output` netmask printing. (Andrew Paprocki) + +* aix: add ahafs autoconf detection and README notes (Andrew Paprocki) + +* core: add ability to customize memory allocator (Saúl Ibarra Corretgé) + + +2015.05.07, Version 1.5.0 (Stable), 4e77f74c7b95b639b3397095db1bc5bcc016c203 + +Changes since version 1.4.2: + +* doc: clarify that the thread pool primites are not thread safe (Andrius + Bentkus) + +* aix: always deregister closing fds from epoll (Michael) + +* unix: fix glibc-2.20+ macro incompatibility (Massimiliano Torromeo) + +* doc: add Sphinx plugin for generating links to man pages (Saúl Ibarra + Corretgé) + +* doc: link system and library calls to man pages (Saúl Ibarra Corretgé) + +* doc: document uv_getnameinfo_t.{host|service} (Saúl Ibarra Corretgé) + +* build: update the location of gyp (Stephen von Takach) + +* win: name all anonymous structs and unions (TomCrypto) + +* linux: work around epoll bug in kernels 3.10-3.19 (Ben Noordhuis) + +* darwin: fix size calculation in select() fallback (Ole André Vadla Ravnås) + +* solaris: fix setsockopt for multicast options (Julien Gilli) + +* test: fix race condition in multithreaded test (Ben Noordhuis) + +* doc: fix long lines in tty.rst (Ben Noordhuis) + +* test: use UV_TTY_MODE_* values in tty test (Ben Noordhuis) + +* unix: don't clobber errno in uv_tty_reset_mode() (Ben Noordhuis) + +* unix: reject non-tty fds in uv_tty_init() (Ben Noordhuis) + +* win: fix pipe blocking writes (Alexis Campailla) + +* build: fix cross-compiling for iOS (Steven Kabbes) + +* win: remove unnecessary malloc.h + +* include: use `extern "c++"` for defining C++ code (Kazuho Oku) + +* unix: reap child on execvp() failure (Ryan Phillips) + +* windows: fix handle leak on EMFILE (Brian Green) + +* test: fix tty_file, close handle if initialized (Saúl Ibarra Corretgé) + +* doc: clarify what uv_*_open accepts (Saúl Ibarra Corretgé) + +* doc: clarify that we don't maintain external doc resources (Saúl Ibarra + Corretgé) + +* build: add documentation for ninja support (Devchandra Meetei Leishangthem) + +* doc: document uv_buf_t members (Corey Farrell) + +* linux: fix epoll_pwait() fallback on arm64 (Ben Noordhuis) + +* android: fix compilation warning (Saúl Ibarra Corretgé) + +* unix: don't close the fds we just setup (Sam Roberts) + +* test: spawn child replacing 
std{out,err} to stderr (Saúl Ibarra Corretgé) + +* unix: fix swapping fds order in uv_spawn (Saúl Ibarra Corretgé) + +* unix: fix potential bug if dup2 fails in uv_spawn (Saúl Ibarra Corretgé) + +* test: remove LOG and LOGF variadic macros (Saúl Ibarra Corretgé) + +* win: fix uv_fs_access on directories (Saúl Ibarra Corretgé) + +* win: fix of double free in uv_uptime (Per Nilsson) + +* unix: open "/dev/null" instead of "/" for emfile_fd (Alan Rogers) + +* docs: add some missing words (Daryl Haresign) + +* unix: clean up uv_fs_open() O_CLOEXEC logic (Ben Noordhuis) + +* build: set SONAME for shared library in uv.gyp (Rui Abreu Ferreira) + +* windows: define snprintf replacement as inline instead of static (Rui Abreu + Ferreira) + +* win: fix unlink of readonly files (João Reis) + +* doc: fix uv_run(UV_RUN_DEFAULT) description (Ben Noordhuis) + +* linux: intercept syscall when running under memory sanitizer (Keno Fischer) + +* aix: fix uv_interface_addresses return value (farblue68) + +* windows: defer reporting TCP write failure until next tick (Saúl Ibarra + Corretgé) + +* test: add test for deferred TCP write failure (Saúl Ibarra Corretgé) + + +2015.02.27, Version 1.4.2 (Stable), 1a7391348a11d5450c0f69c828d5302e2cb842eb + +Changes since version 1.4.1: + +* stream: ignore EINVAL for SO_OOBINLINE on OS X (Fedor Indutny) + + +2015.02.25, Version 1.4.1 (Stable), e8e3fc5789cc0f02937879d141cca0411274093c + +Changes since version 1.4.0: + +* win: don't use inline keyword in thread.c (Ben Noordhuis) + +* windows: fix setting dirent types on uv_fs_scandir_next (Saúl Ibarra + Corretgé) + +* unix,windows: make uv_thread_create() return errno (Ben Noordhuis) + +* tty: fix build for SmartOS (Julien Gilli) + +* unix: fix for uv_async data race (Michael Penick) + +* unix, windows: map EHOSTDOWN errno (Ben Noordhuis) + +* stream: use SO_OOBINLINE on OS X (Fedor Indutny) + + +2015.02.10, Version 1.4.0 (Stable), 19fb8a90648f3763240db004b77ab984264409be + +Changes since version 1.3.0: + +* unix: check Android support for pthread_cond_timedwait_monotonic_np (Leith + Bade) + +* test: use modified path in test (cjihrig) + +* unix: implement uv_stream_set_blocking() (Ben Noordhuis) + + +2015.01.29, Version 1.3.0 (Stable), 165685b2a9a42cf96501d79cd6d48a18aaa16e3b + +Changes since version 1.2.1: + +* unix, windows: set non-block mode in uv_poll_init (Saúl Ibarra Corretgé) + +* doc: clarify which flags are supported in uv_fs_event_start (Saúl Ibarra + Corretgé) + +* win,unix: move loop functions which have identical implementations (Andrius + Bentkus) + +* doc: explain how the threadpool is allocated (Alex Mo) + +* doc: clarify uv_default_loop (Saúl Ibarra Corretgé) + +* unix: fix implicit declaration compiler warning (Ben Noordhuis) + +* unix: fix long line introduced in commit 94e628fa (Ben Noordhuis) + +* unix, win: add synchronous uv_get{addr,name}info (Saúl Ibarra Corretgé) + +* linux: fix epoll_pwait() regression with < 2.6.19 (Ben Noordhuis) + +* build: compile -D_GNU_SOURCE on linux (Ben Noordhuis) + +* build: use -fvisibility=hidden in autotools build (Ben Noordhuis) + +* fs, pipe: no trailing terminator in exact sized buffers (Andrius Bentkus) + +* style: rename buf to buffer and len to size for consistency (Andrius Bentkus) + +* test: fix test-spawn on MinGW32 (Luis Martinez de Bartolome) + +* win, pipe: fix assertion when destroying timer (Andrius Bentkus) + +* win, unix: add pipe_peername implementation (Andrius Bentkus) + + +2015.01.29, Version 0.10.33 (Stable), 
7a2253d33ad8215a26c1b34f1952aee7242dd687 + +Changes since version 0.10.32: + +* linux: fix epoll_pwait() regression with < 2.6.19 (Ben Noordhuis) + +* test: back-port uv_loop_configure() test (Ben Noordhuis) + + +2015.01.15, Version 1.2.1 (Stable), 4ca78e989062a1099dc4b9ad182a98e8374134b1 + +Changes since version 1.2.0: + +* unix: remove unused dtrace file (Saúl Ibarra Corretgé) + +* test: skip TTY select test if /dev/tty can't be opened (Saúl Ibarra Corretgé) + +* doc: clarify the behavior of uv_tty_init (Saúl Ibarra Corretgé) + +* doc: clarify how uv_async_send behaves (Saúl Ibarra Corretgé) + +* build: make dist now generates a full tarball (Johan Bergström) + +* freebsd: make uv_exepath more resilient (Saúl Ibarra Corretgé) + +* unix: make setting the tty mode to the same value a no-op (Saúl Ibarra + Corretgé) + +* win,tcp: support uv_try_write (Bert Belder) + +* test: enable test-tcp-try-write on windows (Bert Belder) + +* win,tty: support uv_try_write (Bert Belder) + +* unix: set non-block mode in uv_{pipe,tcp,udp}_open (Ben Noordhuis) + + +2015.01.06, Version 1.2.0 (Stable), 09f25b13cd149c7981108fc1a75611daf1277f83 + +Changes since version 1.1.0: + +* linux: fix epoll_pwait() sigmask size calculation (Ben Noordhuis) + +* tty: implement binary I/O terminal mode (Yuri D'Elia) + +* test: fix spawn test with autotools build (Ben Noordhuis) + +* test: skip ipv6 tests when ipv6 is not supported (Ben Noordhuis) + +* common: move STATIC_ASSERT to uv-common.h (Alexey Melnichuk) + +* win/thread: store thread handle in a TLS slot (Alexey Melnichuk) + +* unix: fix ttl, multicast ttl and loop options on IPv6 (Saúl Ibarra Corretgé) + +* linux: fix support for preadv/pwritev-less kernels (Ben Noordhuis) + +* unix: make uv_exepath(size=0) return UV_EINVAL (Ben Noordhuis) + +* darwin: fix uv_exepath(smallbuf) UV_EPERM error (Ben Noordhuis) + +* openbsd: fix uv_exepath(smallbuf) UV_EINVAL error (Ben Noordhuis) + +* linux: fix uv_exepath(size=1) UV_EINVAL error (Ben Noordhuis) + +* sunos: preemptively fix uv_exepath(size=1) (Ben Noordhuis) + +* win: fix and clarify comments in winapi.h (Bert Belder) + +* win: make available NtQueryDirectoryFile (Bert Belder) + +* win: add definitions for directory information types (Bert Belder) + +* win: use NtQueryDirectoryFile to implement uv_fs_scandir (Bert Belder) + +* unix: don't unlink unix socket on bind error (Ben Noordhuis) + +* build: fix bad comment in autogen.sh (Ben Noordhuis) + +* build: add AC_PROG_LIBTOOL to configure.ac (Ben Noordhuis) + +* test: skip udp_options6 if there no IPv6 support (Saúl Ibarra Corretgé) + +* win: add definitions for MUI errors mingw lacks (Bert Belder) + +* build: enable warnings in autotools build (Ben Noordhuis) + +* build: remove -Wno-dollar-in-identifier-extension (Ben Noordhuis) + +* build: move flags from Makefile.am to configure.ac (Ben Noordhuis) + + +2015.01.06, Version 0.10.32 (Stable), 378de30c59aef5fdb6d130fa5cfcb0a68fce571c + +Changes since version 0.10.31: + +* linux: fix epoll_pwait() sigmask size calculation (Ben Noordhuis) + + +2014.12.25, Version 1.1.0 (Stable), 9572f3e74a167f59a8017e57ca3ebe91ffd88e18 + +Changes since version 1.0.2: + +* test: test that closing a poll handle doesn't corrupt the stack (Bert Belder) + +* win: fix compilation of tests (Marc Schlaich) + +* Revert "win: keep a reference to AFD_POLL_INFO in cancel poll" (Bert Belder) + +* win: avoid stack corruption when closing a poll handle (Bert Belder) + +* test: fix test-fs-file-loop on Windows (Bert Belder) + +* test: fix 
test-cwd-and-chdir (Bert Belder) + +* doc: indicate what version uv_loop_configure was added on (Saúl Ibarra + Corretgé) + +* doc: fix sphinx warning (Saúl Ibarra Corretgé) + +* test: skip spawn_setuid_setgid if we get EACCES (Saúl Ibarra Corretgé) + +* test: silence some Clang warnings (Saúl Ibarra Corretgé) + +* test: relax osx_select_many_fds (Saúl Ibarra Corretgé) + +* test: fix compilation warnings when building with Clang (Saúl Ibarra + Corretgé) + +* win: fix autotools build of tests (Luis Lavena) + +* gitignore: ignore Visual Studio files (Marc Schlaich) + +* win: set fallback message if FormatMessage fails (Marc Schlaich) + +* win: fall back to default language in uv_dlerror (Marc Schlaich) + +* test: improve compatibility for dlerror test (Marc Schlaich) + +* test: check dlerror is "no error" in no error case (Marc Schlaich) + +* unix: change uv_cwd not to return a trailing slash (Saúl Ibarra Corretgé) + +* test: fix cwd_and_chdir test on Unix (Saúl Ibarra Corretgé) + +* test: add uv_cwd output to platform_output test (Saúl Ibarra Corretgé) + +* build: fix dragonflybsd autotools build (John Marino) + +* win: scandir use 'ls' for formatting long strings (Kenneth Perry) + +* build: remove clang and gcc_version gyp defines (Ben Noordhuis) + +* unix, windows: don't treat uv_run_mode as a bitmask (Saúl Ibarra Corretgé) + +* unix, windows: fix UV_RUN_ONCE mode if progress was made (Saúl Ibarra + Corretgé) + + +2014.12.25, Version 0.10.31 (Stable), 4dbd27e2219069a6daa769fb37f98673b77b4261 + +Changes since version 0.10.30: + +* test: test that closing a poll handle doesn't corrupt the stack (Bert Belder) + +* win: fix compilation of tests (Marc Schlaich) + +* Revert "win: keep a reference to AFD_POLL_INFO in cancel poll" (Bert Belder) + +* win: avoid stack corruption when closing a poll handle (Bert Belder) + +* gitignore: ignore Visual Studio files (Marc Schlaich) + +* win: set fallback message if FormatMessage fails (Marc Schlaich) + +* win: fall back to default language in uv_dlerror (Marc Schlaich) + +* test: improve compatibility for dlerror test (Marc Schlaich) + +* test: check dlerror is "no error" in no error case (Marc Schlaich) + +* build: link against -pthread (Logan Rosen) + +* win: scandir use 'ls' for formatting long strings (Kenneth Perry) + + +2014.12.10, Version 1.0.2 (Stable), eec671f0059953505f9a3c9aeb7f9f31466dd7cd + +Changes since version 1.0.1: + +* linux: fix sigmask size arg in epoll_pwait() call (Ben Noordhuis) + +* linux: handle O_NONBLOCK != SOCK_NONBLOCK case (Helge Deller) + +* doc: fix spelling (Joey Geralnik) + +* unix, windows: fix typos in comments (Joey Geralnik) + +* test: canonicalize test runner path (Ben Noordhuis) + +* test: fix compilation warnings (Saúl Ibarra Corretgé) + +* test: skip tty test if detected width and height are 0 (Saúl Ibarra Corretgé) + +* doc: update README with IRC channel (Saúl Ibarra Corretgé) + +* Revert "unix: use cfmakeraw() for setting raw TTY mode" (Ben Noordhuis) + +* doc: document how to get result of uv_fs_mkdtemp (Tim Caswell) + +* unix: add flag for blocking SIGPROF during poll (Ben Noordhuis) + +* unix, windows: add uv_loop_configure() function (Ben Noordhuis) + +* win: keep a reference to AFD_POLL_INFO in cancel poll (Marc Schlaich) + +* test: raise fd limit for OSX select test (Saúl Ibarra Corretgé) + +* unix: remove overzealous assert in uv_read_stop (Saúl Ibarra Corretgé) + +* unix: reset the reading flag when a stream gets EOF (Saúl Ibarra Corretgé) + +* unix: stop reading if an error is produced (Saúl Ibarra 
Corretgé) + +* cleanup: remove all dead assignments (Maciej Małecki) + +* linux: return early if we have no interfaces (Maciej Małecki) + +* cleanup: remove a dead increment (Maciej Małecki) + + +2014.12.10, Version 0.10.30 (Stable), 5a63f5e9546dca482eeebc3054139b21f509f21f + +Changes since version 0.10.29: + +* linux: fix sigmask size arg in epoll_pwait() call (Ben Noordhuis) + +* linux: handle O_NONBLOCK != SOCK_NONBLOCK case (Helge Deller) + +* doc: update project links (Ben Noordhuis) + +* windows: fix compilation of tests (Marc Schlaich) + +* unix: add flag for blocking SIGPROF during poll (Ben Noordhuis) + +* unix, windows: add uv_loop_configure() function (Ben Noordhuis) + +* win: keep a reference to AFD_POLL_INFO in cancel poll (Marc Schlaich) + + +2014.11.27, Version 1.0.1 (Stable), 0a8e81374e861d425b56c45c8599595d848911d2 + +Changes since version 1.0.0: + +* readme: remove Rust from users (Elijah Andrews) + +* doc,build,include: update project links (Ben Noordhuis) + +* doc: fix typo: Strcutures -> Structures (Michael Ira Krufky) + +* unix: fix processing process handles queue (Saúl Ibarra Corretgé) + +* win: replace non-ansi characters in source file (Bert Belder) + + +2014.11.21, Version 1.0.0 (Stable), feb2a9e6947d892f449b2770c4090f7d8c88381b + +Changes since version 1.0.0-rc2: + +* doc: fix git/svn url for gyp repo in README (Emmanuel Odeke) + +* windows: fix fs_read with nbufs > 1 and offset (Unknown W. Brackets) + +* win: add missing IP_ADAPTER_UNICAST_ADDRESS_LH definition for MinGW + (huxingyi) + +* doc: mention homebrew in README (Mikhail Mukovnikov) + +* doc: add learnuv workshop to README (Thorsten Lorenz) + +* doc: fix parameter name in uv_fs_access (Saúl Ibarra Corretgé) + +* unix: use cfmakeraw() for setting raw TTY mode (Yuri D'Elia) + +* win: fix uv_thread_self() (Alexis Campailla) + +* build: add x32 support to gyp build (Ben Noordhuis) + +* build: remove dtrace probes (Ben Noordhuis) + +* doc: fix link in misc.rst (Manos Nikolaidis) + +* mailmap: remove duplicated entries (Saúl Ibarra Corretgé) + +* gyp: fix comment regarding version info location (Saúl Ibarra Corretgé) + + +2014.10.21, Version 1.0.0-rc2 (Pre-release) + +Changes since version 1.0.0-rc1: + +* build: add missing fixtures to distribution tarball (Rob Adams) + +* doc: update references to current stable branch (Zachary Newman) + +* fs: fix readdir on empty directory (Fedor Indutny) + +* fs: rename uv_fs_readdir to uv_fs_scandir (Saúl Ibarra Corretgé) + +* doc: document uv_alloc_cb (Saúl Ibarra Corretgé) + +* doc: add migration guide from version 0.10 (Saúl Ibarra Corretgé) + +* build: add DragonFly BSD support in autotools (Robin Hahling) + +* doc: document missing stream related structures (Saúl Ibarra Corretgé) + +* doc: clarify uv_loop_t.data field lifetime (Saúl Ibarra Corretgé) + +* doc: add documentation for missing functions and structures (Saúl Ibarra + Corretgé) + +* doc: fix punctuation and grammar in README (Jeff Widman) + +* windows: return libuv error codes in uv_poll_init() (cjihrig) + +* unix, windows: add uv_fs_access() (cjihrig) + +* windows: fix netmask detection (Alexis Campailla) + +* unix, windows: don't include null byte in uv_cwd size (Saúl Ibarra Corretgé) + +* unix, windows: add uv_thread_equal (Tomasz Kołodziejski) + +* windows: fix fs_write with nbufs > 1 and offset (Unknown W. 
Brackets) + + +2014.10.21, Version 0.10.29 (Stable), 2d728542d3790183417f8f122a110693cd85db14 + +Changes since version 0.10.28: + +* darwin: allocate enough space for select() hack (Fedor Indutny) + +* linux: try epoll_pwait if epoll_wait is missing (Michael Hudson-Doyle) + +* windows: map ERROR_INVALID_DRIVE to UV_ENOENT (Saúl Ibarra Corretgé) + + +2014.09.18, Version 1.0.0-rc1 (Unstable), 0c28bbf7b42882853d1799ab96ff68b07f7f8d49 + +Changes since version 0.11.29: + +* windows: improve timer precision (Alexis Campailla) + +* build, gyp: set xcode flags (Recep ASLANTAS) + +* ignore: include m4 files which are created manually (Recep ASLANTAS) + +* build: add m4 for feature/flag-testing (Recep ASLANTAS) + +* ignore: ignore Xcode project and workspace files (Recep ASLANTAS) + +* unix: fix warnings about dollar symbol usage in identifiers (Recep ASLANTAS) + +* unix: fix warnings when loading functions with dlsym (Recep ASLANTAS) + +* linux: try epoll_pwait if epoll_wait is missing (Michael Hudson-Doyle) + +* test: add test for closing and recreating default loop (Saúl Ibarra Corretgé) + +* windows: properly close the default loop (Saúl Ibarra Corretgé) + +* version: add ability to specify a version suffix (Saúl Ibarra Corretgé) + +* doc: add API documentation (Saúl Ibarra Corretgé) + +* test: don't close connection on write error (Trevor Norris) + +* windows: further simplify the code for timers (Saúl Ibarra Corretgé) + +* gyp: remove UNLIMITED_SELECT from dependent define (Fedor Indutny) + +* darwin: allocate enough space for select() hack (Fedor Indutny) + +* unix, windows: don't allow a NULL callback on timers (Saúl Ibarra Corretgé) + +* windows: simplify code in uv_timer_again (Saúl Ibarra Corretgé) + +* test: use less requests on tcp-write-queue-order (Saúl Ibarra Corretgé) + +* unix: stop child process watcher after last one exits (Saúl Ibarra Corretgé) + +* unix: simplify how process handle queue is managed (Saúl Ibarra Corretgé) + +* windows: remove duplicated field (mattn) + +* core: add a reserved field to uv_handle_t and uv_req_t (Saúl Ibarra Corretgé) + +* windows: fix buffer leak after failed udp send (Bert Belder) + +* windows: make sure sockets and handles are reset on close (Saúl Ibarra Corretgé) + +* unix, windows: add uv_fileno (Saúl Ibarra Corretgé) + +* build: use same CFLAGS in autotools build as in gyp (Saúl Ibarra Corretgé) + +* build: remove unneeded define in uv.gyp (Saúl Ibarra Corretgé) + +* test: fix watcher_cross_stop on Windows (Saúl Ibarra Corretgé) + +* unix, windows: move includes for EAI constants (Saúl Ibarra Corretgé) + +* unix: fix exposing EAI_* glibc-isms (Saúl Ibarra Corretgé) + +* unix: fix tcp write after bad connect freezing (Andrius Bentkus) + + +2014.08.20, Version 0.11.29 (Unstable), 35451fed830807095bbae8ef981af004a4b9259e + +Changes since version 0.11.28: + +* windows: make uv_read_stop immediately stop reading (Jameson Nash) + +* windows: fix uv__getaddrinfo_translate_error (Alexis Campailla) + +* netbsd: fix build (Saúl Ibarra Corretgé) + +* unix, windows: add uv_recv_buffer_size and uv_send_buffer_size (Andrius + Bentkus) + +* windows: add support for UNC paths on uv_spawn (Paul Goldsmith) + +* windows: replace use of inet_addr with uv_inet_pton (Saúl Ibarra Corretgé) + +* unix: replace some asserts with returning errors (Andrius Bentkus) + +* windows: use OpenBSD implementation for uv_fs_mkdtemp (Pavel Platto) + +* windows: fix GetNameInfoW error handling (Alexis Campailla) + +* fs: introduce uv_readdir_next() and report types (Fedor Indutny) 
+ +* fs: extend reported types in uv_fs_readdir_next (Saúl Ibarra Corretgé) + +* unix: read on stream even when UV__POLLHUP set. (Julien Gilli) + + +2014.08.08, Version 0.11.28 (Unstable), fc9e2a0bc487b299c0cd3b2c9a23aeb554b5d8d1 + +Changes since version 0.11.27: + +* unix, windows: const-ify handle in uv_udp_getsockname (Rasmus Pedersen) + +* windows: use UV_ECANCELED for aborted TCP writes (Saúl Ibarra Corretgé) + +* windows: add more required environment variables (Jameson Nash) + +* windows: sort environment variables before calling CreateProcess (Jameson + Nash) + +* unix, windows: move uv_loop_close out of assert (John Firebaugh) + +* windows: fix buffer overflow on uv__getnameinfo_work() (lilohuang) + +* windows: add uv_backend_timeout (Jameson Nash) + +* test: disable tcp_close_accept on Windows (Saúl Ibarra Corretgé) + +* windows: read the PATH env var of the child (Alex Crichton) + +* include: avoid using C++ 'template' reserved word (Iñaki Baz Castillo) + +* include: fix version number (Saúl Ibarra Corretgé) + + +2014.07.32, Version 0.11.27 (Unstable), ffe24f955032d060968ea0289af365006afed55e + +Changes since version 0.11.26: + +* unix, windows: use the same threadpool implementation (Saúl Ibarra Corretgé) + +* unix: use struct sockaddr_storage for target UDP addr (Saúl Ibarra Corretgé) + +* doc: add documentation to uv_udp_start_recv (Andrius Bentkus) + +* common: use common uv__count_bufs code (Andrius Bentkus) + +* unix, win: add send_queue_size and send_queue_count to uv_udp_t (Andrius + Bentkus) + +* unix, win: add uv_udp_try_send (Andrius Bentkus) + +* unix: return UV_EAGAIN if uv_try_write cannot write any data (Saúl Ibarra + Corretgé) + +* windows: fix compatibility with cygwin pipes (Jameson Nash) + +* windows: count queued bytes even if request completed immediately (Saúl + Ibarra Corretgé) + +* windows: disable CRT debug handler on MinGW32 (Saúl Ibarra Corretgé) + +* windows: map ERROR_INVALID_DRIVE to UV_ENOENT (Saúl Ibarra Corretgé) + +* unix: try to write immediately in uv_udp_send (Saúl Ibarra Corretgé) + +* unix: remove incorrect assert (Saúl Ibarra Corretgé) + +* openbsd: avoid requiring privileges for uv_resident_set_memory (Aaron Bieber) + +* unix: guarantee write queue cb execution order in streams (Andrius Bentkus) + +* img: add logo files (Saúl Ibarra Corretgé) + +* aix: improve AIX compatibility (Andrew Low) + +* windows: return bind error immediately when implicitly binding (Saúl Ibarra + Corretgé) + +* windows: don't use atexit for cleaning up the threadpool (Saúl Ibarra + Corretgé) + +* windows: destroy work queue elements when colsing a loop (Saúl Ibarra + Corretgé) + +* unix, windows: add uv_fs_mkdtemp (Pavel Platto) + +* build: handle platforms without multiprocessing.synchronize (Saúl Ibarra + Corretgé) + +* windows: change GENERIC_ALL to GENERIC_WRITE in fs__create_junction (Tony + Kelman) + +* windows: relay TCP bind errors via ipc (Alexis Campailla) + + +2014.07.32, Version 0.10.28 (Stable), 9c14b616f5fb84bfd7d45707bab4bbb85894443e + +Changes since version 0.10.27: + +* windows: fix handling closed socket while poll handle is closing (Saúl Ibarra + Corretgé) + +* unix: return system error on EAI_SYSTEM (Saúl Ibarra Corretgé) + +* unix: fix bogus structure field name (Saúl Ibarra Corretgé) + +* darwin: invoke `mach_timebase_info` only once (Fedor Indutny) + + +2014.06.28, Version 0.11.26 (Unstable), 115281a1058c4034d5c5ccedacb667fe3f6327ea + +Changes since version 0.11.25: + +* windows: add VT100 codes ?25l and ?25h (JD Ballard) + +* windows: add 
invert ANSI (7 / 27) emulation (JD Ballard) + +* unix: fix handling error on UDP socket creation (Saúl Ibarra Corretgé) + +* unix, windows: getnameinfo implementation (Rasmus Pedersen) + +* heap: fix `heap_remove()` (Fedor Indutny) + +* unix, windows: fix parsing scoped IPv6 addresses (Saúl Ibarra Corretgé) + +* windows: fix handling closed socket while poll handle is closing (Saúl Ibarra + Corretgé) + +* thread: barrier functions (Ben Noordhuis) + +* windows: fix PYTHON environment variable usage (Jay Satiro) + +* unix, windows: return system error on EAI_SYSTEM (Saúl Ibarra Corretgé) + +* windows: fix handling closed socket while poll handle is closing (Saúl Ibarra + Corretgé) + +* unix: don't run i/o callbacks after prepare callbacks (Saúl Ibarra Corretgé) + +* windows: add tty unicode support for input (Peter Atashian) + +* header: introduce `uv_loop_size()` (Andrius Bentkus) + +* darwin: invoke `mach_timebase_info` only once (Fedor Indutny) + + +2014.05.02, Version 0.11.25 (Unstable), 2acd544cff7142e06aa3b09ec64b4a33dd9ab996 + +Changes since version 0.11.24: + +* osx: pass const handle pointer to uv___stream_fd (Chernyshev Viacheslav) + +* unix, windows: pass const handle ptr to uv_tcp_get*name (Chernyshev + Viacheslav) + +* common: pass const sockaddr ptr to uv_ip*_name (Chernyshev Viacheslav) + +* unix, windows: validate flags on uv_udp|tcp_bind (Saúl Ibarra Corretgé) + +* unix: handle case when addr is not initialized after recvmsg (Saúl Ibarra + Corretgé) + +* unix, windows: uv_now constness (Rasmus Pedersen) + + +2014.04.15, Version 0.11.24 (Unstable), ed948c29f6e8c290f79325a6f0bc9ef35bcde644 + +Changes since version 0.11.23: + +* linux: reduce file descriptor count of async pipe (Ben Noordhuis) + +* sunos: support IPv6 qualified link-local addresses (Saúl Ibarra Corretgé) + +* windows: fix opening of read-only stdin pipes (Alexis Campailla) + +* windows: Fix an infinite loop in uv_spawn (Alex Crichton) + +* windows: fix console signal handler refcount (李港平) + +* inet: allow scopeid in uv_inet_pton (Fedor Indutny) + + +2014.04.07, Version 0.11.23 (Unstable), e54de537efcacd593f36fcaaf8b4cb9e64313275 + +Changes since version 0.11.22: + +* fs: avoid using readv/writev where possible (Fedor Indutny) + +* mingw: fix build with autotools (Saúl Ibarra Corretgé) + +* bsd: support IPv6 qualified link-local addresses (Saúl Ibarra Corretgé) + +* unix: add UV_HANDLE_IPV6 flag to tcp and udp handles (Saúl Ibarra Corretgé) + +* unix, windows: do not set SO_REUSEADDR by default on udp (Saúl Ibarra + Corretgé) + +* windows: fix check in uv_tty_endgame() (Maks Naumov) + +* unix, windows: add IPv6 support for uv_udp_multicast_interface (Saúl Ibarra + Corretgé) + +* unix: fallback to blocking writes if reopening a tty fails (Saúl Ibarra + Corretgé) + +* unix: fix handling uv__open_cloexec failure (Saúl Ibarra Corretgé) + +* unix, windows: add IPv6 support to uv_udp_set_membership (Saúl Ibarra + Corretgé) + +* unix, windows: removed unused status parameter (Saúl Ibarra Corretgé) + +* android: add support of ifaddrs in android (Javier Hernández) + +* build: fix SunOS and AIX build with autotools (Saúl Ibarra Corretgé) + +* build: freebsd link with libelf if dtrace enabled (Saúl Ibarra Corretgé) + +* stream: do not leak `alloc_cb` buffers on error (Fedor Indutny) + +* unix: fix setting written size on uv_wd (Saúl Ibarra Corretgé) + + +2014.03.11, Version 0.11.22 (Unstable), cd0c19b1d3c56acf0ade7687006e12e75fbda36d + +Changes since version 0.11.21: + +* unix, windows: map ERANGE errno (Saúl Ibarra 
Corretgé) + +* unix, windows: make uv_cwd be consistent with uv_exepath (Saúl Ibarra + Corretgé) + +* process: remove debug perror() prints (Fedor Indutny) + +* windows: fall back for volume info query (Isaiah Norton) + +* pipe: allow queueing pending handles (Fedor Indutny) + +* windows: fix winsock status codes for address errors (Raul Martins) + +* windows: Remove unused variable from uv__pipe_insert_pending_socket (David + Capello) + +* unix: workaround broken pthread_sigmask on Android (Paul Tan) + +* error: add ENXIO for O_NONBLOCK FIFO open() (Fedor Indutny) + +* freebsd: use accept4, introduced in version 10 (Saúl Ibarra Corretgé) + +* windows: fix warnings of MinGW -Wall -O3 (StarWing) + +* openbsd, osx: fix compilation warning on scandir (Saúl Ibarra Corretgé) + +* linux: always deregister closing fds from epoll (Geoffry Song) + +* unix: reopen tty as /dev/tty (Saúl Ibarra Corretgé) + +* kqueue: invalidate fd in uv_fs_event_t (Fedor Indutny) + + +2014.02.28, Version 0.11.21 (Unstable), 3ef958158ae1019e027ebaa93114160099db5206 + +Changes since version 0.11.20: + +* unix: fix uv_fs_write when using an empty buffer (Saúl Ibarra Corretgé) + +* unix, windows: add assertion in uv_loop_delete (Saúl Ibarra Corretgé) + + +2014.02.27, Version 0.11.20 (Unstable), 88355e081b51c69ee1e2b6b0015a4e3d38bd0579 + +Changes since version 0.11.19: + +* stream: start thread after assignments (Oguz Bastemur) + +* fs: `uv__cloexec()` opened fd (Fedor Indutny) + +* gyp: qualify `library` variable (Fedor Indutny) + +* unix, win: add uv_udp_set_multicast_interface() (Austin Foxley) + +* unix: fix uv_tcp_nodelay return value in case of error (Saúl Ibarra Corretgé) + +* unix: call setgoups before calling setuid/setgid (Saúl Ibarra Corretgé) + +* include: mark close_cb field as private (Saúl Ibarra Corretgé) + +* unix, windows: map EFBIG errno (Saúl Ibarra Corretgé) + +* unix: correct error when calling uv_shutdown twice (Keno Fischer) + +* windows: fix building on MinGW (Alex Crichton) + +* windows: always initialize uv_process_t (Alex Crichton) + +* include: expose libuv version in header files (Saúl Ibarra Corretgé) + +* fs: vectored IO API for filesystem read/write (Benjamin Saunders) + +* windows: freeze in uv_tcp_endgame (Alexis Campailla) + +* sunos: handle rearm errors (Fedor Indutny) + +* unix: use a heap for timers (Ben Noordhuis) + +* linux: always deregister closing fds from epoll (Geoffry Song) + +* linux: include grp.h for setgroups() (William Light) + +* unix, windows: add uv_loop_init and uv_loop_close (Saúl Ibarra Corretgé) + +* unix, windows: add uv_getrusage() function (Oleg Efimov) + +* win: minor error handle fix to uv_pipe_write_impl (Rasmus Pedersen) + +* heap: fix node removal (Keno Fischer) + +* win: fix C99/C++ comment (Rasmus Pedersen) + +* fs: vectored IO API for filesystem read/write (Benjamin Saunders) + +* unix, windows: add uv_pipe_getsockname (Saúl Ibarra Corretgé) + +* unix, windows: map ENOPROTOOPT errno (Saúl Ibarra Corretgé) + +* errno: add ETXTBSY (Fedor Indutny) + +* fsevent: rename filename field to path (Saúl Ibarra Corretgé) + +* unix, windows: add uv_fs_event_getpath (Saúl Ibarra Corretgé) + +* unix, windows: add uv_fs_poll_getpath (Saúl Ibarra Corretgé) + +* unix, windows: map ERANGE errno (Saúl Ibarra Corretgé) + +* unix, windows: set required size on UV_ENOBUFS (Saúl Ibarra Corretgé) + +* unix, windows: clarify what uv_stream_set_blocking does (Saúl Ibarra + Corretgé) + +* fs: use preadv on Linux if available (Brian White) + + +2014.01.30, Version 0.11.19 
(Unstable), 336a1825309744f920230ec3e427e78571772347 + +Changes since version 0.11.18: + +* linux: move sscanf() out of the assert() (Trevor Norris) + +* linux: fix C99/C++ comment (Fedor Indutny) + + +2014.05.02, Version 0.10.27 (Stable), 6e24ce23b1e7576059f85a608eca13b766458a01 + +Changes since version 0.10.26: + +* windows: fix console signal handler refcount (Saúl Ibarra Corretgé) + +* win: always leave crit section in get_proc_title (Fedor Indutny) + + +2014.04.07, Version 0.10.26 (Stable), d864907611c25ec986c5e77d4d6d6dee88f26926 + +Changes since version 0.10.25: + +* process: don't close stdio fds during spawn (Tonis Tiigi) + +* build, windows: do not fail on Windows SDK Prompt (Marc Schlaich) + +* build, windows: fix x64 configuration issue (Marc Schlaich) + +* win: fix buffer leak on error in pipe.c (Fedor Indutny) + +* kqueue: invalidate fd in uv_fs_event_t (Fedor Indutny) + +* linux: always deregister closing fds from epoll (Geoffry Song) + +* error: add ENXIO for O_NONBLOCK FIFO open() (Fedor Indutny) + + +2014.02.19, Version 0.10.25 (Stable), d778dc588507588b12b9f9d2905078db542ed751 + +Changes since version 0.10.24: + +* stream: start thread after assignments (Oguz Bastemur) + +* unix: correct error when calling uv_shutdown twice (Saúl Ibarra Corretgé) + +2014.01.30, Version 0.10.24 (Stable), aecd296b6bce9b40f06a61c5c94e43d45ac7308a + +Changes since version 0.10.23: + +* linux: move sscanf() out of the assert() (Trevor Norris) + +* linux: fix C99/C++ comment (Fedor Indutny) + + +2014.01.23, Version 0.11.18 (Unstable), d47962e9d93d4a55a9984623feaf546406c9cdbb + +Changes since version 0.11.17: + +* osx: Fix a possible segfault in uv__io_poll (Alex Crichton) + +* windows: improved handling of invalid FDs (Alexis Campailla) + +* doc: adding ARCHS flag to OS X build command (Nathan Sweet) + +* tcp: reveal bind-time errors before listen (Alexis Campailla) + +* tcp: uv_tcp_dualstack() (Fedor Indutny) + +* linux: relax assumption on /proc/stat parsing (Luca Bruno) + +* openbsd: fix obvious bug in uv_cpu_info (Fedor Indutny) + +* process: close stdio after dup2'ing it (Fedor Indutny) + +* linux: move sscanf() out of the assert() (Trevor Norris) + + +2014.01.23, Version 0.10.23 (Stable), dbd218e699fec8be311d85e4788be9e28ae884f8 + +Changes since version 0.10.22: + +* linux: relax assumption on /proc/stat parsing (Luca Bruno) + +* openbsd: fix obvious bug in uv_cpu_info (Fedor Indutny) + +* process: close stdio after dup2'ing it (Fedor Indutny) + + +2014.01.08, Version 0.10.22 (Stable), f526c90eeff271d9323a9107b9a64a4671fd3103 + +Changes since version 0.10.21: + +* windows: avoid assertion failure when pipe server is closed (Bert Belder) + + +2013.12.32, Version 0.11.17 (Unstable), 589c224d4c2e79fec65db01d361948f1e4976858 + +Changes since version 0.11.16: + +* stream: allow multiple buffers for uv_try_write (Fedor Indutny) + +* unix: fix a possible memory leak in uv_fs_readdir (Alex Crichton) + +* unix, windows: add uv_loop_alive() function (Sam Roberts) + +* windows: avoid assertion failure when pipe server is closed (Bert Belder) + +* osx: Fix a possible segfault in uv__io_poll (Alex Crichton) + +* stream: fix uv__stream_osx_select (Fedor Indutny) + + +2013.12.14, Version 0.11.16 (Unstable), ae0ed8c49d0d313c935c22077511148b6e8408a4 + +Changes since version 0.11.15: + +* fsevents: remove kFSEventStreamCreateFlagNoDefer polyfill (ci-innoq) + +* libuv: add more getaddrinfo errors (Steven Kabbes) + +* unix: fix accept() EMFILE error handling (Ben Noordhuis) + +* linux: fix up SO_REUSEPORT 
back-port (Ben Noordhuis) + +* fsevents: fix subfolder check (Fedor Indutny) + +* fsevents: fix invalid memory access (huxingyi) + +* windows/timer: fix uv_hrtime discontinuity (Bert Belder) + +* unix: fix various memory leaks and undef behavior (Fedor Indutny) + +* unix, windows: always update loop time (Saúl Ibarra Corretgé) + +* windows: translate system errors in uv_spawn (Alexis Campailla) + +* windows: uv_spawn code refactor (Alexis Campailla) + +* unix, windows: detect errors in uv_ip4/6_addr (Yorkie) + +* stream: introduce uv_try_write(...) (Fedor Indutny) + + +2013.12.13, Version 0.10.20 (Stable), 04141464dd0fba90ace9aa6f7003ce139b888a40 + +Changes since version 0.10.19: + +* linux: fix up SO_REUSEPORT back-port (Ben Noordhuis) + +* fs-event: fix invalid memory access (huxingyi) + + +2013.11.21, Version 0.11.15 (Unstable), bfe645ed7e99ca5670d9279ad472b604c129d2e5 + +Changes since version 0.11.14: + +* fsevents: report errors to user (Fedor Indutny) + +* include: UV_FS_EVENT_RECURSIVE is a flag (Fedor Indutny) + +* linux: use CLOCK_MONOTONIC_COARSE if available (Ben Noordhuis) + +* build: make systemtap probes work with gyp build (Ben Noordhuis) + +* unix: update events from pevents between polls (Fedor Indutny) + +* fsevents: support japaneese characters in path (Chris Bank) + +* linux: don't turn on SO_REUSEPORT socket option (Ben Noordhuis) + +* queue: strengthen type checks (Ben Noordhuis) + +* include: remove uv_strlcat() and uv_strlcpy() (Ben Noordhuis) + +* build: fix windows smp build with gyp (Geert Jansen) + +* unix: return exec errors from uv_spawn, not async (Alex Crichton) + +* fsevents: use native character encoding file paths (Ben Noordhuis) + +* linux: handle EPOLLHUP without EPOLLIN/EPOLLOUT (Ben Noordhuis) + +* windows: use _snwprintf(), not swprintf() (Ben Noordhuis) + +* fsevents: use FlagNoDefer for FSEventStreamCreate (Fedor Indutny) + +* unix: fix reopened fd bug (Fedor Indutny) + +* core: fix fake watcher list and count preservation (Fedor Indutny) + +* unix: set close-on-exec flag on received fds (Ben Noordhuis) + +* netbsd, openbsd: enable futimes() wrapper (Ben Noordhuis) + +* unix: nicer error message when kqueue() fails (Ben Noordhuis) + +* samples: add socks5 proxy sample application (Ben Noordhuis) + + +2013.11.13, Version 0.10.19 (Stable), 33959f7524090b8d2c6c41e2400ca77e31755059 + +Changes since version 0.10.18: + +* darwin: avoid calling GetCurrentProcess (Fedor Indutny) + +* unix: update events from pevents between polls (Fedor Indutny) + +* fsevents: support japaneese characters in path (Chris Bank) + +* linux: don't turn on SO_REUSEPORT socket option (Ben Noordhuis) + +* build: fix windows smp build with gyp (Geert Jansen) + +* linux: handle EPOLLHUP without EPOLLIN/EPOLLOUT (Ben Noordhuis) + +* unix: fix reopened fd bug (Fedor Indutny) + +* core: fix fake watcher list and count preservation (Fedor Indutny) + + +2013.10.30, Version 0.11.14 (Unstable), d7a6482f45c1b4eb4a853dbe1a9ce8090a35633a + +Changes since version 0.11.13: + +* darwin: create fsevents thread on demand (Ben Noordhuis) + +* fsevents: FSEvents is most likely not thread-safe (Fedor Indutny) + +* fsevents: use shared FSEventStream (Fedor Indutny) + +* windows: make uv_fs_chmod() report errors correctly (Bert Belder) + +* windows: make uv_shutdown() for write-only pipes work (Bert Belder) + +* windows/fs: wrap multi-statement macros in do..while block (Bert Belder) + +* windows/fs: make uv_fs_open() report EINVAL correctly (Bert Belder) + +* windows/fs: handle _open_osfhandle() 
failure correctly (Bert Belder) + +* windows/fs: wrap multi-statement macros in do..while block (Bert Belder) + +* windows/fs: make uv_fs_open() report EINVAL correctly (Bert Belder) + +* windows/fs: handle _open_osfhandle() failure correctly (Bert Belder) + +* build: clarify instructions for Windows (Brian Kaisner) + +* build: remove GCC_WARN_ABOUT_MISSING_NEWLINE (Ben Noordhuis) + +* darwin: fix 10.6 build error in fsevents.c (Ben Noordhuis) + +* windows: run close callbacks after polling for i/o (Saúl Ibarra Corretgé) + +* include: clarify uv_tcp_bind() behavior (Ben Noordhuis) + +* include: clean up includes in uv.h (Ben Noordhuis) + +* include: remove UV_IO_PRIVATE_FIELDS macro (Ben Noordhuis) + +* include: fix typo in comment in uv.h (Ben Noordhuis) + +* include: update uv_is_active() documentation (Ben Noordhuis) + +* include: make uv_process_options_t.cwd const (Ben Noordhuis) + +* unix: wrap long lines at 80 columns (Ben Noordhuis) + +* unix, windows: make uv_is_*() always return 0 or 1 (Ben Noordhuis) + +* bench: measure total/init/dispatch/cleanup times (Ben Noordhuis) + +* build: use -pthread on sunos (Timothy J. Fontaine) + +* windows: remove duplicate check in stream.c (Ben Noordhuis) + +* unix: sanity-check fds before closing (Ben Noordhuis) + +* unix: remove uv__pipe_accept() (Ben Noordhuis) + +* unix: fix uv_spawn() NULL pointer deref on ENOMEM (Ben Noordhuis) + +* unix: don't close inherited fds on uv_spawn() fail (Ben Noordhuis) + +* unix: revert recent FSEvent changes (Ben Noordhuis) + +* fsevents: fix clever rescheduling (Fedor Indutny) + +* linux: ignore fractional time in uv_uptime() (Ben Noordhuis) + +* unix: fix SIGCHLD waitpid() race in process.c (Ben Noordhuis) + +* unix, windows: add uv_fs_event_start/stop functions (Saúl Ibarra Corretgé) + +* unix: fix non-synchronized access in signal.c (Ben Noordhuis) + +* unix: add atomic-ops.h (Ben Noordhuis) + +* unix: add spinlock.h (Ben Noordhuis) + +* unix: clean up uv_tty_set_mode() a little (Ben Noordhuis) + +* unix: make uv_tty_reset_mode() async signal-safe (Ben Noordhuis) + +* include: add E2BIG status code mapping (Ben Noordhuis) + +* windows: fix duplicate case build error (Ben Noordhuis) + +* windows: remove unneeded check (Saúl Ibarra Corretgé) + +* include: document pipe path truncation behavior (Ben Noordhuis) + +* fsevents: increase stack size for OSX 10.9 (Fedor Indutny) + +* windows: _snprintf expected wrong parameter type in string (Maks Naumov) + +* windows: "else" keyword is missing (Maks Naumov) + +* windows: incorrect check for SOCKET_ERROR (Maks Naumov) + +* windows: add stdlib.h to satisfy reference to abort (Sean Farrell) + +* build: fix check target for mingw (Sean Farrell) + +* unix: move uv_shutdown() assertion (Keno Fischer) + +* darwin: avoid calling GetCurrentProcess (Fedor Indutny) + + +2013.10.19, Version 0.10.18 (Stable), 9ec52963b585e822e87bdc5de28d6143aff0d2e5 + +Changes since version 0.10.17: + +* unix: fix uv_spawn() NULL pointer deref on ENOMEM (Ben Noordhuis) + +* unix: don't close inherited fds on uv_spawn() fail (Ben Noordhuis) + +* unix: revert recent FSEvent changes (Ben Noordhuis) + +* unix: fix non-synchronized access in signal.c (Ben Noordhuis) + + +2013.09.25, Version 0.10.17 (Stable), 9670e0a93540c2f0d86c84a375f2303383c11e7e + +Changes since version 0.10.16: + +* build: remove GCC_WARN_ABOUT_MISSING_NEWLINE (Ben Noordhuis) + +* darwin: fix 10.6 build error in fsevents.c (Ben Noordhuis) + + +2013.09.06, Version 0.10.16 (Stable), 2bce230d81f4853a23662cbeb26fe98010b1084b + 
+Changes since version 0.10.15: + +* windows: make uv_shutdown() for write-only pipes work (Bert Belder) + +* windows: make uv_fs_open() report EINVAL when invalid arguments are passed + (Bert Belder) + +* windows: make uv_fs_open() report _open_osfhandle() failure correctly (Bert + Belder) + +* windows: make uv_fs_chmod() report errors correctly (Bert Belder) + +* windows: wrap multi-statement macros in do..while block (Bert Belder) + + +2013.09.05, Version 0.11.13 (Unstable), f5b6db6c1d7f93d28281207fd47c3841c9a9792e + +Changes since version 0.11.12: + +* unix: define _GNU_SOURCE, exposes glibc-isms (Ben Noordhuis) + +* windows: check for nonconforming swprintf arguments (Brent Cook) + +* build: include internal headers in source list (Brent Cook) + +* include: merge uv_tcp_bind and uv_tcp_bind6 (Ben Noordhuis) + +* include: merge uv_tcp_connect and uv_tcp_connect6 (Ben Noordhuis) + +* include: merge uv_udp_bind and uv_udp_bind6 (Ben Noordhuis) + +* include: merge uv_udp_send and uv_udp_send6 (Ben Noordhuis) + + +2013.09.03, Version 0.11.12 (Unstable), 82d01d5f6780d178f5176a01425ec297583c0811 + +Changes since version 0.11.11: + +* test: fix epoll_wait() usage in test-embed.c (Ben Noordhuis) + +* include: uv_alloc_cb now takes uv_buf_t* (Ben Noordhuis) + +* include: uv_read{2}_cb now takes const uv_buf_t* (Ben Noordhuis) + +* include: uv_ip[46]_addr now takes sockaddr_in* (Ben Noordhuis) + +* include: uv_tcp_bind{6} now takes sockaddr_in* (Ben Noordhuis) + +* include: uv_tcp_connect{6} now takes sockaddr_in* (Ben Noordhuis) + +* include: uv_udp_recv_cb now takes const uv_buf_t* (Ben Noordhuis) + +* include: uv_udp_bind{6} now takes sockaddr_in* (Ben Noordhuis) + +* include: uv_udp_send{6} now takes sockaddr_in* (Ben Noordhuis) + +* include: uv_spawn takes const uv_process_options_t* (Ben Noordhuis) + +* include: make uv_write{2} const correct (Ben Noordhuis) + +* windows: fix flags assignment in uv_fs_readdir() (Ben Noordhuis) + +* windows: fix stray comments (Ben Noordhuis) + +* windows: remove unused is_path_dir() function (Ben Noordhuis) + + +2013.08.30, Version 0.11.11 (Unstable), ba876d53539ed0427c52039012419cd9374c6f0d + +Changes since version 0.11.10: + +* unix, windows: add thread-local storage API (Ben Noordhuis) + +* linux: don't turn on SO_REUSEPORT socket option (Ben Noordhuis) + +* darwin: fix 10.6 build error in fsevents.c (Ben Noordhuis) + +* windows: make uv_shutdown() for write-only pipes work (Bert Belder) + +* include: update uv_udp_open() / uv_udp_bind() docs (Ben Noordhuis) + +* unix: req queue must be empty when destroying loop (Ben Noordhuis) + +* unix: move loop functions from core.c to loop.c (Ben Noordhuis) + +* darwin: remove CoreFoundation dependency (Ben Noordhuis) + +* windows: make autotools build system work with mingw (Keno Fischer) + +* windows: fix mingw build (Alex Crichton) + +* windows: tweak Makefile.mingw for easier usage (Alex Crichton) + +* build: remove _GNU_SOURCE macro definition (Ben Noordhuis) + + +2013.08.25, Version 0.11.10 (Unstable), 742dadcb7154cc7bb89c0c228a223b767a36cf0d + +* windows: Re-implement uv_fs_stat. The st_ctime field now contains the change + time, not the creation time, like on unix systems. st_dev, st_ino, st_blocks + and st_blksize are now also filled out. 
(Bert Belder) + +* linux: fix setsockopt(SO_REUSEPORT) error handling (Ben Noordhuis) + +* windows: report uv_process_t exit code correctly (Bert Belder) + +* windows: make uv_fs_chmod() report errors correctly (Bert Belder) + +* windows: make some more NT apis available for libuv's internal use (Bert + Belder) + +* windows: squelch some compiler warnings (Bert Belder) + + +2013.08.24, Version 0.11.9 (Unstable), a2d29b5b068cbac93dc16138fb30a74e2669daad + +Changes since version 0.11.8: + +* fsevents: share FSEventStream between multiple FS watchers, which removes a + limit on the maximum number of file watchers that can be created on OS X. + (Fedor Indutny) + +* process: the `exit_status` parameter for a uv_process_t's exit callback now + is an int64_t, and no longer an int. (Bert Belder) + +* process: make uv_spawn() return some types of errors immediately on windows, + instead of passing the error code the the exit callback. This brings it on + par with libuv's behavior on unix. (Bert Belder) + + +2013.08.24, Version 0.10.15 (Stable), 221078a8fdd9b853c6b557b3d9a5dd744b4fdd6b + +Changes since version 0.10.14: + +* fsevents: create FSEvents thread on demand (Ben Noordhuis) + +* fsevents: use a single thread for interacting with FSEvents, because it's not + thread-safe. (Fedor Indutny) + +* fsevents: share FSEventStream between multiple FS watchers, which removes a + limit on the maximum number of file watchers that can be created on OS X. + (Fedor Indutny) + + +2013.08.22, Version 0.11.8 (Unstable), a5260462db80ab0deab6b9e6a8991dd8f5a9a2f8 + +Changes since version 0.11.7: + +* unix: fix missing return value warning in stream.c (Ben Noordhuis) + +* build: serial-tests was added in automake v1.12 (Ben Noordhuis) + +* windows: fix uninitialized local variable warning (Ben Noordhuis) + +* windows: fix missing return value warning (Ben Noordhuis) + +* build: fix string comparisons in autogen.sh (Ben Noordhuis) + +* windows: move INLINE macro, remove UNUSED (Ben Noordhuis) + +* unix: clean up __attribute__((quux)) usage (Ben Noordhuis) + +* sunos: remove futimes() macro (Ben Noordhuis) + +* unix: fix uv__signal_unlock() prototype (Ben Noordhuis) + +* unix, windows: allow NULL async callback (Ben Noordhuis) + +* build: apply dtrace -G to all object files (Timothy J. 
Fontaine) + +* darwin: fix indentation in uv__hrtime() (Ben Noordhuis) + +* darwin: create fsevents thread on demand (Ben Noordhuis) + +* darwin: reduce fsevents thread stack size (Ben Noordhuis) + +* darwin: call pthread_setname_np() if available (Ben Noordhuis) + +* build: fix automake serial-tests check again (Ben Noordhuis) + +* unix: retry waitpid() on EINTR (Ben Noordhuis) + +* darwin: fix ios build error (Ben Noordhuis) + +* darwin: fix ios compiler warning (Ben Noordhuis) + +* test: simplify test-ip6-addr.c (Ben Noordhuis) + +* unix, windows: fix ipv6 link-local address parsing (Ben Noordhuis) + +* fsevents: FSEvents is most likely not thread-safe (Fedor Indutny) + +* windows: omit stdint.h, fix msvc 2008 build error (Ben Noordhuis) + + +2013.08.22, Version 0.10.14 (Stable), 15d64132151c18b26346afa892444b95e2addad0 + +Changes since version 0.10.13: + +* unix: retry waitpid() on EINTR (Ben Noordhuis) + + +2013.08.07, Version 0.11.7 (Unstable), 3cad361f8776f70941b39d65bd9426bcb1aa817b + +Changes since version 0.11.6: + +* unix, windows: fix uv_fs_chown() function prototype (Ben Noordhuis) + +* unix, windows: remove unused variables (Brian White) + +* test: fix signed/unsigned comparison warnings (Ben Noordhuis) + +* build: dtrace shouldn't break out of tree builds (Timothy J. Fontaine) + +* unix, windows: don't read/recv if buf.len==0 (Ben Noordhuis) + +* build: add mingw makefile (Ben Noordhuis) + +* unix, windows: add MAC to uv_interface_addresses() (Brian White) + +* build: enable AM_INIT_AUTOMAKE([subdir-objects]) (Ben Noordhuis) + +* unix, windows: make buf arg to uv_fs_write const (Ben Noordhuis) + +* sunos: fix build breakage introduced in e3a657c (Ben Noordhuis) + +* aix: fix build breakage introduced in 3ee4d3f (Ben Noordhuis) + +* windows: fix mingw32 build, define JOB_OBJECT_XXX (Yasuhiro Matsumoto) + +* windows: fix mingw32 build, include limits.h (Yasuhiro Matsumoto) + +* test: replace sprintf() with snprintf() (Ben Noordhuis) + +* test: replace strcpy() with strncpy() (Ben Noordhuis) + +* openbsd: fix uv_ip6_addr() unused variable warnings (Ben Noordhuis) + +* openbsd: fix dlerror() const correctness warning (Ben Noordhuis) + +* openbsd: fix uv_fs_sendfile() unused variable warnings (Ben Noordhuis) + +* build: disable parallel automake tests (Ben Noordhuis) + +* test: add windows-only snprintf() function (Ben Noordhuis) + +* build: add automake serial-tests version check (Ben Noordhuis) + + +2013.07.26, Version 0.10.13 (Stable), 381312e1fe6fecbabc943ccd56f0e7d114b3d064 + +Changes since version 0.10.12: + +* unix, windows: fix uv_fs_chown() function prototype (Ben Noordhuis) + + +2013.07.21, Version 0.11.6 (Unstable), 6645b93273e0553d23823c576573b82b129bf28c + +Changes since version 0.11.5: + +* test: open stdout fd in write-only mode (Ben Noordhuis) + +* windows: uv_spawn shouldn't reject reparse points (Bert Belder) + +* windows: use WSAGetLastError(), not errno (Ben Noordhuis) + +* build: darwin: disable -fstrict-aliasing warnings (Ben Noordhuis) + +* test: fix signed/unsigned compiler warning (Ben Noordhuis) + +* test: add 'start timer from check handle' test (Ben Noordhuis) + +* build: `all` now builds static and dynamic lib (Ben Noordhuis) + +* unix, windows: add extra fields to uv_stat_t (Saúl Ibarra Corretgé) + +* build: add install target to the makefile (Navaneeth Kedaram Nambiathan) + +* build: switch to autotools (Ben Noordhuis) + +* build: use AM_PROG_AR conditionally (Ben Noordhuis) + +* test: fix fs_fstat test on sunos (Ben Noordhuis) + +* test: fix 
fs_chown when running as root (Ben Noordhuis) + +* test: fix spawn_setgid_fails and spawn_setuid_fails (Ben Noordhuis) + +* build: use AM_SILENT_RULES conditionally (Ben Noordhuis) + +* build: add DTrace detection for autotools (Timothy J. Fontaine) + +* linux,darwin,win: link-local IPv6 addresses (Miroslav Bajtoš) + +* unix: fix build when !defined(PTHREAD_MUTEX_ERRORCHECK) (Ben Noordhuis) + +* unix, windows: return error codes directly (Ben Noordhuis) + + +2013.07.10, Version 0.10.12 (Stable), 58a46221bba726746887a661a9f36fe9ff204209 + +Changes since version 0.10.11: + +* linux: add support for MIPS (Andrei Sedoi) + +* windows: uv_spawn shouldn't reject reparse points (Bert Belder) + +* windows: use WSAGetLastError(), not errno (Ben Noordhuis) + +* build: darwin: disable -fstrict-aliasing warnings (Ben Noordhuis) + +* build: `all` now builds static and dynamic lib (Ben Noordhuis) + +* unix: fix build when !defined(PTHREAD_MUTEX_ERRORCHECK) (Ben Noordhuis) + + +2013.06.27, Version 0.11.5 (Unstable), e3c63ff1627a14e96f54c1c62b0d68b446d8425b + +Changes since version 0.11.4: + +* build: remove CSTDFLAG, use only CFLAGS (Ben Noordhuis) + +* unix: support for android builds (Linus Mårtensson) + +* unix: avoid extra read, short-circuit on POLLHUP (Ben Noordhuis) + +* uv: support android libuv standalone build (Linus Mårtensson) + +* src: make queue.h c++ compatible (Ben Noordhuis) + +* unix: s/ngx-queue.h/queue.h/ in checksparse.sh (Ben Noordhuis) + +* unix: unconditionally stop handle on close (Ben Noordhuis) + +* freebsd: don't enable dtrace if it's not available (Brian White) + +* build: make HAVE_DTRACE=0 should disable dtrace (Timothy J. Fontaine) + +* unix: remove overzealous assert (Ben Noordhuis) + +* unix: remove unused function uv_fatal_error() (Ben Noordhuis) + +* unix, windows: clean up uv_thread_create() (Ben Noordhuis) + +* queue: fix pointer truncation on LLP64 platforms (Bert Belder) + +* build: set OS=="android" for android builds (Linus Mårtensson) + +* windows: don't use uppercase in include filename (Ben Noordhuis) + +* stream: add an API to make streams do blocking writes (Henry Rawas) + +* windows: use WSAGetLastError(), not errno (Ben Noordhuis) + + +2013.06.13, Version 0.10.11 (Stable), c3b75406a66a10222a589cb173e8f469e9665c7e + +Changes since version 0.10.10: + +* unix: unconditionally stop handle on close (Ben Noordhuis) + +* freebsd: don't enable dtrace if it's not available (Brian White) + +* build: make HAVE_DTRACE=0 should disable dtrace (Timothy J. 
Fontaine) + +* unix: remove overzealous assert (Ben Noordhuis) + +* unix: clear UV_STREAM_SHUTTING after shutdown() (Ben Noordhuis) + +* unix: fix busy loop, write if POLLERR or POLLHUP (Ben Noordhuis) + + +2013.06.05, Version 0.10.10 (Stable), 0d95a88bd35fce93863c57a460be613aea34d2c5 + +Changes since version 0.10.9: + +* include: document uv_update_time() and uv_now() (Ben Noordhuis) + +* linux: fix cpu model parsing on newer arm kernels (Ben Noordhuis) + +* linux: fix a memory leak in uv_cpu_info() error path (Ben Noordhuis) + +* linux: don't ignore out-of-memory errors in uv_cpu_info() (Ben Noordhuis) + +* unix, windows: move uv_now() to uv-common.c (Ben Noordhuis) + +* test: fix a compilation problem in test-osx-select.c that was caused by the + use of c-style comments (Bert Belder) + +* darwin: use uv_fs_sendfile() use the sendfile api correctly (Wynn Wilkes) + + +2013.05.30, Version 0.11.4 (Unstable), e43e5b3d954a0989db5588aa110e1fe4fe6e0219 + +Changes since version 0.11.3: + +* windows: make uv_spawn not fail when the libuv embedding application is run + under external job control (Bert Belder) + +* darwin: assume CFRunLoopStop() isn't thread-safe, fixing a race condition + when stopping the 'stdin select hack' thread (Fedor Indutny) + +* win: fix UV_EALREADY not being reported correctly to the libuv user in some + cases (Bert Belder) + +* darwin: make the uv__cf_loop_runner and uv__cf_loop_cb functions static (Ben + Noordhuis) + +* darwin: task_info() cannot fail (Ben Noordhuis) + +* unix: add error mapping for ENETDOWN (Ben Noordhuis) + +* unix: implicitly signal write errors to the libuv user (Ben Noordhuis) + +* unix: fix assertion error on signal pipe overflow (Bert Belder) + +* unix: turn off POLLOUT after stream connect (Ben Noordhuis) + +* unix: fix stream refcounting buglet (Ben Noordhuis) + +* unix: remove assert statements that are no longer correct (Ben Noordhuis) + +* unix: appease warning about non-standard `inline` (Sean Silva) + +* unix: add uv__is_closing() macro (Ben Noordhuis) + +* unix: stop stream POLLOUT watcher on write error (Ben Noordhuis) + +* include: document uv_update_time() and uv_now() (Ben Noordhuis) + +* linux: fix cpu model parsing on newer arm kernels (Ben Noordhuis) + +* linux: fix a memory leak in uv_cpu_info() error path (Ben Noordhuis) + +* linux: don't ignore out-of-memory errors in uv_cpu_info() (Ben Noordhuis) + +* unix, windows: move uv_now() to uv-common.c (Ben Noordhuis) + +* test: fix a compilation problem in test-osx-select.c that was caused by the + use of c-style comments (Bert Belder) + +* darwin: use uv_fs_sendfile() use the sendfile api correctly (Wynn Wilkes) + +* windows: call idle handles on every loop iteration, something the unix + implementation already did (Bert Belder) + +* test: update the idle-starvation test to verify that idle handles are called + in every loop iteration (Bert Belder) + +* unix, windows: ensure that uv_run() in RUN_ONCE mode calls timers that expire + after blocking (Ben Noordhuis) + + +2013.05.29, Version 0.10.9 (Stable), a195f9ace23d92345baf57582678bfc3017e6632 + +Changes since version 0.10.8: + +* unix: fix stream refcounting buglet (Ben Noordhuis) + +* unix: remove erroneous asserts (Ben Noordhuis) + +* unix: add uv__is_closing() macro (Ben Noordhuis) + +* unix: stop stream POLLOUT watcher on write error (Ben Noordhuis) + + +2013.05.25, Version 0.10.8 (Stable), 0f39be12926fe2d8766a9f025797a473003e6504 + +Changes since version 0.10.7: + +* windows: make uv_spawn not fail under job control (Bert 
Belder) + +* darwin: assume CFRunLoopStop() isn't thread-safe (Fedor Indutny) + +* win: fix UV_EALREADY incorrectly set (Bert Belder) + +* darwin: make two uv__cf_*() functions static (Ben Noordhuis) + +* darwin: task_info() cannot fail (Ben Noordhuis) + +* unix: add mapping for ENETDOWN (Ben Noordhuis) + +* unix: implicitly signal write errors to libuv user (Ben Noordhuis) + +* unix: fix assert on signal pipe overflow (Bert Belder) + +* unix: turn off POLLOUT after stream connect (Ben Noordhuis) + + +2013.05.16, Version 0.11.3 (Unstable), 0a48c05b5988aea84c605751900926fa25443b34 + +Changes since version 0.11.2: + +* unix: clean up uv_accept() (Ben Noordhuis) + +* unix: remove errno preserving code (Ben Noordhuis) + +* darwin: fix ios build, don't require ApplicationServices (Ben Noordhuis) + +* windows: kill child processes when the parent dies (Bert Belder) + +* build: set soname in shared library (Ben Noordhuis) + +* build: make `make test` link against .a again (Ben Noordhuis) + +* build: only set soname on shared object builds (Timothy J. Fontaine) + +* build: convert predefined $PLATFORM to lower case (Elliot Saba) + +* test: fix process_title failing on linux (Miroslav Bajtoš) + +* test, sunos: disable process_title test (Miroslav Bajtoš) + +* test: add error logging to tty unit test (Miroslav Bajtoš) + + +2013.05.15, Version 0.10.7 (Stable), 028baaf0846b686a81e992cb2f2f5a9b8e841fcf + +Changes since version 0.10.6: + +* windows: kill child processes when the parent dies (Bert Belder) + + +2013.05.15, Version 0.10.6 (Stable), 11e6613e6260d95c8cf11bf89a2759c24649319a + +Changes since version 0.10.5: + +* stream: fix osx select hack (Fedor Indutny) + +* stream: fix small nit in select hack, add test (Fedor Indutny) + +* build: link with libkvm on openbsd (Ben Noordhuis) + +* stream: use harder sync restrictions for osx-hack (Fedor Indutny) + +* unix: fix EMFILE error handling (Ben Noordhuis) + +* darwin: fix unnecessary include headers (Daisuke Murase) + +* darwin: rename darwin-getproctitle.m (Ben Noordhuis) + +* build: convert predefined $PLATFORM to lower case (Elliot Saba) + +* build: set soname in shared library (Ben Noordhuis) + +* build: make `make test` link against .a again (Ben Noordhuis) + +* darwin: fix ios build, don't require ApplicationServices (Ben Noordhuis) + +* build: only set soname on shared object builds (Timothy J. Fontaine) + + +2013.05.11, Version 0.11.2 (Unstable), 3fba0bf65f091b91a9760530c05c6339c658d88b + +Changes since version 0.11.1: + +* darwin: look up file path with F_GETPATH (Ben Noordhuis) + +* unix, windows: add uv_has_ref() function (Saúl Ibarra Corretgé) + +* build: avoid double / in paths for dtrace (Timothy J. 
Fontaine) + +* unix: remove src/unix/cygwin.c (Ben Noordhuis) + +* windows: deal with the fact that GetTickCount might lag (Bert Belder) + +* unix: silence STATIC_ASSERT compiler warnings (Ben Noordhuis) + +* linux: don't use fopen() in uv_resident_set_memory() (Ben Noordhuis) + + +2013.04.24, Version 0.10.5 (Stable), 6595a7732c52eb4f8e57c88655f72997a8567a67 + +Changes since version 0.10.4: + +* unix: silence STATIC_ASSERT compiler warnings (Ben Noordhuis) + +* windows: make timers handle large timeouts (Miroslav Bajtoš) + +* windows: remove superfluous assert statement (Bert Belder) + +* unix: silence STATIC_ASSERT compiler warnings (Ben Noordhuis) + +* linux: don't use fopen() in uv_resident_set_memory() (Ben Noordhuis) + + +2013.04.12, Version 0.10.4 (Stable), 85827e26403ac6dfa331af8ec9916ea7e27bd833 + +Changes since version 0.10.3: + +* include: update uv_backend_fd() documentation (Ben Noordhuis) + +* unix: include uv.h in src/version.c (Ben Noordhuis) + +* unix: don't write more than IOV_MAX iovecs (Fedor Indutny) + +* mingw-w64: don't call _set_invalid_parameter_handler (Nils Maier) + +* build: gyp disable thin archives (Timothy J. Fontaine) + +* sunos: re-export entire library when static (Timothy J. Fontaine) + +* unix: dtrace probes for tick-start and tick-stop (Timothy J. Fontaine) + +* windows: fix memory leak in fs__sendfile (Shannen Saez) + +* windows: remove double initialization in uv_tty_init (Shannen Saez) + +* build: fix dtrace-enabled out of tree build (Ben Noordhuis) + +* build: squelch -Wdollar-in-identifier-extension warnings (Ben Noordhuis) + +* inet: snprintf returns int, not size_t (Brian White) + +* win: refactor uv_cpu_info (Bert Belder) + +* build: add support for Visual Studio 2012 (Nicholas Vavilov) + +* build: -Wno-dollar-in-identifier-extension is clang only (Ben Noordhuis) + + +2013.04.11, Version 0.11.1 (Unstable), 5c10e82ae0bc99eff86d4b9baff1f1aa0bf84c0a + +This is the first versioned release from the current unstable libuv branch. + +Changes since Node.js v0.11.0: + +* all platforms: nanosecond resolution support for uv_fs_[fl]stat (Timothy J. + Fontaine) + +* all platforms: add netmask to uv_interface_address (Ben Kelly) + +* unix: make sure the `status` parameter passed to the `uv_getaddrinfo` is 0 or + -1 (Ben Noordhuis) + +* unix: limit the number of iovecs written in a single `writev` syscall to + IOV_MAX (Fedor Indutny) + +* unix: add dtrace probes for tick-start and tick-stop (Timothy J. Fontaine) + +* mingw-w64: don't call _set_invalid_parameter_handler (Nils Maier) + +* windows: fix memory leak in fs__sendfile (Shannen Saez) + +* windows: fix edge case bugs in uv_cpu_info (Bert Belder) + +* include: no longer ship with / include ngx-queue.h (Ben Noordhuis) + +* include: remove UV_VERSION_* macros from uv.h (Ben Noordhuis) + +* documentation updates (Kristian Evensen, Ben Kelly, Ben Noordhuis) + +* build: fix dtrace-enabled builds (Ben Noordhuis, Timothy J. Fontaine) + +* build: gyp disable thin archives (Timothy J. 
Fontaine) + +* build: add support for Visual Studio 2012 (Nicholas Vavilov) + + +2013.03.28, Version 0.10.3 (Stable), 31ebe23973dd98fd8a24c042b606f37a794e99d0 + +Changes since version 0.10.2: + +* include: remove extraneous const from uv_version() (Ben Noordhuis) + +* doc: update README, replace `OS` by `PLATFORM` (Ben Noordhuis) + +* build: simplify .buildstamp rule (Ben Noordhuis) + +* build: disable -Wstrict-aliasing on darwin (Ben Noordhuis) + +* darwin: don't select(&exceptfds) in fallback path (Ben Noordhuis) + +* unix: don't clear flags after closing UDP handle (Saúl Ibarra Corretgé) + + +2013.03.25, Version 0.10.2 (Stable), 0f36a00568f3e7608f97f6c6cdb081f4800a50c9 + +This is the first officially versioned release of libuv. Starting now +libuv will make releases independently of Node.js. + +Changes since Node.js v0.10.0: + +* test: add tap output for windows (Timothy J. Fontaine) + +* unix: fix uv_tcp_simultaneous_accepts() logic (Ben Noordhuis) + +* include: bump UV_VERSION_MINOR (Ben Noordhuis) + +* unix: improve uv_guess_handle() implementation (Ben Noordhuis) + +* stream: run try_select only for pipes and ttys (Fedor Indutny) + +Changes since Node.js v0.10.1: + +* build: rename OS to PLATFORM (Ben Noordhuis) + +* unix: make uv_timer_init() initialize repeat (Brian Mazza) + +* unix: make timers handle large timeouts (Ben Noordhuis) + +* build: add OBJC makefile var (Ben Noordhuis) + +* Add `uv_version()` and `uv_version_string()` APIs (Bert Belder) diff --git a/deps/libuv/LICENSE b/deps/libuv/LICENSE new file mode 100644 index 00000000..41ba44c2 --- /dev/null +++ b/deps/libuv/LICENSE @@ -0,0 +1,70 @@ +libuv is licensed for use as follows: + +==== +Copyright (c) 2015-present libuv project contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. +==== + +This license applies to parts of libuv originating from the +https://github.com/joyent/libuv repository: + +==== + +Copyright Joyent, Inc. and other Node contributors. All rights reserved. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + +==== + +This license applies to all parts of libuv that are not externally +maintained libraries. + +The externally maintained libraries used by libuv are: + + - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license. + + - inet_pton and inet_ntop implementations, contained in src/inet.c, are + copyright the Internet Systems Consortium, Inc., and licensed under the ISC + license. + + - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three + clause BSD license. + + - pthread-fixes.h, pthread-fixes.c, copyright Google Inc. and Sony Mobile + Communications AB. Three clause BSD license. + + - android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design + Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement + n° 289016). Three clause BSD license. diff --git a/deps/libuv/MAINTAINERS.md b/deps/libuv/MAINTAINERS.md new file mode 100644 index 00000000..f2f3db5a --- /dev/null +++ b/deps/libuv/MAINTAINERS.md @@ -0,0 +1,41 @@ + +# Project Maintainers + +libuv is currently managed by the following individuals: + +* **Ben Noordhuis** ([@bnoordhuis](https://github.com/bnoordhuis)) + - GPG key: D77B 1E34 243F BAF0 5F8E 9CC3 4F55 C8C8 46AB 89B9 (pubkey-bnoordhuis) +* **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus)) +* **Colin Ihrig** ([@cjihrig](https://github.com/cjihrig)) + - GPG key: 94AE 3667 5C46 4D64 BAFA 68DD 7434 390B DBE9 B9C5 (pubkey-cjihrig) +* **Fedor Indutny** ([@indutny](https://github.com/indutny)) + - GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny) +* **Imran Iqbal** ([@iWuzHere](https://github.com/iWuzHere)) + - GPG key: 9DFE AA5F 481B BF77 2D90 03CE D592 4925 2F8E C41A (pubkey-iwuzhere) +* **Santiago Gimeno** ([@santigimeno](https://github.com/santigimeno)) + - GPG key: 612F 0EAD 9401 6223 79DF 4402 F28C 3C8D A33C 03BE (pubkey-santigimeno) +* **Saúl Ibarra Corretgé** ([@saghul](https://github.com/saghul)) + - GPG key: FDF5 1936 4458 319F A823 3DC9 410E 5553 AE9B C059 (pubkey-saghul) + +## Storing a maintainer key in Git + +It's quite handy to store a maintainer's signature as a git blob, and have +that object tagged and signed with such key. + +Export your public key: + + $ gpg --armor --export saghul@gmail.com > saghul.asc + +Store it as a blob on the repo: + + $ git hash-object -w saghul.asc + +The previous command returns a hash, copy it. For the sake of this explanation, +we'll assume it's 'abcd1234'. 
Storing the blob in git is not enough, it could +be garbage collected since nothing references it, so we'll create a tag for it: + + $ git tag -s pubkey-saghul abcd1234 + +Commit the changes and push: + + $ git push origin pubkey-saghul diff --git a/deps/libuv/Makefile.am b/deps/libuv/Makefile.am new file mode 100644 index 00000000..c232b6db --- /dev/null +++ b/deps/libuv/Makefile.am @@ -0,0 +1,412 @@ +# Copyright (c) 2013, Ben Noordhuis +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +ACLOCAL_AMFLAGS = -I m4 + +AM_CPPFLAGS = -I$(top_srcdir)/include \ + -I$(top_srcdir)/src + +include_HEADERS=include/uv.h include/uv-errno.h include/uv-threadpool.h include/uv-version.h + +CLEANFILES = + +lib_LTLIBRARIES = libuv.la +libuv_la_CFLAGS = @CFLAGS@ +libuv_la_LDFLAGS = -no-undefined -version-info 1:0:0 +libuv_la_SOURCES = src/fs-poll.c \ + src/heap-inl.h \ + src/inet.c \ + src/queue.h \ + src/threadpool.c \ + src/uv-common.c \ + src/uv-common.h \ + src/version.c + +if SUNOS +# Can't be turned into a CC_CHECK_CFLAGS in configure.ac, it makes compilers +# on other platforms complain that the argument is unused during compilation. 
+libuv_la_CFLAGS += -pthreads +endif + +if WINNT + +include_HEADERS += include/uv-win.h include/tree.h +AM_CPPFLAGS += -I$(top_srcdir)/src/win \ + -DWIN32_LEAN_AND_MEAN \ + -D_WIN32_WINNT=0x0600 +libuv_la_SOURCES += src/win/async.c \ + src/win/atomicops-inl.h \ + src/win/core.c \ + src/win/detect-wakeup.c \ + src/win/dl.c \ + src/win/error.c \ + src/win/fs-event.c \ + src/win/fs.c \ + src/win/getaddrinfo.c \ + src/win/getnameinfo.c \ + src/win/handle.c \ + src/win/handle-inl.h \ + src/win/internal.h \ + src/win/loop-watcher.c \ + src/win/pipe.c \ + src/win/poll.c \ + src/win/process-stdio.c \ + src/win/process.c \ + src/win/req.c \ + src/win/req-inl.h \ + src/win/signal.c \ + src/win/stream.c \ + src/win/stream-inl.h \ + src/win/tcp.c \ + src/win/thread.c \ + src/win/timer.c \ + src/win/tty.c \ + src/win/udp.c \ + src/win/util.c \ + src/win/winapi.c \ + src/win/winapi.h \ + src/win/winsock.c \ + src/win/winsock.h + +else # WINNT + +include_HEADERS += include/uv-unix.h +AM_CPPFLAGS += -I$(top_srcdir)/src/unix +libuv_la_SOURCES += src/unix/async.c \ + src/unix/atomic-ops.h \ + src/unix/core.c \ + src/unix/dl.c \ + src/unix/fs.c \ + src/unix/getaddrinfo.c \ + src/unix/getnameinfo.c \ + src/unix/internal.h \ + src/unix/loop-watcher.c \ + src/unix/loop.c \ + src/unix/pipe.c \ + src/unix/poll.c \ + src/unix/process.c \ + src/unix/signal.c \ + src/unix/spinlock.h \ + src/unix/stream.c \ + src/unix/tcp.c \ + src/unix/thread.c \ + src/unix/timer.c \ + src/unix/tty.c \ + src/unix/udp.c + +endif # WINNT + +EXTRA_DIST = test/fixtures/empty_file \ + test/fixtures/load_error.node \ + include \ + test \ + docs \ + img \ + samples \ + android-configure \ + CONTRIBUTING.md \ + LICENSE \ + README.md \ + checksparse.sh \ + vcbuild.bat \ + Makefile.mingw \ + common.gypi \ + gyp_uv.py \ + uv.gyp + + + +TESTS = test/run-tests +check_PROGRAMS = test/run-tests +if OS390 +test_run_tests_CFLAGS = +else +test_run_tests_CFLAGS = -Wno-long-long +endif + +if SUNOS +# Can't be turned into a CC_CHECK_CFLAGS in configure.ac, it makes compilers +# on other platforms complain that the argument is unused during compilation. 
+test_run_tests_CFLAGS += -pthreads +endif + +test_run_tests_LDFLAGS = +test_run_tests_SOURCES = test/blackhole-server.c \ + test/dns-server.c \ + test/echo-server.c \ + test/run-tests.c \ + test/runner.c \ + test/runner.h \ + test/task.h \ + test/test-active.c \ + test/test-async.c \ + test/test-async-null-cb.c \ + test/test-barrier.c \ + test/test-callback-order.c \ + test/test-callback-stack.c \ + test/test-close-fd.c \ + test/test-close-order.c \ + test/test-condvar.c \ + test/test-connection-fail.c \ + test/test-cwd-and-chdir.c \ + test/test-default-loop-close.c \ + test/test-delayed-accept.c \ + test/test-dlerror.c \ + test/test-eintr-handling.c \ + test/test-embed.c \ + test/test-emfile.c \ + test/test-error.c \ + test/test-fail-always.c \ + test/test-fs-event.c \ + test/test-fs-poll.c \ + test/test-fs.c \ + test/test-get-currentexe.c \ + test/test-get-loadavg.c \ + test/test-get-memory.c \ + test/test-get-passwd.c \ + test/test-getaddrinfo.c \ + test/test-getnameinfo.c \ + test/test-getsockname.c \ + test/test-handle-fileno.c \ + test/test-homedir.c \ + test/test-hrtime.c \ + test/test-idle.c \ + test/test-ip4-addr.c \ + test/test-ip6-addr.c \ + test/test-ipc-send-recv.c \ + test/test-ipc.c \ + test/test-list.h \ + test/test-loop-handles.c \ + test/test-loop-alive.c \ + test/test-loop-close.c \ + test/test-loop-stop.c \ + test/test-loop-time.c \ + test/test-loop-configure.c \ + test/test-multiple-listen.c \ + test/test-mutexes.c \ + test/test-osx-select.c \ + test/test-pass-always.c \ + test/test-ping-pong.c \ + test/test-pipe-bind-error.c \ + test/test-pipe-connect-error.c \ + test/test-pipe-connect-multiple.c \ + test/test-pipe-connect-prepare.c \ + test/test-pipe-getsockname.c \ + test/test-pipe-pending-instances.c \ + test/test-pipe-sendmsg.c \ + test/test-pipe-server-close.c \ + test/test-pipe-close-stdout-read-stdin.c \ + test/test-pipe-set-non-blocking.c \ + test/test-platform-output.c \ + test/test-poll-close.c \ + test/test-poll-close-doesnt-corrupt-stack.c \ + test/test-poll-closesocket.c \ + test/test-poll.c \ + test/test-process-title.c \ + test/test-queue-foreach-delete.c \ + test/test-ref.c \ + test/test-run-nowait.c \ + test/test-run-once.c \ + test/test-semaphore.c \ + test/test-shutdown-close.c \ + test/test-shutdown-eof.c \ + test/test-shutdown-twice.c \ + test/test-signal-multiple-loops.c \ + test/test-signal.c \ + test/test-socket-buffer-size.c \ + test/test-spawn.c \ + test/test-stdio-over-pipes.c \ + test/test-tcp-alloc-cb-fail.c \ + test/test-tcp-bind-error.c \ + test/test-tcp-bind6-error.c \ + test/test-tcp-close-accept.c \ + test/test-tcp-close-while-connecting.c \ + test/test-tcp-close.c \ + test/test-tcp-create-socket-early.c \ + test/test-tcp-connect-error-after-write.c \ + test/test-tcp-connect-error.c \ + test/test-tcp-connect-timeout.c \ + test/test-tcp-connect6-error.c \ + test/test-tcp-flags.c \ + test/test-tcp-open.c \ + test/test-tcp-read-stop.c \ + test/test-tcp-shutdown-after-write.c \ + test/test-tcp-unexpected-read.c \ + test/test-tcp-oob.c \ + test/test-tcp-write-to-half-open-connection.c \ + test/test-tcp-write-after-connect.c \ + test/test-tcp-writealot.c \ + test/test-tcp-write-fail.c \ + test/test-tcp-try-write.c \ + test/test-tcp-write-queue-order.c \ + test/test-thread-equal.c \ + test/test-thread.c \ + test/test-threadpool-cancel.c \ + test/test-threadpool.c \ + test/test-timer-again.c \ + test/test-timer-from-check.c \ + test/test-timer.c \ + test/test-tmpdir.c \ + test/test-tty.c \ + test/test-udp-alloc-cb-fail.c \ + 
test/test-udp-bind.c \ + test/test-udp-create-socket-early.c \ + test/test-udp-dgram-too-big.c \ + test/test-udp-ipv6.c \ + test/test-udp-multicast-interface.c \ + test/test-udp-multicast-interface6.c \ + test/test-udp-multicast-join.c \ + test/test-udp-multicast-join6.c \ + test/test-udp-multicast-ttl.c \ + test/test-udp-open.c \ + test/test-udp-options.c \ + test/test-udp-send-and-recv.c \ + test/test-udp-send-immediate.c \ + test/test-udp-send-unreachable.c \ + test/test-udp-try-send.c \ + test/test-walk-handles.c \ + test/test-watcher-cross-stop.c +test_run_tests_LDADD = libuv.la + +if WINNT +test_run_tests_SOURCES += test/runner-win.c \ + test/runner-win.h +else +test_run_tests_SOURCES += test/runner-unix.c \ + test/runner-unix.h +endif + +if AIX +test_run_tests_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT +endif + +if LINUX +test_run_tests_CFLAGS += -D_GNU_SOURCE +endif + +if SUNOS +test_run_tests_CFLAGS += -D__EXTENSIONS__ -D_XOPEN_SOURCE=500 +endif + +if OS390 +test_run_tests_CFLAGS += -D_UNIX03_THREADS \ + -D_UNIX03_SOURCE \ + -D_OPEN_SYS_IF_EXT=1 \ + -D_OPEN_SYS_SOCK_IPV6 \ + -D_OPEN_MSGQ_EXT \ + -D_XOPEN_SOURCE_EXTENDED \ + -D_ALL_SOURCE \ + -D_LARGE_TIME_API \ + -D_OPEN_SYS_FILE_EXT \ + -DPATH_MAX=255 \ + -qCHARS=signed \ + -qXPLINK \ + -qFLOAT=IEEE +endif + +if AIX +libuv_la_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT -D_THREAD_SAFE +include_HEADERS += include/uv-aix.h +libuv_la_SOURCES += src/unix/aix.c +endif + +if ANDROID +include_HEADERS += include/android-ifaddrs.h \ + include/pthread-barrier.h +libuv_la_SOURCES += src/unix/android-ifaddrs.c \ + src/unix/pthread-fixes.c \ + src/unix/pthread-barrier.c +endif + +if DARWIN +include_HEADERS += include/uv-darwin.h \ + include/pthread-barrier.h +libuv_la_CFLAGS += -D_DARWIN_USE_64_BIT_INODE=1 +libuv_la_CFLAGS += -D_DARWIN_UNLIMITED_SELECT=1 +libuv_la_SOURCES += src/unix/darwin.c \ + src/unix/darwin-proctitle.c \ + src/unix/fsevents.c \ + src/unix/kqueue.c \ + src/unix/proctitle.c \ + src/unix/pthread-barrier.c +test_run_tests_LDFLAGS += -lutil +endif + +if DRAGONFLY +include_HEADERS += include/uv-bsd.h +libuv_la_SOURCES += src/unix/freebsd.c src/unix/kqueue.c +test_run_tests_LDFLAGS += -lutil +endif + +if FREEBSD +include_HEADERS += include/uv-bsd.h +libuv_la_SOURCES += src/unix/freebsd.c src/unix/kqueue.c +test_run_tests_LDFLAGS += -lutil +endif + +if LINUX +include_HEADERS += include/uv-linux.h +libuv_la_CFLAGS += -D_GNU_SOURCE +libuv_la_SOURCES += src/unix/linux-core.c \ + src/unix/linux-inotify.c \ + src/unix/linux-syscalls.c \ + src/unix/linux-syscalls.h \ + src/unix/proctitle.c +test_run_tests_LDFLAGS += -lutil +endif + +if NETBSD +include_HEADERS += include/uv-bsd.h +libuv_la_SOURCES += src/unix/kqueue.c src/unix/netbsd.c +test_run_tests_LDFLAGS += -lutil +endif + +if OPENBSD +include_HEADERS += include/uv-bsd.h +libuv_la_SOURCES += src/unix/kqueue.c src/unix/openbsd.c +test_run_tests_LDFLAGS += -lutil +endif + +if SUNOS +include_HEADERS += include/uv-sunos.h +libuv_la_CFLAGS += -D__EXTENSIONS__ -D_XOPEN_SOURCE=500 +libuv_la_SOURCES += src/unix/sunos.c +endif + +if OS390 +include_HEADERS += include/pthread-fixes.h include/pthread-barrier.h +libuv_la_CFLAGS += -D_UNIX03_THREADS \ + -D_UNIX03_SOURCE \ + -D_OPEN_SYS_IF_EXT=1 \ + -D_OPEN_MSGQ_EXT \ + -D_XOPEN_SOURCE_EXTENDED \ + -D_ALL_SOURCE \ + -D_LARGE_TIME_API \ + -D_OPEN_SYS_SOCK_IPV6 \ + -D_OPEN_SYS_FILE_EXT \ + -DUV_PLATFORM_SEM_T=int \ + -DPATH_MAX=255 \ + -qCHARS=signed \ + -qXPLINK \ + -qFLOAT=IEEE +libuv_la_LDFLAGS += 
-qXPLINK +libuv_la_SOURCES += src/unix/pthread-fixes.c \ + src/unix/pthread-barrier.c +libuv_la_SOURCES += src/unix/os390.c +endif + +if HAVE_PKG_CONFIG +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = @PACKAGE_NAME@.pc +endif diff --git a/deps/libuv/Makefile.mingw b/deps/libuv/Makefile.mingw new file mode 100644 index 00000000..8139138f --- /dev/null +++ b/deps/libuv/Makefile.mingw @@ -0,0 +1,85 @@ +# Copyright (c) 2013, Ben Noordhuis +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +CC ?= gcc + +CFLAGS += -Wall \ + -Wextra \ + -Wno-unused-parameter \ + -Iinclude \ + -Isrc \ + -Isrc/win \ + -DWIN32_LEAN_AND_MEAN \ + -D_WIN32_WINNT=0x0600 + +INCLUDES = include/stdint-msvc2008.h \ + include/tree.h \ + include/uv-errno.h \ + include/uv-threadpool.h \ + include/uv-version.h \ + include/uv-win.h \ + include/uv.h \ + src/heap-inl.h \ + src/queue.h \ + src/uv-common.h \ + src/win/atomicops-inl.h \ + src/win/handle-inl.h \ + src/win/internal.h \ + src/win/req-inl.h \ + src/win/stream-inl.h \ + src/win/winapi.h \ + src/win/winsock.h + +OBJS = src/fs-poll.o \ + src/inet.o \ + src/threadpool.o \ + src/uv-common.o \ + src/version.o \ + src/win/async.o \ + src/win/core.o \ + src/win/detect-wakeup.o \ + src/win/dl.o \ + src/win/error.o \ + src/win/fs-event.o \ + src/win/fs.o \ + src/win/getaddrinfo.o \ + src/win/getnameinfo.o \ + src/win/handle.o \ + src/win/loop-watcher.o \ + src/win/pipe.o \ + src/win/poll.o \ + src/win/process-stdio.o \ + src/win/process.o \ + src/win/req.o \ + src/win/signal.o \ + src/win/stream.o \ + src/win/tcp.o \ + src/win/thread.o \ + src/win/timer.o \ + src/win/tty.o \ + src/win/udp.o \ + src/win/util.o \ + src/win/winapi.o \ + src/win/winsock.o + +all: libuv.a + +clean: + -$(RM) $(OBJS) libuv.a + +libuv.a: $(OBJS) + $(AR) crs $@ $^ + +$(OBJS): %.o : %.c $(INCLUDES) + $(CC) $(CFLAGS) -c -o $@ $< diff --git a/deps/libuv/README.md b/deps/libuv/README.md new file mode 100644 index 00000000..284dfb47 --- /dev/null +++ b/deps/libuv/README.md @@ -0,0 +1,250 @@ +![libuv][libuv_banner] + +## Overview + +libuv is a multi-platform support library with a focus on asynchronous I/O. It +was primarily developed for use by [Node.js](http://nodejs.org), but it's also +used by [Luvit](http://luvit.io/), [Julia](http://julialang.org/), +[pyuv](https://github.com/saghul/pyuv), and [others](https://github.com/libuv/libuv/wiki/Projects-that-use-libuv). + +## Feature highlights + + * Full-featured event loop backed by epoll, kqueue, IOCP, event ports. 
+ + * Asynchronous TCP and UDP sockets + + * Asynchronous DNS resolution + + * Asynchronous file and file system operations + + * File system events + + * ANSI escape code controlled TTY + + * IPC with socket sharing, using Unix domain sockets or named pipes (Windows) + + * Child processes + + * Thread pool + + * Signal handling + + * High resolution clock + + * Threading and synchronization primitives + +## Versioning + +Starting with version 1.0.0 libuv follows the [semantic versioning](http://semver.org/) +scheme. The API change and backwards compatibility rules are those indicated by +SemVer. libuv will keep a stable ABI across major releases. + +The ABI/API changes can be tracked [here](http://abi-laboratory.pro/tracker/timeline/libuv/). + +## Licensing + +libuv is licensed under the MIT license. Check the [LICENSE file](LICENSE). + +## Community + + * [Mailing list](http://groups.google.com/group/libuv) + * [IRC chatroom (#libuv@irc.freenode.org)](http://webchat.freenode.net?channels=libuv&uio=d4) + +## Documentation + +### Official API documentation + +Located in the docs/ subdirectory. It uses the [Sphinx](http://sphinx-doc.org/) +framework, which makes it possible to build the documentation in multiple +formats. + +Show different supported building options: + + $ make help + +Build documentation as HTML: + + $ make html + +Build documentation as HTML and live reload it when it changes (this requires +sphinx-autobuild to be installed and is only supported on Unix): + + $ make livehtml + +Build documentation as man pages: + + $ make man + +Build documentation as ePub: + + $ make epub + +NOTE: Windows users need to use make.bat instead of plain 'make'. + +Documentation can be browsed online [here](http://docs.libuv.org). + +The [tests and benchmarks](https://github.com/libuv/libuv/tree/master/test) +also serve as API specification and usage examples. + +### Other resources + + * [An Introduction to libuv](http://nikhilm.github.com/uvbook/) + — An overview of libuv with tutorials. + * [LXJS 2012 talk](http://www.youtube.com/watch?v=nGn60vDSxQ4) + — High-level introductory talk about libuv. + * [libuv-dox](https://github.com/thlorenz/libuv-dox) + — Documenting types and methods of libuv, mostly by reading uv.h. + * [learnuv](https://github.com/thlorenz/learnuv) + — Learn uv for fun and profit, a self guided workshop to libuv. + +These resources are not handled by libuv maintainers and might be out of +date. Please verify it before opening new issues. + +## Downloading + +libuv can be downloaded either from the +[GitHub repository](https://github.com/libuv/libuv) +or from the [downloads site](http://dist.libuv.org/dist/). + +Starting with libuv 1.7.0, binaries for Windows are also provided. This is to +be considered EXPERIMENTAL. + +Before verifying the git tags or signature files, importing the relevant keys +is necessary. Key IDs are listed in the +[MAINTAINERS](https://github.com/libuv/libuv/blob/master/MAINTAINERS.md) +file, but are also available as git blob objects for easier use. + +Importing a key the usual way: + + $ gpg --keyserver pool.sks-keyservers.net \ + --recv-keys AE9BC059 + +Importing a key from a git blob object: + + $ git show pubkey-saghul | gpg --import + +### Verifying releases + +Git tags are signed with the developer's key, they can be verified as follows: + + $ git verify-tag v1.6.1 + +Starting with libuv 1.7.0, the tarballs stored in the +[downloads site](http://dist.libuv.org/dist/) are signed and an accompanying +signature file sit alongside each. 
Once both the release tarball and the +signature file are downloaded, the file can be verified as follows: + + $ gpg --verify libuv-1.7.0.tar.gz.sign + +## Build Instructions + +For GCC there are two build methods: via autotools or via [GYP][]. +GYP is a meta-build system which can generate MSVS, Makefile, and XCode +backends. It is best used for integration into other projects. + +To build with autotools: + + $ sh autogen.sh + $ ./configure + $ make + $ make check + $ make install + +### Windows + +First, [Python][] 2.6 or 2.7 must be installed as it is required by [GYP][]. +If python is not in your path, set the environment variable `PYTHON` to its +location. For example: `set PYTHON=C:\Python27\python.exe` + +To build with Visual Studio, launch a git shell (e.g. Cmd or PowerShell) +and run vcbuild.bat which will checkout the GYP code into build/gyp and +generate uv.sln as well as related project files. + +To have GYP generate build script for another system, checkout GYP into the +project tree manually: + + $ git clone https://chromium.googlesource.com/external/gyp.git build/gyp + +### Unix + +For Debug builds (recommended) run: + + $ ./gyp_uv.py -f make + $ make -C out + +For Release builds run: + + $ ./gyp_uv.py -f make + $ BUILDTYPE=Release make -C out + +Run `./gyp_uv.py -f make -Dtarget_arch=x32` to build [x32][] binaries. + +### OS X + +Run: + + $ ./gyp_uv.py -f xcode + $ xcodebuild -ARCHS="x86_64" -project uv.xcodeproj \ + -configuration Release -target All + +Using Homebrew: + + $ brew install --HEAD libuv + +Note to OS X users: + +Make sure that you specify the architecture you wish to build for in the +"ARCHS" flag. You can specify more than one by delimiting with a space +(e.g. "x86_64 i386"). + +### Android + +Run: + + $ source ./android-configure NDK_PATH gyp + $ make -C out + +Note for UNIX users: compile your project with `-D_LARGEFILE_SOURCE` and +`-D_FILE_OFFSET_BITS=64`. GYP builds take care of that automatically. + +### Using Ninja + +To use ninja for build on ninja supported platforms, run: + + $ ./gyp_uv.py -f ninja + $ ninja -C out/Debug #for debug build OR + $ ninja -C out/Release + + +### Running tests + +Run: + + $ ./gyp_uv.py -f make + $ make -C out + $ ./out/Debug/run-tests + +## Supported Platforms + +Check the [SUPPORTED_PLATFORMS file](SUPPORTED_PLATFORMS.md). + +### AIX Notes + +AIX support for filesystem events requires the non-default IBM `bos.ahafs` +package to be installed. This package provides the AIX Event Infrastructure +that is detected by `autoconf`. +[IBM documentation](http://www.ibm.com/developerworks/aix/library/au-aix_event_infrastructure/) +describes the package in more detail. + +AIX support for filesystem events is not compiled when building with `gyp`. + +## Patches + +See the [guidelines for contributing][]. 
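
For orientation alongside the build and test instructions above, the following is a minimal sketch of the event-loop usage the README describes. It is not a file added by this diff; it assumes only the documented libuv 1.x API (`uv_default_loop`, `uv_idle_init`, `uv_idle_start`, `uv_idle_stop`, `uv_run`, `uv_loop_close`) and mirrors the introductory idle example from the docs linked above.

```c
/* idle-sketch.c -- minimal sketch; assumes the libuv 1.x API described above. */
#include <stdio.h>
#include <uv.h>

static int64_t counter = 0;

/* Runs once per loop iteration while the idle handle is active. */
static void wait_for_a_while(uv_idle_t* handle) {
  if (++counter >= 1000000)
    uv_idle_stop(handle);  /* no active handles left, so uv_run() returns */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_idle_t idler;

  uv_idle_init(loop, &idler);
  uv_idle_start(&idler, wait_for_a_while);

  printf("Idling...\n");
  uv_run(loop, UV_RUN_DEFAULT);  /* blocks until the loop has nothing to do */

  uv_loop_close(loop);
  return 0;
}
```

Against an installed copy of the library this would typically build with something like `cc idle-sketch.c -luv`; the exact flags depend on which of the build methods above was used.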
+ +[node.js]: http://nodejs.org/ +[GYP]: http://code.google.com/p/gyp/ +[Python]: https://www.python.org/downloads/ +[guidelines for contributing]: https://github.com/libuv/libuv/blob/master/CONTRIBUTING.md +[libuv_banner]: https://raw.githubusercontent.com/libuv/libuv/master/img/banner.png +[x32]: https://en.wikipedia.org/wiki/X32_ABI diff --git a/deps/libuv/SUPPORTED_PLATFORMS.md b/deps/libuv/SUPPORTED_PLATFORMS.md new file mode 100644 index 00000000..bff1050d --- /dev/null +++ b/deps/libuv/SUPPORTED_PLATFORMS.md @@ -0,0 +1,70 @@ +# Supported platforms + +| System | Support type | Supported versions | Notes | +|---|---|---|---| +| GNU/Linux | Tier 1 | Linux >= 2.6.18 with glibc >= 2.5 | | +| macOS | Tier 1 | macOS >= 10.7 | | +| Windows | Tier 1 | Windows >= XP SP1 | MSVC 2008 and later are supported | +| FreeBSD | Tier 1 | >= 9 (see note) | | +| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix | +| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos | +| Linux with musl | Tier 2 | musl >= 1.0 | | +| SunOS | Tier 2 | Solaris 121 and later | Maintainers: @libuv/sunos | +| MinGW | Tier 3 | MinGW32 and MinGW-w64 | | +| Other | Tier 3 | N/A | | + +#### Note on FreeBSD 9 + +While FreeBSD is supported as Tier 1, FreeBSD 9 will get Tier 2 support until +it reaches end of life, in December 2016. + +## Support types + +* **Tier 1**: Officially supported and tested with CI. Any contributed patch + MUST NOT break such systems. These are supported by @libuv/collaborators. + +* **Tier 2**: Officially supported, but not necessarily tested with CI. These + systems are maintained to the best of @libuv/collaborators ability, + without being a top priority. + +* **Tier 3**: Community maintained. These systems may inadvertently break and the + community and interested parties are expected to help with the maintenance. + +## Adding support for a new platform + +**IMPORTANT**: Before attempting to add support for a new platform please open +an issue about it for discussion. + +### Unix + +I/O handling is abstracted by an internal `uv__io_t` handle. The new platform +will need to implement some of the functions, the prototypes are in +``src/unix/internal.h``. + +If the new platform requires extra fields for any handle structure, create a +new include file in ``include/`` with the name ``uv-theplatform.h`` and add +the appropriate defines there. + +All functionality related to the new platform must be implemented in its own +file inside ``src/unix/`` unless it's already done in a common file, in which +case adding an `ifdef` is fine. + +Two build systems are supported: autotools and GYP. Ideally both need to be +supported, but if GYP does not support the new platform it can be left out. + +### Windows + +Windows is treated as a single platform, so adding support for a new platform +would mean adding support for a new version. + +Compilation and runtime must succeed for the minimum supported version. If a +new API is to be used, it must be done optionally, only in supported versions. + +### Common + +Some common notes when adding support for new platforms: + +* Generally libuv tries to avoid compile time checks. Do not add any to the + autotools based build system or use version checking macros. + Dynamically load functions and symbols if they are not supported by the + minimum supported version. 
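
The last bullet above ("dynamically load functions and symbols") benefits from a concrete illustration. The sketch below is only one possible pattern, assumed for illustration rather than taken from libuv's sources (which use their own wrappers, e.g. `src/unix/linux-syscalls.c`); `accept4` is just a stand-in for any symbol newer than the minimum supported platform version.

```c
/* symbol-probe.c -- illustrative sketch only; not part of libuv or this patch. */
#include <dlfcn.h>
#include <stdio.h>
#include <sys/socket.h>

/* Signature of the optional function we want to resolve at runtime
 * instead of testing for it at configure time. */
typedef int (*accept4_fn)(int, struct sockaddr*, socklen_t*, int);

int main(void) {
  void* self = dlopen(NULL, RTLD_LAZY);  /* handle for the running program */
  accept4_fn probe = NULL;

  if (self != NULL)
    probe = (accept4_fn) dlsym(self, "accept4");  /* NULL if the system lacks it */

  printf("accept4 %s available on this system\n",
         probe != NULL ? "is" : "is not");

  if (self != NULL)
    dlclose(self);
  return 0;
}
```

On glibc this would normally need `-ldl` at link time; the point is only that the availability decision is made when the code runs, not when it is configured, which keeps a single binary working across the supported version range.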
diff --git a/deps/libuv/android-configure b/deps/libuv/android-configure new file mode 100755 index 00000000..7ffc035c --- /dev/null +++ b/deps/libuv/android-configure @@ -0,0 +1,20 @@ +#!/bin/bash + +export TOOLCHAIN=$PWD/android-toolchain +mkdir -p $TOOLCHAIN +$1/build/tools/make-standalone-toolchain.sh \ + --toolchain=arm-linux-androideabi-4.9 \ + --arch=arm \ + --install-dir=$TOOLCHAIN \ + --platform=android-21 +export PATH=$TOOLCHAIN/bin:$PATH +export AR=arm-linux-androideabi-ar +export CC=arm-linux-androideabi-gcc +export CXX=arm-linux-androideabi-g++ +export LINK=arm-linux-androideabi-g++ +export PLATFORM=android + +if [[ $2 == 'gyp' ]] + then + ./gyp_uv.py -Dtarget_arch=arm -DOS=android -f make-android +fi diff --git a/deps/libuv/appveyor.yml b/deps/libuv/appveyor.yml new file mode 100644 index 00000000..17609ad8 --- /dev/null +++ b/deps/libuv/appveyor.yml @@ -0,0 +1,36 @@ +version: v1.10.0.build{build} + +install: + - cinst -y nsis + +matrix: + fast_finish: true + allow_failures: + - platform: x86 + configuration: Release + - platform: x64 + configuration: Release + +platform: + - x86 + - x64 + +configuration: + - Release + +build_script: + # Fixed tag version number if using a tag. + - cmd: if "%APPVEYOR_REPO_TAG%" == "true" set APPVEYOR_BUILD_VERSION=%APPVEYOR_REPO_TAG_NAME% + # vcbuild overwrites the platform variable. + - cmd: set ARCH=%platform% + - cmd: vcbuild.bat release %ARCH% shared + +after_build: + - '"%PROGRAMFILES(x86)%\NSIS\makensis" /DVERSION=%APPVEYOR_BUILD_VERSION% /DARCH=%ARCH% libuv.nsi' + +artifacts: + - name: Installer + path: 'libuv-*.exe' + +cache: + - C:\projects\libuv\build\gyp diff --git a/deps/libuv/autogen.sh b/deps/libuv/autogen.sh new file mode 100755 index 00000000..271c2ee8 --- /dev/null +++ b/deps/libuv/autogen.sh @@ -0,0 +1,46 @@ +#!/bin/sh + +# Copyright (c) 2013, Ben Noordhuis +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +cd `dirname "$0"` + +if [ "$LIBTOOLIZE" = "" ] && [ "`uname`" = "Darwin" ]; then + LIBTOOLIZE=glibtoolize +fi + +ACLOCAL=${ACLOCAL:-aclocal} +AUTOCONF=${AUTOCONF:-autoconf} +AUTOMAKE=${AUTOMAKE:-automake} +LIBTOOLIZE=${LIBTOOLIZE:-libtoolize} + +automake_version=`"$AUTOMAKE" --version | head -n 1 | sed 's/[^.0-9]//g'` +automake_version_major=`echo "$automake_version" | cut -d. -f1` +automake_version_minor=`echo "$automake_version" | cut -d. -f2` + +UV_EXTRA_AUTOMAKE_FLAGS= +if test "$automake_version_major" -gt 1 || \ + test "$automake_version_major" -eq 1 && \ + test "$automake_version_minor" -gt 11; then + # serial-tests is available in v1.12 and newer. 
+ UV_EXTRA_AUTOMAKE_FLAGS="$UV_EXTRA_AUTOMAKE_FLAGS serial-tests" +fi +echo "m4_define([UV_EXTRA_AUTOMAKE_FLAGS], [$UV_EXTRA_AUTOMAKE_FLAGS])" \ + > m4/libuv-extra-automake-flags.m4 + +set -ex +"$LIBTOOLIZE" --copy +"$ACLOCAL" -I m4 +"$AUTOCONF" +"$AUTOMAKE" --add-missing --copy diff --git a/deps/libuv/build/gyp/.gitignore b/deps/libuv/build/gyp/.gitignore new file mode 100644 index 00000000..0d20b648 --- /dev/null +++ b/deps/libuv/build/gyp/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/deps/libuv/build/gyp/AUTHORS b/deps/libuv/build/gyp/AUTHORS new file mode 100644 index 00000000..07887276 --- /dev/null +++ b/deps/libuv/build/gyp/AUTHORS @@ -0,0 +1,14 @@ +# Names should be added to this file like so: +# Name or Organization + +Google Inc. <*@google.com> +Bloomberg Finance L.P. <*@bloomberg.net> +IBM Inc. <*@*.ibm.com> +Yandex LLC <*@yandex-team.ru> + +Steven Knight +Ryan Norton +David J. Sankel +Eric N. Vander Weele +Tom Freudenberg +Julien Brianceau diff --git a/deps/libuv/build/gyp/DEPS b/deps/libuv/build/gyp/DEPS new file mode 100644 index 00000000..167fb779 --- /dev/null +++ b/deps/libuv/build/gyp/DEPS @@ -0,0 +1,23 @@ +# DEPS file for gclient use in buildbot execution of gyp tests. +# +# (You don't need to use gclient for normal GYP development work.) + +vars = { + "chromium_git": "https://chromium.googlesource.com/", +} + +deps = { +} + +deps_os = { + "win": { + "third_party/cygwin": + Var("chromium_git") + "chromium/deps/cygwin@4fbd5b9", + + "third_party/python_26": + Var("chromium_git") + "chromium/deps/python_26@5bb4080", + + "src/third_party/pefile": + Var("chromium_git") + "external/pefile@72c6ae4", + }, +} diff --git a/deps/libuv/build/gyp/LICENSE b/deps/libuv/build/gyp/LICENSE new file mode 100644 index 00000000..ab6b011a --- /dev/null +++ b/deps/libuv/build/gyp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/deps/libuv/build/gyp/OWNERS b/deps/libuv/build/gyp/OWNERS new file mode 100644 index 00000000..72e8ffc0 --- /dev/null +++ b/deps/libuv/build/gyp/OWNERS @@ -0,0 +1 @@ +* diff --git a/deps/libuv/build/gyp/PRESUBMIT.py b/deps/libuv/build/gyp/PRESUBMIT.py new file mode 100644 index 00000000..4bc1b8ca --- /dev/null +++ b/deps/libuv/build/gyp/PRESUBMIT.py @@ -0,0 +1,126 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + + +"""Top-level presubmit script for GYP. + +See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts +for more details about the presubmit API built into gcl. +""" + + +PYLINT_BLACKLIST = [ + # TODO: fix me. + # From SCons, not done in google style. + 'test/lib/TestCmd.py', + 'test/lib/TestCommon.py', + 'test/lib/TestGyp.py', +] + + +PYLINT_DISABLED_WARNINGS = [ + # TODO: fix me. + # Many tests include modules they don't use. + 'W0611', + # Possible unbalanced tuple unpacking with sequence. + 'W0632', + # Attempting to unpack a non-sequence. + 'W0633', + # Include order doesn't properly include local files? + 'F0401', + # Some use of built-in names. + 'W0622', + # Some unused variables. + 'W0612', + # Operator not preceded/followed by space. + 'C0323', + 'C0322', + # Unnecessary semicolon. + 'W0301', + # Unused argument. + 'W0613', + # String has no effect (docstring in wrong place). + 'W0105', + # map/filter on lambda could be replaced by comprehension. + 'W0110', + # Use of eval. + 'W0123', + # Comma not followed by space. + 'C0324', + # Access to a protected member. + 'W0212', + # Bad indent. + 'W0311', + # Line too long. + 'C0301', + # Undefined variable. + 'E0602', + # Not exception type specified. + 'W0702', + # No member of that name. + 'E1101', + # Dangerous default {}. + 'W0102', + # Cyclic import. + 'R0401', + # Others, too many to sort. + 'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231', + 'R0201', 'E0101', 'C0321', + # ************* Module copy + # W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect + 'W0104', +] + + +def _LicenseHeader(input_api): + # Accept any year number from 2009 to the current year. + current_year = int(input_api.time.strftime('%Y')) + allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1))) + + years_re = '(' + '|'.join(allowed_years) + ')' + + # The (c) is deprecated, but tolerate it until it's removed from all files. + return ( + r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n' + r'.*? Use of this source code is governed by a BSD-style license that ' + r'can be\n' + r'.*? 
found in the LICENSE file\.\n' + ) % { + 'year': years_re, + } + +def CheckChangeOnUpload(input_api, output_api): + report = [] + report.extend(input_api.canned_checks.PanProjectChecks( + input_api, output_api, license_header=_LicenseHeader(input_api))) + return report + + +def CheckChangeOnCommit(input_api, output_api): + report = [] + + report.extend(input_api.canned_checks.PanProjectChecks( + input_api, output_api, license_header=_LicenseHeader(input_api))) + report.extend(input_api.canned_checks.CheckTreeIsOpen( + input_api, output_api, + 'http://gyp-status.appspot.com/status', + 'http://gyp-status.appspot.com/current')) + + import os + import sys + old_sys_path = sys.path + try: + sys.path = ['pylib', 'test/lib'] + sys.path + blacklist = PYLINT_BLACKLIST + if sys.platform == 'win32': + blacklist = [os.path.normpath(x).replace('\\', '\\\\') + for x in PYLINT_BLACKLIST] + report.extend(input_api.canned_checks.RunPylint( + input_api, + output_api, + black_list=blacklist, + disabled_warnings=PYLINT_DISABLED_WARNINGS)) + finally: + sys.path = old_sys_path + return report diff --git a/deps/libuv/build/gyp/README.md b/deps/libuv/build/gyp/README.md new file mode 100644 index 00000000..c0d73ac9 --- /dev/null +++ b/deps/libuv/build/gyp/README.md @@ -0,0 +1,4 @@ +GYP can Generate Your Projects. +=================================== + +Documents are available at [gyp.gsrc.io](https://gyp.gsrc.io), or you can check out ```md-pages``` branch to read those documents offline. diff --git a/deps/libuv/build/gyp/codereview.settings b/deps/libuv/build/gyp/codereview.settings new file mode 100644 index 00000000..27fb9f99 --- /dev/null +++ b/deps/libuv/build/gyp/codereview.settings @@ -0,0 +1,6 @@ +# This file is used by git cl to get repository specific information. +CC_LIST: gyp-developer@googlegroups.com +CODE_REVIEW_SERVER: codereview.chromium.org +GERRIT_HOST: True +PROJECT: gyp +VIEW_VC: https://chromium.googlesource.com/external/gyp/+/ diff --git a/deps/libuv/build/gyp/data/win/large-pdb-shim.cc b/deps/libuv/build/gyp/data/win/large-pdb-shim.cc new file mode 100644 index 00000000..8bca5108 --- /dev/null +++ b/deps/libuv/build/gyp/data/win/large-pdb-shim.cc @@ -0,0 +1,12 @@ +// Copyright (c) 2013 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// This file is used to generate an empty .pdb -- with a 4KB pagesize -- that is +// then used during the final link for modules that have large PDBs. Otherwise, +// the linker will generate a pdb with a page size of 1KB, which imposes a limit +// of 1GB on the .pdb. By generating an initial empty .pdb with the compiler +// (rather than the linker), this limit is avoided. With this in place PDBs may +// grow to 2GB. +// +// This file is referenced by the msvs_large_pdb mechanism in MSVSUtil.py. diff --git a/deps/libuv/build/gyp/gyp b/deps/libuv/build/gyp/gyp new file mode 100755 index 00000000..1b8b9bdf --- /dev/null +++ b/deps/libuv/build/gyp/gyp @@ -0,0 +1,8 @@ +#!/bin/sh +# Copyright 2013 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +set -e +base=$(dirname "$0") +exec python "${base}/gyp_main.py" "$@" diff --git a/deps/libuv/build/gyp/gyp.bat b/deps/libuv/build/gyp/gyp.bat new file mode 100755 index 00000000..c0b4ca24 --- /dev/null +++ b/deps/libuv/build/gyp/gyp.bat @@ -0,0 +1,5 @@ +@rem Copyright (c) 2009 Google Inc. All rights reserved. 
+@rem Use of this source code is governed by a BSD-style license that can be +@rem found in the LICENSE file. + +@python "%~dp0gyp_main.py" %* diff --git a/deps/libuv/build/gyp/gyp_main.py b/deps/libuv/build/gyp/gyp_main.py new file mode 100755 index 00000000..25a6eba9 --- /dev/null +++ b/deps/libuv/build/gyp/gyp_main.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import os +import sys + +# Make sure we're using the version of pylib in this repo, not one installed +# elsewhere on the system. +sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib')) +import gyp + +if __name__ == '__main__': + sys.exit(gyp.script_main()) diff --git a/deps/libuv/build/gyp/gyptest.py b/deps/libuv/build/gyp/gyptest.py new file mode 100755 index 00000000..98957e16 --- /dev/null +++ b/deps/libuv/build/gyp/gyptest.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python + +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +__doc__ = """ +gyptest.py -- test runner for GYP tests. +""" + +import os +import optparse +import shlex +import subprocess +import sys + +class CommandRunner(object): + """ + Executor class for commands, including "commands" implemented by + Python functions. + """ + verbose = True + active = True + + def __init__(self, dictionary={}): + self.subst_dictionary(dictionary) + + def subst_dictionary(self, dictionary): + self._subst_dictionary = dictionary + + def subst(self, string, dictionary=None): + """ + Substitutes (via the format operator) the values in the specified + dictionary into the specified command. + + The command can be an (action, string) tuple. In all cases, we + perform substitution on strings and don't worry if something isn't + a string. (It's probably a Python function to be executed.) + """ + if dictionary is None: + dictionary = self._subst_dictionary + if dictionary: + try: + string = string % dictionary + except TypeError: + pass + return string + + def display(self, command, stdout=None, stderr=None): + if not self.verbose: + return + if type(command) == type(()): + func = command[0] + args = command[1:] + s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args))) + if type(command) == type([]): + # TODO: quote arguments containing spaces + # TODO: handle meta characters? + s = ' '.join(command) + else: + s = self.subst(command) + if not s.endswith('\n'): + s += '\n' + sys.stdout.write(s) + sys.stdout.flush() + + def execute(self, command, stdout=None, stderr=None): + """ + Executes a single command. + """ + if not self.active: + return 0 + if type(command) == type(''): + command = self.subst(command) + cmdargs = shlex.split(command) + if cmdargs[0] == 'cd': + command = (os.chdir,) + tuple(cmdargs[1:]) + if type(command) == type(()): + func = command[0] + args = command[1:] + return func(*args) + else: + if stdout is sys.stdout: + # Same as passing sys.stdout, except python2.4 doesn't fail on it. + subout = None + else: + # Open pipe for anything else so Popen works on python2.4. + subout = subprocess.PIPE + if stderr is sys.stderr: + # Same as passing sys.stderr, except python2.4 doesn't fail on it. + suberr = None + elif stderr is None: + # Merge with stdout if stderr isn't specified. + suberr = subprocess.STDOUT + else: + # Open pipe for anything else so Popen works on python2.4. 
+ suberr = subprocess.PIPE + p = subprocess.Popen(command, + shell=(sys.platform == 'win32'), + stdout=subout, + stderr=suberr) + p.wait() + if stdout is None: + self.stdout = p.stdout.read() + elif stdout is not sys.stdout: + stdout.write(p.stdout.read()) + if stderr not in (None, sys.stderr): + stderr.write(p.stderr.read()) + return p.returncode + + def run(self, command, display=None, stdout=None, stderr=None): + """ + Runs a single command, displaying it first. + """ + if display is None: + display = command + self.display(display) + return self.execute(command, stdout, stderr) + + +class Unbuffered(object): + def __init__(self, fp): + self.fp = fp + def write(self, arg): + self.fp.write(arg) + self.fp.flush() + def __getattr__(self, attr): + return getattr(self.fp, attr) + +sys.stdout = Unbuffered(sys.stdout) +sys.stderr = Unbuffered(sys.stderr) + + +def is_test_name(f): + return f.startswith('gyptest') and f.endswith('.py') + + +def find_all_gyptest_files(directory): + result = [] + for root, dirs, files in os.walk(directory): + if '.svn' in dirs: + dirs.remove('.svn') + result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ]) + result.sort() + return result + + +def main(argv=None): + if argv is None: + argv = sys.argv + + usage = "gyptest.py [-ahlnq] [-f formats] [test ...]" + parser = optparse.OptionParser(usage=usage) + parser.add_option("-a", "--all", action="store_true", + help="run all tests") + parser.add_option("-C", "--chdir", action="store", default=None, + help="chdir to the specified directory") + parser.add_option("-f", "--format", action="store", default='', + help="run tests with the specified formats") + parser.add_option("-G", '--gyp_option', action="append", default=[], + help="Add -G options to the gyp command line") + parser.add_option("-l", "--list", action="store_true", + help="list available tests and exit") + parser.add_option("-n", "--no-exec", action="store_true", + help="no execute, just print the command line") + parser.add_option("--passed", action="store_true", + help="report passed tests") + parser.add_option("--path", action="append", default=[], + help="additional $PATH directory") + parser.add_option("-q", "--quiet", action="store_true", + help="quiet, don't print test command lines") + opts, args = parser.parse_args(argv[1:]) + + if opts.chdir: + os.chdir(opts.chdir) + + if opts.path: + extra_path = [os.path.abspath(p) for p in opts.path] + extra_path = os.pathsep.join(extra_path) + os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH'] + + if not args: + if not opts.all: + sys.stderr.write('Specify -a to get all tests.\n') + return 1 + args = ['test'] + + tests = [] + for arg in args: + if os.path.isdir(arg): + tests.extend(find_all_gyptest_files(os.path.normpath(arg))) + else: + if not is_test_name(os.path.basename(arg)): + print >>sys.stderr, arg, 'is not a valid gyp test name.' 
+ sys.exit(1) + tests.append(arg) + + if opts.list: + for test in tests: + print test + sys.exit(0) + + CommandRunner.verbose = not opts.quiet + CommandRunner.active = not opts.no_exec + cr = CommandRunner() + + os.environ['PYTHONPATH'] = os.path.abspath('test/lib') + if not opts.quiet: + sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH']) + + passed = [] + failed = [] + no_result = [] + + if opts.format: + format_list = opts.format.split(',') + else: + # TODO: not duplicate this mapping from pylib/gyp/__init__.py + format_list = { + 'aix5': ['make'], + 'freebsd7': ['make'], + 'freebsd8': ['make'], + 'openbsd5': ['make'], + 'cygwin': ['msvs'], + 'win32': ['msvs', 'ninja'], + 'linux2': ['make', 'ninja'], + 'linux3': ['make', 'ninja'], + 'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'], + }[sys.platform] + + for format in format_list: + os.environ['TESTGYP_FORMAT'] = format + if not opts.quiet: + sys.stdout.write('TESTGYP_FORMAT=%s\n' % format) + + gyp_options = [] + for option in opts.gyp_option: + gyp_options += ['-G', option] + if gyp_options and not opts.quiet: + sys.stdout.write('Extra Gyp options: %s\n' % gyp_options) + + for test in tests: + status = cr.run([sys.executable, test] + gyp_options, + stdout=sys.stdout, + stderr=sys.stderr) + if status == 2: + no_result.append(test) + elif status: + failed.append(test) + else: + passed.append(test) + + if not opts.quiet: + def report(description, tests): + if tests: + if len(tests) == 1: + sys.stdout.write("\n%s the following test:\n" % description) + else: + fmt = "\n%s the following %d tests:\n" + sys.stdout.write(fmt % (description, len(tests))) + sys.stdout.write("\t" + "\n\t".join(tests) + "\n") + + if opts.passed: + report("Passed", passed) + report("Failed", failed) + report("No result from", no_result) + + if failed: + return 1 + else: + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSNew.py b/deps/libuv/build/gyp/pylib/gyp/MSVSNew.py new file mode 100644 index 00000000..593f0e5b --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSNew.py @@ -0,0 +1,340 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""New implementation of Visual Studio project generation.""" + +import os +import random + +import gyp.common + +# hashlib is supplied as of Python 2.5 as the replacement interface for md5 +# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if +# available, avoiding a deprecation warning under 2.6. Import md5 otherwise, +# preserving 2.4 compatibility. +try: + import hashlib + _new_md5 = hashlib.md5 +except ImportError: + import md5 + _new_md5 = md5.new + + +# Initialize random number generator +random.seed() + +# GUIDs for project types +ENTRY_TYPE_GUIDS = { + 'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}', + 'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}', +} + +#------------------------------------------------------------------------------ +# Helper functions + + +def MakeGuid(name, seed='msvs_new'): + """Returns a GUID for the specified target name. + + Args: + name: Target name. + seed: Seed for MD5 hash. + Returns: + A GUID-line string calculated from the name and seed. + + This generates something which looks like a GUID, but depends only on the + name and seed. 
This means the same name/seed will always generate the same + GUID, so that projects and solutions which refer to each other can explicitly + determine the GUID to refer to explicitly. It also means that the GUID will + not change when the project for a target is rebuilt. + """ + # Calculate a MD5 signature for the seed and name. + d = _new_md5(str(seed) + str(name)).hexdigest().upper() + # Convert most of the signature to GUID form (discard the rest) + guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20] + + '-' + d[20:32] + '}') + return guid + +#------------------------------------------------------------------------------ + + +class MSVSSolutionEntry(object): + def __cmp__(self, other): + # Sort by name then guid (so things are in order on vs2008). + return cmp((self.name, self.get_guid()), (other.name, other.get_guid())) + + +class MSVSFolder(MSVSSolutionEntry): + """Folder in a Visual Studio project or solution.""" + + def __init__(self, path, name = None, entries = None, + guid = None, items = None): + """Initializes the folder. + + Args: + path: Full path to the folder. + name: Name of the folder. + entries: List of folder entries to nest inside this folder. May contain + Folder or Project objects. May be None, if the folder is empty. + guid: GUID to use for folder, if not None. + items: List of solution items to include in the folder project. May be + None, if the folder does not directly contain items. + """ + if name: + self.name = name + else: + # Use last layer. + self.name = os.path.basename(path) + + self.path = path + self.guid = guid + + # Copy passed lists (or set to empty lists) + self.entries = sorted(list(entries or [])) + self.items = list(items or []) + + self.entry_type_guid = ENTRY_TYPE_GUIDS['folder'] + + def get_guid(self): + if self.guid is None: + # Use consistent guids for folders (so things don't regenerate). + self.guid = MakeGuid(self.path, seed='msvs_folder') + return self.guid + + +#------------------------------------------------------------------------------ + + +class MSVSProject(MSVSSolutionEntry): + """Visual Studio project.""" + + def __init__(self, path, name = None, dependencies = None, guid = None, + spec = None, build_file = None, config_platform_overrides = None, + fixpath_prefix = None): + """Initializes the project. + + Args: + path: Absolute path to the project file. + name: Name of project. If None, the name will be the same as the base + name of the project file. + dependencies: List of other Project objects this project is dependent + upon, if not None. + guid: GUID to use for project, if not None. + spec: Dictionary specifying how to build this project. + build_file: Filename of the .gyp file that the vcproj file comes from. + config_platform_overrides: optional dict of configuration platforms to + used in place of the default for this target. 
+ fixpath_prefix: the path used to adjust the behavior of _fixpath + """ + self.path = path + self.guid = guid + self.spec = spec + self.build_file = build_file + # Use project filename if name not specified + self.name = name or os.path.splitext(os.path.basename(path))[0] + + # Copy passed lists (or set to empty lists) + self.dependencies = list(dependencies or []) + + self.entry_type_guid = ENTRY_TYPE_GUIDS['project'] + + if config_platform_overrides: + self.config_platform_overrides = config_platform_overrides + else: + self.config_platform_overrides = {} + self.fixpath_prefix = fixpath_prefix + self.msbuild_toolset = None + + def set_dependencies(self, dependencies): + self.dependencies = list(dependencies or []) + + def get_guid(self): + if self.guid is None: + # Set GUID from path + # TODO(rspangler): This is fragile. + # 1. We can't just use the project filename sans path, since there could + # be multiple projects with the same base name (for example, + # foo/unittest.vcproj and bar/unittest.vcproj). + # 2. The path needs to be relative to $SOURCE_ROOT, so that the project + # GUID is the same whether it's included from base/base.sln or + # foo/bar/baz/baz.sln. + # 3. The GUID needs to be the same each time this builder is invoked, so + # that we don't need to rebuild the solution when the project changes. + # 4. We should be able to handle pre-built project files by reading the + # GUID from the files. + self.guid = MakeGuid(self.name) + return self.guid + + def set_msbuild_toolset(self, msbuild_toolset): + self.msbuild_toolset = msbuild_toolset + +#------------------------------------------------------------------------------ + + +class MSVSSolution(object): + """Visual Studio solution.""" + + def __init__(self, path, version, entries=None, variants=None, + websiteProperties=True): + """Initializes the solution. + + Args: + path: Path to solution file. + version: Format version to emit. + entries: List of entries in solution. May contain Folder or Project + objects. May be None, if the folder is empty. + variants: List of build variant strings. If none, a default list will + be used. + websiteProperties: Flag to decide if the website properties section + is generated. + """ + self.path = path + self.websiteProperties = websiteProperties + self.version = version + + # Copy passed lists (or set to empty lists) + self.entries = list(entries or []) + + if variants: + # Copy passed list + self.variants = variants[:] + else: + # Use default + self.variants = ['Debug|Win32', 'Release|Win32'] + # TODO(rspangler): Need to be able to handle a mapping of solution config + # to project config. Should we be able to handle variants being a dict, + # or add a separate variant_map variable? If it's a dict, we can't + # guarantee the order of variants since dict keys aren't ordered. + + + # TODO(rspangler): Automatically write to disk for now; should delay until + # node-evaluation time. + self.Write() + + + def Write(self, writer=gyp.common.WriteOnDiff): + """Writes the solution file to disk. + + Raises: + IndexError: An entry appears multiple times. + """ + # Walk the entry tree and collect all the folders and projects. + all_entries = set() + entries_to_check = self.entries[:] + while entries_to_check: + e = entries_to_check.pop(0) + + # If this entry has been visited, nothing to do. + if e in all_entries: + continue + + all_entries.add(e) + + # If this is a folder, check its entries too. 
+ if isinstance(e, MSVSFolder): + entries_to_check += e.entries + + all_entries = sorted(all_entries) + + # Open file and print header + f = writer(self.path) + f.write('Microsoft Visual Studio Solution File, ' + 'Format Version %s\r\n' % self.version.SolutionVersion()) + f.write('# %s\r\n' % self.version.Description()) + + # Project entries + sln_root = os.path.split(self.path)[0] + for e in all_entries: + relative_path = gyp.common.RelativePath(e.path, sln_root) + # msbuild does not accept an empty folder_name. + # use '.' in case relative_path is empty. + folder_name = relative_path.replace('/', '\\') or '.' + f.write('Project("%s") = "%s", "%s", "%s"\r\n' % ( + e.entry_type_guid, # Entry type GUID + e.name, # Folder name + folder_name, # Folder name (again) + e.get_guid(), # Entry GUID + )) + + # TODO(rspangler): Need a way to configure this stuff + if self.websiteProperties: + f.write('\tProjectSection(WebsiteProperties) = preProject\r\n' + '\t\tDebug.AspNetCompiler.Debug = "True"\r\n' + '\t\tRelease.AspNetCompiler.Debug = "False"\r\n' + '\tEndProjectSection\r\n') + + if isinstance(e, MSVSFolder): + if e.items: + f.write('\tProjectSection(SolutionItems) = preProject\r\n') + for i in e.items: + f.write('\t\t%s = %s\r\n' % (i, i)) + f.write('\tEndProjectSection\r\n') + + if isinstance(e, MSVSProject): + if e.dependencies: + f.write('\tProjectSection(ProjectDependencies) = postProject\r\n') + for d in e.dependencies: + f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid())) + f.write('\tEndProjectSection\r\n') + + f.write('EndProject\r\n') + + # Global section + f.write('Global\r\n') + + # Configurations (variants) + f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n') + for v in self.variants: + f.write('\t\t%s = %s\r\n' % (v, v)) + f.write('\tEndGlobalSection\r\n') + + # Sort config guids for easier diffing of solution changes. + config_guids = [] + config_guids_overrides = {} + for e in all_entries: + if isinstance(e, MSVSProject): + config_guids.append(e.get_guid()) + config_guids_overrides[e.get_guid()] = e.config_platform_overrides + config_guids.sort() + + f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n') + for g in config_guids: + for v in self.variants: + nv = config_guids_overrides[g].get(v, v) + # Pick which project configuration to build for this solution + # configuration. + f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % ( + g, # Project GUID + v, # Solution build configuration + nv, # Project build config for that solution config + )) + + # Enable project in this solution configuration. 
+ f.write('\t\t%s.%s.Build.0 = %s\r\n' % ( + g, # Project GUID + v, # Solution build configuration + nv, # Project build config for that solution config + )) + f.write('\tEndGlobalSection\r\n') + + # TODO(rspangler): Should be able to configure this stuff too (though I've + # never seen this be any different) + f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n') + f.write('\t\tHideSolutionNode = FALSE\r\n') + f.write('\tEndGlobalSection\r\n') + + # Folder mappings + # Omit this section if there are no folders + if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]): + f.write('\tGlobalSection(NestedProjects) = preSolution\r\n') + for e in all_entries: + if not isinstance(e, MSVSFolder): + continue # Does not apply to projects, only folders + for subentry in e.entries: + f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid())) + f.write('\tEndGlobalSection\r\n') + + f.write('EndGlobal\r\n') + + f.close() diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSProject.py b/deps/libuv/build/gyp/pylib/gyp/MSVSProject.py new file mode 100644 index 00000000..db1ceede --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSProject.py @@ -0,0 +1,208 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Visual Studio project reader/writer.""" + +import gyp.common +import gyp.easy_xml as easy_xml + +#------------------------------------------------------------------------------ + + +class Tool(object): + """Visual Studio tool.""" + + def __init__(self, name, attrs=None): + """Initializes the tool. + + Args: + name: Tool name. + attrs: Dict of tool attributes; may be None. + """ + self._attrs = attrs or {} + self._attrs['Name'] = name + + def _GetSpecification(self): + """Creates an element for the tool. + + Returns: + A new xml.dom.Element for the tool. + """ + return ['Tool', self._attrs] + +class Filter(object): + """Visual Studio filter - that is, a virtual folder.""" + + def __init__(self, name, contents=None): + """Initializes the folder. + + Args: + name: Filter (folder) name. + contents: List of filenames and/or Filter objects contained. + """ + self.name = name + self.contents = list(contents or []) + + +#------------------------------------------------------------------------------ + + +class Writer(object): + """Visual Studio XML project writer.""" + + def __init__(self, project_path, version, name, guid=None, platforms=None): + """Initializes the project. + + Args: + project_path: Path to the project file. + version: Format version to emit. + name: Name of the project. + guid: GUID to use for project, if not None. + platforms: Array of string, the supported platforms. If null, ['Win32'] + """ + self.project_path = project_path + self.version = version + self.name = name + self.guid = guid + + # Default to Win32 for platforms. + if not platforms: + platforms = ['Win32'] + + # Initialize the specifications of the various sections. + self.platform_section = ['Platforms'] + for platform in platforms: + self.platform_section.append(['Platform', {'Name': platform}]) + self.tool_files_section = ['ToolFiles'] + self.configurations_section = ['Configurations'] + self.files_section = ['Files'] + + # Keep a dict keyed on filename to speed up access. + self.files_dict = dict() + + def AddToolFile(self, path): + """Adds a tool file to the project. + + Args: + path: Relative path from project to tool file. 
+ """ + self.tool_files_section.append(['ToolFile', {'RelativePath': path}]) + + def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools): + """Returns the specification for a configuration. + + Args: + config_type: Type of configuration node. + config_name: Configuration name. + attrs: Dict of configuration attributes; may be None. + tools: List of tools (strings or Tool objects); may be None. + Returns: + """ + # Handle defaults + if not attrs: + attrs = {} + if not tools: + tools = [] + + # Add configuration node and its attributes + node_attrs = attrs.copy() + node_attrs['Name'] = config_name + specification = [config_type, node_attrs] + + # Add tool nodes and their attributes + if tools: + for t in tools: + if isinstance(t, Tool): + specification.append(t._GetSpecification()) + else: + specification.append(Tool(t)._GetSpecification()) + return specification + + + def AddConfig(self, name, attrs=None, tools=None): + """Adds a configuration to the project. + + Args: + name: Configuration name. + attrs: Dict of configuration attributes; may be None. + tools: List of tools (strings or Tool objects); may be None. + """ + spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools) + self.configurations_section.append(spec) + + def _AddFilesToNode(self, parent, files): + """Adds files and/or filters to the parent node. + + Args: + parent: Destination node + files: A list of Filter objects and/or relative paths to files. + + Will call itself recursively, if the files list contains Filter objects. + """ + for f in files: + if isinstance(f, Filter): + node = ['Filter', {'Name': f.name}] + self._AddFilesToNode(node, f.contents) + else: + node = ['File', {'RelativePath': f}] + self.files_dict[f] = node + parent.append(node) + + def AddFiles(self, files): + """Adds files to the project. + + Args: + files: A list of Filter objects and/or relative paths to files. + + This makes a copy of the file/filter tree at the time of this call. If you + later add files to a Filter object which was passed into a previous call + to AddFiles(), it will not be reflected in this project. + """ + self._AddFilesToNode(self.files_section, files) + # TODO(rspangler) This also doesn't handle adding files to an existing + # filter. That is, it doesn't merge the trees. + + def AddFileConfig(self, path, config, attrs=None, tools=None): + """Adds a configuration to a file. + + Args: + path: Relative path to the file. + config: Name of configuration to add. + attrs: Dict of configuration attributes; may be None. + tools: List of tools (strings or Tool objects); may be None. + + Raises: + ValueError: Relative path does not match any file added via AddFiles(). + """ + # Find the file node with the right relative path + parent = self.files_dict.get(path) + if not parent: + raise ValueError('AddFileConfig: file "%s" not in project.' 
% path) + + # Add the config to the file node + spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs, + tools) + parent.append(spec) + + def WriteIfChanged(self): + """Writes the project file.""" + # First create XML content definition + content = [ + 'VisualStudioProject', + {'ProjectType': 'Visual C++', + 'Version': self.version.ProjectVersion(), + 'Name': self.name, + 'ProjectGUID': self.guid, + 'RootNamespace': self.name, + 'Keyword': 'Win32Proj' + }, + self.platform_section, + self.tool_files_section, + self.configurations_section, + ['References'], # empty section + self.files_section, + ['Globals'] # empty section + ] + easy_xml.WriteXmlIfChanged(content, self.project_path, + encoding="Windows-1252") diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSSettings.py b/deps/libuv/build/gyp/pylib/gyp/MSVSSettings.py new file mode 100644 index 00000000..8ae19180 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSSettings.py @@ -0,0 +1,1097 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +r"""Code to validate and convert settings of the Microsoft build tools. + +This file contains code to validate and convert settings of the Microsoft +build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(), +and ValidateMSBuildSettings() are the entry points. + +This file was created by comparing the projects created by Visual Studio 2008 +and Visual Studio 2010 for all available settings through the user interface. +The MSBuild schemas were also considered. They are typically found in the +MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild +""" + +import sys +import re + +# Dictionaries of settings validators. The key is the tool name, the value is +# a dictionary mapping setting names to validation functions. +_msvs_validators = {} +_msbuild_validators = {} + + +# A dictionary of settings converters. The key is the tool name, the value is +# a dictionary mapping setting names to conversion functions. +_msvs_to_msbuild_converters = {} + + +# Tool name mapping from MSVS to MSBuild. +_msbuild_name_of_tool = {} + + +class _Tool(object): + """Represents a tool used by MSVS or MSBuild. + + Attributes: + msvs_name: The name of the tool in MSVS. + msbuild_name: The name of the tool in MSBuild. + """ + + def __init__(self, msvs_name, msbuild_name): + self.msvs_name = msvs_name + self.msbuild_name = msbuild_name + + +def _AddTool(tool): + """Adds a tool to the four dictionaries used to process settings. + + This only defines the tool. Each setting also needs to be added. + + Args: + tool: The _Tool object to be added. + """ + _msvs_validators[tool.msvs_name] = {} + _msbuild_validators[tool.msbuild_name] = {} + _msvs_to_msbuild_converters[tool.msvs_name] = {} + _msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name + + +def _GetMSBuildToolSettings(msbuild_settings, tool): + """Returns an MSBuild tool dictionary. Creates it if needed.""" + return msbuild_settings.setdefault(tool.msbuild_name, {}) + + +class _Type(object): + """Type of settings (Base class).""" + + def ValidateMSVS(self, value): + """Verifies that the value is legal for MSVS. + + Args: + value: the value to check for this type. + + Raises: + ValueError if value is not valid for MSVS. + """ + + def ValidateMSBuild(self, value): + """Verifies that the value is legal for MSBuild. + + Args: + value: the value to check for this type. 
+ + Raises: + ValueError if value is not valid for MSBuild. + """ + + def ConvertToMSBuild(self, value): + """Returns the MSBuild equivalent of the MSVS value given. + + Args: + value: the MSVS value to convert. + + Returns: + the MSBuild equivalent. + + Raises: + ValueError if value is not valid. + """ + return value + + +class _String(_Type): + """A setting that's just a string.""" + + def ValidateMSVS(self, value): + if not isinstance(value, basestring): + raise ValueError('expected string; got %r' % value) + + def ValidateMSBuild(self, value): + if not isinstance(value, basestring): + raise ValueError('expected string; got %r' % value) + + def ConvertToMSBuild(self, value): + # Convert the macros + return ConvertVCMacrosToMSBuild(value) + + +class _StringList(_Type): + """A settings that's a list of strings.""" + + def ValidateMSVS(self, value): + if not isinstance(value, basestring) and not isinstance(value, list): + raise ValueError('expected string list; got %r' % value) + + def ValidateMSBuild(self, value): + if not isinstance(value, basestring) and not isinstance(value, list): + raise ValueError('expected string list; got %r' % value) + + def ConvertToMSBuild(self, value): + # Convert the macros + if isinstance(value, list): + return [ConvertVCMacrosToMSBuild(i) for i in value] + else: + return ConvertVCMacrosToMSBuild(value) + + +class _Boolean(_Type): + """Boolean settings, can have the values 'false' or 'true'.""" + + def _Validate(self, value): + if value != 'true' and value != 'false': + raise ValueError('expected bool; got %r' % value) + + def ValidateMSVS(self, value): + self._Validate(value) + + def ValidateMSBuild(self, value): + self._Validate(value) + + def ConvertToMSBuild(self, value): + self._Validate(value) + return value + + +class _Integer(_Type): + """Integer settings.""" + + def __init__(self, msbuild_base=10): + _Type.__init__(self) + self._msbuild_base = msbuild_base + + def ValidateMSVS(self, value): + # Try to convert, this will raise ValueError if invalid. + self.ConvertToMSBuild(value) + + def ValidateMSBuild(self, value): + # Try to convert, this will raise ValueError if invalid. + int(value, self._msbuild_base) + + def ConvertToMSBuild(self, value): + msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x' + return msbuild_format % int(value) + + +class _Enumeration(_Type): + """Type of settings that is an enumeration. + + In MSVS, the values are indexes like '0', '1', and '2'. + MSBuild uses text labels that are more representative, like 'Win32'. + + Constructor args: + label_list: an array of MSBuild labels that correspond to the MSVS index. + In the rare cases where MSVS has skipped an index value, None is + used in the array to indicate the unused spot. + new: an array of labels that are new to MSBuild. + """ + + def __init__(self, label_list, new=None): + _Type.__init__(self) + self._label_list = label_list + self._msbuild_values = set(value for value in label_list + if value is not None) + if new is not None: + self._msbuild_values.update(new) + + def ValidateMSVS(self, value): + # Try to convert. It will raise an exception if not valid. 
+ self.ConvertToMSBuild(value) + + def ValidateMSBuild(self, value): + if value not in self._msbuild_values: + raise ValueError('unrecognized enumerated value %s' % value) + + def ConvertToMSBuild(self, value): + index = int(value) + if index < 0 or index >= len(self._label_list): + raise ValueError('index value (%d) not in expected range [0, %d)' % + (index, len(self._label_list))) + label = self._label_list[index] + if label is None: + raise ValueError('converted value for %s not specified.' % value) + return label + + +# Instantiate the various generic types. +_boolean = _Boolean() +_integer = _Integer() +# For now, we don't do any special validation on these types: +_string = _String() +_file_name = _String() +_folder_name = _String() +_file_list = _StringList() +_folder_list = _StringList() +_string_list = _StringList() +# Some boolean settings went from numerical values to boolean. The +# mapping is 0: default, 1: false, 2: true. +_newly_boolean = _Enumeration(['', 'false', 'true']) + + +def _Same(tool, name, setting_type): + """Defines a setting that has the same name in MSVS and MSBuild. + + Args: + tool: a dictionary that gives the names of the tool for MSVS and MSBuild. + name: the name of the setting. + setting_type: the type of this setting. + """ + _Renamed(tool, name, name, setting_type) + + +def _Renamed(tool, msvs_name, msbuild_name, setting_type): + """Defines a setting for which the name has changed. + + Args: + tool: a dictionary that gives the names of the tool for MSVS and MSBuild. + msvs_name: the name of the MSVS setting. + msbuild_name: the name of the MSBuild setting. + setting_type: the type of this setting. + """ + + def _Translate(value, msbuild_settings): + msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool) + msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value) + + _msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS + _msbuild_validators[tool.msbuild_name][msbuild_name] = ( + setting_type.ValidateMSBuild) + _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate + + +def _Moved(tool, settings_name, msbuild_tool_name, setting_type): + _MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name, + setting_type) + + +def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name, + msbuild_settings_name, setting_type): + """Defines a setting that may have moved to a new section. + + Args: + tool: a dictionary that gives the names of the tool for MSVS and MSBuild. + msvs_settings_name: the MSVS name of the setting. + msbuild_tool_name: the name of the MSBuild tool to place the setting under. + msbuild_settings_name: the MSBuild name of the setting. + setting_type: the type of this setting. + """ + + def _Translate(value, msbuild_settings): + tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {}) + tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value) + + _msvs_validators[tool.msvs_name][msvs_settings_name] = ( + setting_type.ValidateMSVS) + validator = setting_type.ValidateMSBuild + _msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator + _msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate + + +def _MSVSOnly(tool, name, setting_type): + """Defines a setting that is only found in MSVS. + + Args: + tool: a dictionary that gives the names of the tool for MSVS and MSBuild. + name: the name of the setting. + setting_type: the type of this setting. 
+ """ + + def _Translate(unused_value, unused_msbuild_settings): + # Since this is for MSVS only settings, no translation will happen. + pass + + _msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS + _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate + + +def _MSBuildOnly(tool, name, setting_type): + """Defines a setting that is only found in MSBuild. + + Args: + tool: a dictionary that gives the names of the tool for MSVS and MSBuild. + name: the name of the setting. + setting_type: the type of this setting. + """ + + def _Translate(value, msbuild_settings): + # Let msbuild-only properties get translated as-is from msvs_settings. + tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {}) + tool_settings[name] = value + + _msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild + _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate + + +def _ConvertedToAdditionalOption(tool, msvs_name, flag): + """Defines a setting that's handled via a command line option in MSBuild. + + Args: + tool: a dictionary that gives the names of the tool for MSVS and MSBuild. + msvs_name: the name of the MSVS setting that if 'true' becomes a flag + flag: the flag to insert at the end of the AdditionalOptions + """ + + def _Translate(value, msbuild_settings): + if value == 'true': + tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool) + if 'AdditionalOptions' in tool_settings: + new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag) + else: + new_flags = flag + tool_settings['AdditionalOptions'] = new_flags + _msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS + _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate + + +def _CustomGeneratePreprocessedFile(tool, msvs_name): + def _Translate(value, msbuild_settings): + tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool) + if value == '0': + tool_settings['PreprocessToFile'] = 'false' + tool_settings['PreprocessSuppressLineNumbers'] = 'false' + elif value == '1': # /P + tool_settings['PreprocessToFile'] = 'true' + tool_settings['PreprocessSuppressLineNumbers'] = 'false' + elif value == '2': # /EP /P + tool_settings['PreprocessToFile'] = 'true' + tool_settings['PreprocessSuppressLineNumbers'] = 'true' + else: + raise ValueError('value must be one of [0, 1, 2]; got %s' % value) + # Create a bogus validator that looks for '0', '1', or '2' + msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS + _msvs_validators[tool.msvs_name][msvs_name] = msvs_validator + msbuild_validator = _boolean.ValidateMSBuild + msbuild_tool_validators = _msbuild_validators[tool.msbuild_name] + msbuild_tool_validators['PreprocessToFile'] = msbuild_validator + msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator + _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate + + +fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir') +fix_vc_macro_slashes_regex = re.compile( + r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list) +) + +# Regular expression to detect keys that were generated by exclusion lists +_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$') + + +def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr): + """Verify that 'setting' is valid if it is generated from an exclusion list. + + If the setting appears to be generated from an exclusion list, the root name + is checked. 
+ + Args: + setting: A string that is the setting name to validate + settings: A dictionary where the keys are valid settings + error_msg: The message to emit in the event of error + stderr: The stream receiving the error messages. + """ + # This may be unrecognized because it's an exclusion list. If the + # setting name has the _excluded suffix, then check the root name. + unrecognized = True + m = re.match(_EXCLUDED_SUFFIX_RE, setting) + if m: + root_setting = m.group(1) + unrecognized = root_setting not in settings + + if unrecognized: + # We don't know this setting. Give a warning. + print >> stderr, error_msg + + +def FixVCMacroSlashes(s): + """Replace macros which have excessive following slashes. + + These macros are known to have a built-in trailing slash. Furthermore, many + scripts hiccup on processing paths with extra slashes in the middle. + + This list is probably not exhaustive. Add as needed. + """ + if '$' in s: + s = fix_vc_macro_slashes_regex.sub(r'\1', s) + return s + + +def ConvertVCMacrosToMSBuild(s): + """Convert the the MSVS macros found in the string to the MSBuild equivalent. + + This list is probably not exhaustive. Add as needed. + """ + if '$' in s: + replace_map = { + '$(ConfigurationName)': '$(Configuration)', + '$(InputDir)': '%(RelativeDir)', + '$(InputExt)': '%(Extension)', + '$(InputFileName)': '%(Filename)%(Extension)', + '$(InputName)': '%(Filename)', + '$(InputPath)': '%(Identity)', + '$(ParentName)': '$(ProjectFileName)', + '$(PlatformName)': '$(Platform)', + '$(SafeInputName)': '%(Filename)', + } + for old, new in replace_map.iteritems(): + s = s.replace(old, new) + s = FixVCMacroSlashes(s) + return s + + +def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr): + """Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+). + + Args: + msvs_settings: A dictionary. The key is the tool name. The values are + themselves dictionaries of settings and their values. + stderr: The stream receiving the error messages. + + Returns: + A dictionary of MSBuild settings. The key is either the MSBuild tool name + or the empty string (for the global settings). The values are themselves + dictionaries of settings and their values. + """ + msbuild_settings = {} + for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems(): + if msvs_tool_name in _msvs_to_msbuild_converters: + msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name] + for msvs_setting, msvs_value in msvs_tool_settings.iteritems(): + if msvs_setting in msvs_tool: + # Invoke the translation function. + try: + msvs_tool[msvs_setting](msvs_value, msbuild_settings) + except ValueError, e: + print >> stderr, ('Warning: while converting %s/%s to MSBuild, ' + '%s' % (msvs_tool_name, msvs_setting, e)) + else: + _ValidateExclusionSetting(msvs_setting, + msvs_tool, + ('Warning: unrecognized setting %s/%s ' + 'while converting to MSBuild.' % + (msvs_tool_name, msvs_setting)), + stderr) + else: + print >> stderr, ('Warning: unrecognized tool %s while converting to ' + 'MSBuild.' % msvs_tool_name) + return msbuild_settings + + +def ValidateMSVSSettings(settings, stderr=sys.stderr): + """Validates that the names of the settings are valid for MSVS. + + Args: + settings: A dictionary. The key is the tool name. The values are + themselves dictionaries of settings and their values. + stderr: The stream receiving the error messages. 
+ """ + _ValidateSettings(_msvs_validators, settings, stderr) + + +def ValidateMSBuildSettings(settings, stderr=sys.stderr): + """Validates that the names of the settings are valid for MSBuild. + + Args: + settings: A dictionary. The key is the tool name. The values are + themselves dictionaries of settings and their values. + stderr: The stream receiving the error messages. + """ + _ValidateSettings(_msbuild_validators, settings, stderr) + + +def _ValidateSettings(validators, settings, stderr): + """Validates that the settings are valid for MSBuild or MSVS. + + We currently only validate the names of the settings, not their values. + + Args: + validators: A dictionary of tools and their validators. + settings: A dictionary. The key is the tool name. The values are + themselves dictionaries of settings and their values. + stderr: The stream receiving the error messages. + """ + for tool_name in settings: + if tool_name in validators: + tool_validators = validators[tool_name] + for setting, value in settings[tool_name].iteritems(): + if setting in tool_validators: + try: + tool_validators[setting](value) + except ValueError, e: + print >> stderr, ('Warning: for %s/%s, %s' % + (tool_name, setting, e)) + else: + _ValidateExclusionSetting(setting, + tool_validators, + ('Warning: unrecognized setting %s/%s' % + (tool_name, setting)), + stderr) + + else: + print >> stderr, ('Warning: unrecognized tool %s' % tool_name) + + +# MSVS and MBuild names of the tools. +_compile = _Tool('VCCLCompilerTool', 'ClCompile') +_link = _Tool('VCLinkerTool', 'Link') +_midl = _Tool('VCMIDLTool', 'Midl') +_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile') +_lib = _Tool('VCLibrarianTool', 'Lib') +_manifest = _Tool('VCManifestTool', 'Manifest') +_masm = _Tool('MASM', 'MASM') + + +_AddTool(_compile) +_AddTool(_link) +_AddTool(_midl) +_AddTool(_rc) +_AddTool(_lib) +_AddTool(_manifest) +_AddTool(_masm) +# Add sections only found in the MSBuild settings. +_msbuild_validators[''] = {} +_msbuild_validators['ProjectReference'] = {} +_msbuild_validators['ManifestResourceCompile'] = {} + +# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and +# ClCompile in MSBuild. +# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for +# the schema of the MSBuild ClCompile settings. 
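# [Editor's note] Illustrative sketch, not part of the upstream gyp file: how the
# helpers above wire a tool and a setting into the four module-level registries.
# The tool/setting names ('VCExampleTool', 'Example', 'ExampleFlag') are hypothetical
# and exist only for this demonstration; real registrations follow below.
import gyp.MSVSSettings as MSVSSettings

example = MSVSSettings._Tool('VCExampleTool', 'Example')
MSVSSettings._AddTool(example)                              # creates empty registry slots
MSVSSettings._Same(example, 'ExampleFlag', MSVSSettings._boolean)

assert MSVSSettings._msbuild_name_of_tool['VCExampleTool'] == 'Example'
assert 'ExampleFlag' in MSVSSettings._msvs_validators['VCExampleTool']
assert 'ExampleFlag' in MSVSSettings._msbuild_validators['Example']
assert 'ExampleFlag' in MSVSSettings._msvs_to_msbuild_converters['VCExampleTool']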
+ +# Options that have the same name in MSVS and MSBuild +_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I +_Same(_compile, 'AdditionalOptions', _string_list) +_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI +_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa +_Same(_compile, 'BrowseInformationFile', _file_name) +_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS +_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za +_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd +_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT +_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false' +_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx +_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except +_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope +_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI +_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU +_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc +_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X +_Same(_compile, 'MinimalRebuild', _boolean) # /Gm +_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl +_Same(_compile, 'OmitFramePointers', _boolean) # /Oy +_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D +_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd +_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR +_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes +_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc +_Same(_compile, 'StringPooling', _boolean) # /GF +_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo +_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t +_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u +_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U +_Same(_compile, 'UseFullPaths', _boolean) # /FC +_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL +_Same(_compile, 'XMLDocumentationFileName', _file_name) +_Same(_compile, 'CompileAsWinRT', _boolean) # /ZW + +_Same(_compile, 'AssemblerOutput', + _Enumeration(['NoListing', + 'AssemblyCode', # /FA + 'All', # /FAcs + 'AssemblyAndMachineCode', # /FAc + 'AssemblyAndSourceCode'])) # /FAs +_Same(_compile, 'BasicRuntimeChecks', + _Enumeration(['Default', + 'StackFrameRuntimeCheck', # /RTCs + 'UninitializedLocalUsageCheck', # /RTCu + 'EnableFastChecks'])) # /RTC1 +_Same(_compile, 'BrowseInformation', + _Enumeration(['false', + 'true', # /FR + 'true'])) # /Fr +_Same(_compile, 'CallingConvention', + _Enumeration(['Cdecl', # /Gd + 'FastCall', # /Gr + 'StdCall', # /Gz + 'VectorCall'])) # /Gv +_Same(_compile, 'CompileAs', + _Enumeration(['Default', + 'CompileAsC', # /TC + 'CompileAsCpp'])) # /TP +_Same(_compile, 'DebugInformationFormat', + _Enumeration(['', # Disabled + 'OldStyle', # /Z7 + None, + 'ProgramDatabase', # /Zi + 'EditAndContinue'])) # /ZI +_Same(_compile, 'EnableEnhancedInstructionSet', + _Enumeration(['NotSet', + 'StreamingSIMDExtensions', # /arch:SSE + 'StreamingSIMDExtensions2', # /arch:SSE2 + 'AdvancedVectorExtensions', # /arch:AVX (vs2012+) + 'NoExtensions', # /arch:IA32 (vs2012+) + # This one only exists in the new msbuild format. 
+ 'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+) + ])) +_Same(_compile, 'ErrorReporting', + _Enumeration(['None', # /errorReport:none + 'Prompt', # /errorReport:prompt + 'Queue'], # /errorReport:queue + new=['Send'])) # /errorReport:send" +_Same(_compile, 'ExceptionHandling', + _Enumeration(['false', + 'Sync', # /EHsc + 'Async'], # /EHa + new=['SyncCThrow'])) # /EHs +_Same(_compile, 'FavorSizeOrSpeed', + _Enumeration(['Neither', + 'Speed', # /Ot + 'Size'])) # /Os +_Same(_compile, 'FloatingPointModel', + _Enumeration(['Precise', # /fp:precise + 'Strict', # /fp:strict + 'Fast'])) # /fp:fast +_Same(_compile, 'InlineFunctionExpansion', + _Enumeration(['Default', + 'OnlyExplicitInline', # /Ob1 + 'AnySuitable'], # /Ob2 + new=['Disabled'])) # /Ob0 +_Same(_compile, 'Optimization', + _Enumeration(['Disabled', # /Od + 'MinSpace', # /O1 + 'MaxSpeed', # /O2 + 'Full'])) # /Ox +_Same(_compile, 'RuntimeLibrary', + _Enumeration(['MultiThreaded', # /MT + 'MultiThreadedDebug', # /MTd + 'MultiThreadedDLL', # /MD + 'MultiThreadedDebugDLL'])) # /MDd +_Same(_compile, 'StructMemberAlignment', + _Enumeration(['Default', + '1Byte', # /Zp1 + '2Bytes', # /Zp2 + '4Bytes', # /Zp4 + '8Bytes', # /Zp8 + '16Bytes'])) # /Zp16 +_Same(_compile, 'WarningLevel', + _Enumeration(['TurnOffAllWarnings', # /W0 + 'Level1', # /W1 + 'Level2', # /W2 + 'Level3', # /W3 + 'Level4'], # /W4 + new=['EnableAllWarnings'])) # /Wall + +# Options found in MSVS that have been renamed in MSBuild. +_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking', + _boolean) # /Gy +_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions', + _boolean) # /Oi +_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C +_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo +_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp +_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile', + _file_name) # Used with /Yc and /Yu +_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile', + _file_name) # /Fp +_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader', + _Enumeration(['NotUsing', # VS recognized '' for this value too. + 'Create', # /Yc + 'Use'])) # /Yu +_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX + +_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J') + +# MSVS options not found in MSBuild. +_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean) +_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean) + +# MSBuild options not found in MSVS. +_MSBuildOnly(_compile, 'BuildingInIDE', _boolean) +_MSBuildOnly(_compile, 'CompileAsManaged', + _Enumeration([], new=['false', + 'true'])) # /clr +_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch +_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP +_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi +_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors +_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name) +_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we +_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu + +# Defines a setting that needs very customized processing +_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile') + + +# Directives for converting MSVS VCLinkerTool to MSBuild Link. +# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for +# the schema of the MSBuild Link settings. 
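# [Editor's note] Illustrative sketch, not part of the upstream gyp file: how
# _Enumeration maps the numeric MSVS indexes above onto MSBuild labels, and how a
# None slot (a skipped index) is reported. Mirrors the Optimization and
# DebugInformationFormat tables defined above.
import gyp.MSVSSettings as MSVSSettings

optimization = MSVSSettings._Enumeration(['Disabled', 'MinSpace', 'MaxSpeed', 'Full'])
print optimization.ConvertToMSBuild('2')         # -> 'MaxSpeed'  (/O2)

debug_format = MSVSSettings._Enumeration(
    ['', 'OldStyle', None, 'ProgramDatabase', 'EditAndContinue'])
print debug_format.ConvertToMSBuild('3')         # -> 'ProgramDatabase'  (/Zi)
try:
  debug_format.ConvertToMSBuild('2')             # index 2 is a None slot
except ValueError, e:
  print e                                        # 'converted value for 2 not specified.'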
+ +# Options that have the same name in MSVS and MSBuild +_Same(_link, 'AdditionalDependencies', _file_list) +_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH +# /MANIFESTDEPENDENCY: +_Same(_link, 'AdditionalManifestDependencies', _file_list) +_Same(_link, 'AdditionalOptions', _string_list) +_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE +_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION +_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE +_Same(_link, 'BaseAddress', _string) # /BASE +_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK +_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD +_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN +_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE +_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC +_Same(_link, 'EntryPointSymbol', _string) # /ENTRY +_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE +_Same(_link, 'FunctionOrder', _file_name) # /ORDER +_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG +_Same(_link, 'GenerateMapFile', _boolean) # /MAP +_Same(_link, 'HeapCommitSize', _string) +_Same(_link, 'HeapReserveSize', _string) # /HEAP +_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB +_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL +_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB +_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER +_Same(_link, 'KeyFile', _file_name) # /KEYFILE +_Same(_link, 'ManifestFile', _file_name) # /ManifestFile +_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS +_Same(_link, 'MapFileName', _file_name) +_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT +_Same(_link, 'MergeSections', _string) # /MERGE +_Same(_link, 'MidlCommandFile', _file_name) # /MIDL +_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF +_Same(_link, 'OutputFile', _file_name) # /OUT +_Same(_link, 'PerUserRedirection', _boolean) +_Same(_link, 'Profile', _boolean) # /PROFILE +_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD +_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB +_Same(_link, 'RegisterOutput', _boolean) +_Same(_link, 'SetChecksum', _boolean) # /RELEASE +_Same(_link, 'StackCommitSize', _string) +_Same(_link, 'StackReserveSize', _string) # /STACK +_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED +_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD +_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO +_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD +_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY +_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT +_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID +_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true' +_Same(_link, 'Version', _string) # /VERSION + +_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF +_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED +_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE +_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF +_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE +_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE + +_subsystem_enumeration = _Enumeration( + ['NotSet', + 'Console', # /SUBSYSTEM:CONSOLE + 'Windows', # /SUBSYSTEM:WINDOWS + 'Native', # /SUBSYSTEM:NATIVE + 'EFI Application', # /SUBSYSTEM:EFI_APPLICATION + 'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER + 'EFI ROM', # 
/SUBSYSTEM:EFI_ROM + 'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER + 'WindowsCE'], # /SUBSYSTEM:WINDOWSCE + new=['POSIX']) # /SUBSYSTEM:POSIX + +_target_machine_enumeration = _Enumeration( + ['NotSet', + 'MachineX86', # /MACHINE:X86 + None, + 'MachineARM', # /MACHINE:ARM + 'MachineEBC', # /MACHINE:EBC + 'MachineIA64', # /MACHINE:IA64 + None, + 'MachineMIPS', # /MACHINE:MIPS + 'MachineMIPS16', # /MACHINE:MIPS16 + 'MachineMIPSFPU', # /MACHINE:MIPSFPU + 'MachineMIPSFPU16', # /MACHINE:MIPSFPU16 + None, + None, + None, + 'MachineSH4', # /MACHINE:SH4 + None, + 'MachineTHUMB', # /MACHINE:THUMB + 'MachineX64']) # /MACHINE:X64 + +_Same(_link, 'AssemblyDebug', + _Enumeration(['', + 'true', # /ASSEMBLYDEBUG + 'false'])) # /ASSEMBLYDEBUG:DISABLE +_Same(_link, 'CLRImageType', + _Enumeration(['Default', + 'ForceIJWImage', # /CLRIMAGETYPE:IJW + 'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE + 'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE +_Same(_link, 'CLRThreadAttribute', + _Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE + 'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA + 'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA +_Same(_link, 'DataExecutionPrevention', + _Enumeration(['', + 'false', # /NXCOMPAT:NO + 'true'])) # /NXCOMPAT +_Same(_link, 'Driver', + _Enumeration(['NotSet', + 'Driver', # /Driver + 'UpOnly', # /DRIVER:UPONLY + 'WDM'])) # /DRIVER:WDM +_Same(_link, 'LinkTimeCodeGeneration', + _Enumeration(['Default', + 'UseLinkTimeCodeGeneration', # /LTCG + 'PGInstrument', # /LTCG:PGInstrument + 'PGOptimization', # /LTCG:PGOptimize + 'PGUpdate'])) # /LTCG:PGUpdate +_Same(_link, 'ShowProgress', + _Enumeration(['NotSet', + 'LinkVerbose', # /VERBOSE + 'LinkVerboseLib'], # /VERBOSE:Lib + new=['LinkVerboseICF', # /VERBOSE:ICF + 'LinkVerboseREF', # /VERBOSE:REF + 'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH + 'LinkVerboseCLR'])) # /VERBOSE:CLR +_Same(_link, 'SubSystem', _subsystem_enumeration) +_Same(_link, 'TargetMachine', _target_machine_enumeration) +_Same(_link, 'UACExecutionLevel', + _Enumeration(['AsInvoker', # /level='asInvoker' + 'HighestAvailable', # /level='highestAvailable' + 'RequireAdministrator'])) # /level='requireAdministrator' +_Same(_link, 'MinimumRequiredVersion', _string) +_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX + + +# Options found in MSVS that have been renamed in MSBuild. +_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting', + _Enumeration(['NoErrorReport', # /ERRORREPORT:NONE + 'PromptImmediately', # /ERRORREPORT:PROMPT + 'QueueForNextLogin'], # /ERRORREPORT:QUEUE + new=['SendErrorReport'])) # /ERRORREPORT:SEND +_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries', + _file_list) # /NODEFAULTLIB +_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY +_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET + +_Moved(_link, 'GenerateManifest', '', _boolean) +_Moved(_link, 'IgnoreImportLibrary', '', _boolean) +_Moved(_link, 'LinkIncremental', '', _newly_boolean) +_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean) +_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean) + +# MSVS options not found in MSBuild. +_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean) +_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean) + +# MSBuild options not found in MSVS. 
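# [Editor's note] Illustrative sketch, not part of the upstream gyp file: the effect
# of the _Renamed and _Moved linker directives above when run through
# ConvertToMSBuildSettings. GenerateManifest is routed to the global ('') section,
# and ResourceOnlyDLL is renamed to NoEntryPoint under Link.
import sys
import gyp.MSVSSettings as MSVSSettings

msvs = {'VCLinkerTool': {'GenerateManifest': 'true',
                         'ResourceOnlyDLL': 'true'}}
msbuild = MSVSSettings.ConvertToMSBuildSettings(msvs, stderr=sys.stderr)
# Expected result, per the directives above:
#   {'': {'GenerateManifest': 'true'}, 'Link': {'NoEntryPoint': 'true'}}
print msbuild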
+_MSBuildOnly(_link, 'BuildingInIDE', _boolean) +_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH +_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false' +_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS +_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND +_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND +_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name) +_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false' +_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN +_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION +_MSBuildOnly(_link, 'ForceFileOutput', + _Enumeration([], new=['Enabled', # /FORCE + # /FORCE:MULTIPLE + 'MultiplyDefinedSymbolOnly', + 'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED +_MSBuildOnly(_link, 'CreateHotPatchableImage', + _Enumeration([], new=['Enabled', # /FUNCTIONPADMIN + 'X86Image', # /FUNCTIONPADMIN:5 + 'X64Image', # /FUNCTIONPADMIN:6 + 'ItaniumImage'])) # /FUNCTIONPADMIN:16 +_MSBuildOnly(_link, 'CLRSupportLastError', + _Enumeration([], new=['Enabled', # /CLRSupportLastError + 'Disabled', # /CLRSupportLastError:NO + # /CLRSupportLastError:SYSTEMDLL + 'SystemDlls'])) + + +# Directives for converting VCResourceCompilerTool to ResourceCompile. +# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for +# the schema of the MSBuild ResourceCompile settings. + +_Same(_rc, 'AdditionalOptions', _string_list) +_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I +_Same(_rc, 'Culture', _Integer(msbuild_base=16)) +_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X +_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D +_Same(_rc, 'ResourceOutputFileName', _string) # /fo +_Same(_rc, 'ShowProgress', _boolean) # /v +# There is no UI in VisualStudio 2008 to set the following properties. +# However they are found in CL and other tools. Include them here for +# completeness, as they are very likely to have the same usage pattern. +_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo +_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u + +# MSBuild options not found in MSVS. +_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n +_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name) + + +# Directives for converting VCMIDLTool to Midl. +# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for +# the schema of the MSBuild Midl settings. 
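# [Editor's note] Illustrative sketch, not part of the upstream gyp file: the
# ResourceCompile Culture setting above uses _Integer(msbuild_base=16), so a decimal
# MSVS value is rewritten as a zero-padded hex literal for MSBuild.
import gyp.MSVSSettings as MSVSSettings

culture = MSVSSettings._Integer(msbuild_base=16)
print culture.ConvertToMSBuild('1003')   # -> '0x03eb'
culture.ValidateMSBuild('0x03eb')        # accepted: parsed with int(value, 16)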
+ +_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I +_Same(_midl, 'AdditionalOptions', _string_list) +_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt +_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation +_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check +_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum +_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref +_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data +_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf +_Same(_midl, 'GenerateTypeLibrary', _boolean) +_Same(_midl, 'HeaderFileName', _file_name) # /h +_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir +_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid +_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203 +_Same(_midl, 'OutputDirectory', _string) # /out +_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D +_Same(_midl, 'ProxyFileName', _file_name) # /proxy +_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o +_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo +_Same(_midl, 'TypeLibraryName', _file_name) # /tlb +_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U +_Same(_midl, 'WarnAsError', _boolean) # /WX + +_Same(_midl, 'DefaultCharType', + _Enumeration(['Unsigned', # /char unsigned + 'Signed', # /char signed + 'Ascii'])) # /char ascii7 +_Same(_midl, 'TargetEnvironment', + _Enumeration(['NotSet', + 'Win32', # /env win32 + 'Itanium', # /env ia64 + 'X64'])) # /env x64 +_Same(_midl, 'EnableErrorChecks', + _Enumeration(['EnableCustom', + 'None', # /error none + 'All'])) # /error all +_Same(_midl, 'StructMemberAlignment', + _Enumeration(['NotSet', + '1', # Zp1 + '2', # Zp2 + '4', # Zp4 + '8'])) # Zp8 +_Same(_midl, 'WarningLevel', + _Enumeration(['0', # /W0 + '1', # /W1 + '2', # /W2 + '3', # /W3 + '4'])) # /W4 + +_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata +_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters', + _boolean) # /robust + +# MSBuild options not found in MSVS. +_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config +_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub +_MSBuildOnly(_midl, 'GenerateClientFiles', + _Enumeration([], new=['Stub', # /client stub + 'None'])) # /client none +_MSBuildOnly(_midl, 'GenerateServerFiles', + _Enumeration([], new=['Stub', # /client stub + 'None'])) # /client none +_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL +_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub +_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn +_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name) +_MSBuildOnly(_midl, 'TypeLibFormat', + _Enumeration([], new=['NewFormat', # /newtlb + 'OldFormat'])) # /oldtlb + + +# Directives for converting VCLibrarianTool to Lib. +# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for +# the schema of the MSBuild Lib settings. 
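# [Editor's note] Illustrative sketch, not part of the upstream gyp file: settings
# declared with _MSBuildOnly, such as the Midl TypeLibFormat directive above, may
# still appear in msvs_settings; per the _MSBuildOnly translator they are copied to
# the MSBuild tool section as-is rather than converted.
import sys
import gyp.MSVSSettings as MSVSSettings

msvs = {'VCMIDLTool': {'TypeLibFormat': 'NewFormat'}}
msbuild = MSVSSettings.ConvertToMSBuildSettings(msvs, stderr=sys.stderr)
# Expected result: {'Midl': {'TypeLibFormat': 'NewFormat'}}
print msbuild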
+ +_Same(_lib, 'AdditionalDependencies', _file_list) +_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH +_Same(_lib, 'AdditionalOptions', _string_list) +_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT +_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE +_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB +_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB +_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF +_Same(_lib, 'OutputFile', _file_name) # /OUT +_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO +_Same(_lib, 'UseUnicodeResponseFiles', _boolean) +_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG +_Same(_lib, 'TargetMachine', _target_machine_enumeration) + +# TODO(jeanluc) _link defines the same value that gets moved to +# ProjectReference. We may want to validate that they are consistent. +_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean) + +_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false' +_MSBuildOnly(_lib, 'ErrorReporting', + _Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT + 'QueueForNextLogin', # /ERRORREPORT:QUEUE + 'SendErrorReport', # /ERRORREPORT:SEND + 'NoErrorReport'])) # /ERRORREPORT:NONE +_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string) +_MSBuildOnly(_lib, 'Name', _file_name) # /NAME +_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE +_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration) +_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name) +_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX +_MSBuildOnly(_lib, 'Verbose', _boolean) + + +# Directives for converting VCManifestTool to Mt. +# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for +# the schema of the MSBuild Lib settings. + +# Options that have the same name in MSVS and MSBuild +_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest +_Same(_manifest, 'AdditionalOptions', _string_list) +_Same(_manifest, 'AssemblyIdentity', _string) # /identity: +_Same(_manifest, 'ComponentFileName', _file_name) # /dll +_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs +_Same(_manifest, 'InputResourceManifests', _string) # /inputresource +_Same(_manifest, 'OutputManifestFile', _file_name) # /out +_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs +_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements +_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo +_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb: +_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate +_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name) +_Same(_manifest, 'VerboseOutput', _boolean) # /verbose + +# Options that have moved location. +_MovedAndRenamed(_manifest, 'ManifestResourceFile', + 'ManifestResourceCompile', + 'ResourceOutputFileName', + _file_name) +_Moved(_manifest, 'EmbedManifest', '', _boolean) + +# MSVS options not found in MSBuild. +_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name) +_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean) +_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean) + +# MSBuild options not found in MSVS. 
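# [Editor's note] Illustrative sketch, not part of the upstream gyp file: the manifest
# directives above relocate settings across sections. ManifestResourceFile becomes
# ResourceOutputFileName under ManifestResourceCompile, and EmbedManifest moves to the
# global ('') section. The file name 'example.manifest.res' is hypothetical.
import sys
import gyp.MSVSSettings as MSVSSettings

msvs = {'VCManifestTool': {'ManifestResourceFile': 'example.manifest.res',
                           'EmbedManifest': 'true'}}
msbuild = MSVSSettings.ConvertToMSBuildSettings(msvs, stderr=sys.stderr)
# Expected result, per the directives above:
#   {'ManifestResourceCompile': {'ResourceOutputFileName': 'example.manifest.res'},
#    '': {'EmbedManifest': 'true'}}
print msbuild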
+_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean) +_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category +_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly', + _file_name) # /managedassemblyname +_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource +_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency +_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name) + + +# Directives for MASM. +# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the +# MSBuild MASM settings. + +# Options that have the same name in MSVS and MSBuild. +_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSSettings_test.py b/deps/libuv/build/gyp/pylib/gyp/MSVSSettings_test.py new file mode 100755 index 00000000..bf6ea6b8 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSSettings_test.py @@ -0,0 +1,1483 @@ +#!/usr/bin/env python + +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Unit tests for the MSVSSettings.py file.""" + +import StringIO +import unittest +import gyp.MSVSSettings as MSVSSettings + + +class TestSequenceFunctions(unittest.TestCase): + + def setUp(self): + self.stderr = StringIO.StringIO() + + def _ExpectedWarnings(self, expected): + """Compares recorded lines to expected warnings.""" + self.stderr.seek(0) + actual = self.stderr.read().split('\n') + actual = [line for line in actual if line] + self.assertEqual(sorted(expected), sorted(actual)) + + def testValidateMSVSSettings_tool_names(self): + """Tests that only MSVS tool names are allowed.""" + MSVSSettings.ValidateMSVSSettings( + {'VCCLCompilerTool': {}, + 'VCLinkerTool': {}, + 'VCMIDLTool': {}, + 'foo': {}, + 'VCResourceCompilerTool': {}, + 'VCLibrarianTool': {}, + 'VCManifestTool': {}, + 'ClCompile': {}}, + self.stderr) + self._ExpectedWarnings([ + 'Warning: unrecognized tool foo', + 'Warning: unrecognized tool ClCompile']) + + def testValidateMSVSSettings_settings(self): + """Tests that for invalid MSVS settings.""" + MSVSSettings.ValidateMSVSSettings( + {'VCCLCompilerTool': { + 'AdditionalIncludeDirectories': 'folder1;folder2', + 'AdditionalOptions': ['string1', 'string2'], + 'AdditionalUsingDirectories': 'folder1;folder2', + 'AssemblerListingLocation': 'a_file_name', + 'AssemblerOutput': '0', + 'BasicRuntimeChecks': '5', + 'BrowseInformation': 'fdkslj', + 'BrowseInformationFile': 'a_file_name', + 'BufferSecurityCheck': 'true', + 'CallingConvention': '-1', + 'CompileAs': '1', + 'DebugInformationFormat': '2', + 'DefaultCharIsUnsigned': 'true', + 'Detect64BitPortabilityProblems': 'true', + 'DisableLanguageExtensions': 'true', + 'DisableSpecificWarnings': 'string1;string2', + 'EnableEnhancedInstructionSet': '1', + 'EnableFiberSafeOptimizations': 'true', + 'EnableFunctionLevelLinking': 'true', + 'EnableIntrinsicFunctions': 'true', + 'EnablePREfast': 'true', + 'Enableprefast': 'bogus', + 'ErrorReporting': '1', + 'ExceptionHandling': '1', + 'ExpandAttributedSource': 'true', + 'FavorSizeOrSpeed': '1', + 'FloatingPointExceptions': 'true', + 'FloatingPointModel': '1', + 'ForceConformanceInForLoopScope': 'true', + 'ForcedIncludeFiles': 'file1;file2', + 'ForcedUsingFiles': 'file1;file2', + 'GeneratePreprocessedFile': '1', + 'GenerateXMLDocumentationFiles': 'true', + 'IgnoreStandardIncludePath': 'true', + 'InlineFunctionExpansion': '1', + 'KeepComments': 'true', + 'MinimalRebuild': 
'true', + 'ObjectFile': 'a_file_name', + 'OmitDefaultLibName': 'true', + 'OmitFramePointers': 'true', + 'OpenMP': 'true', + 'Optimization': '1', + 'PrecompiledHeaderFile': 'a_file_name', + 'PrecompiledHeaderThrough': 'a_file_name', + 'PreprocessorDefinitions': 'string1;string2', + 'ProgramDataBaseFileName': 'a_file_name', + 'RuntimeLibrary': '1', + 'RuntimeTypeInfo': 'true', + 'ShowIncludes': 'true', + 'SmallerTypeCheck': 'true', + 'StringPooling': 'true', + 'StructMemberAlignment': '1', + 'SuppressStartupBanner': 'true', + 'TreatWChar_tAsBuiltInType': 'true', + 'UndefineAllPreprocessorDefinitions': 'true', + 'UndefinePreprocessorDefinitions': 'string1;string2', + 'UseFullPaths': 'true', + 'UsePrecompiledHeader': '1', + 'UseUnicodeResponseFiles': 'true', + 'WarnAsError': 'true', + 'WarningLevel': '1', + 'WholeProgramOptimization': 'true', + 'XMLDocumentationFileName': 'a_file_name', + 'ZZXYZ': 'bogus'}, + 'VCLinkerTool': { + 'AdditionalDependencies': 'file1;file2', + 'AdditionalDependencies_excluded': 'file3', + 'AdditionalLibraryDirectories': 'folder1;folder2', + 'AdditionalManifestDependencies': 'file1;file2', + 'AdditionalOptions': 'a string1', + 'AddModuleNamesToAssembly': 'file1;file2', + 'AllowIsolation': 'true', + 'AssemblyDebug': '2', + 'AssemblyLinkResource': 'file1;file2', + 'BaseAddress': 'a string1', + 'CLRImageType': '2', + 'CLRThreadAttribute': '2', + 'CLRUnmanagedCodeCheck': 'true', + 'DataExecutionPrevention': '2', + 'DelayLoadDLLs': 'file1;file2', + 'DelaySign': 'true', + 'Driver': '2', + 'EmbedManagedResourceFile': 'file1;file2', + 'EnableCOMDATFolding': '2', + 'EnableUAC': 'true', + 'EntryPointSymbol': 'a string1', + 'ErrorReporting': '2', + 'FixedBaseAddress': '2', + 'ForceSymbolReferences': 'file1;file2', + 'FunctionOrder': 'a_file_name', + 'GenerateDebugInformation': 'true', + 'GenerateManifest': 'true', + 'GenerateMapFile': 'true', + 'HeapCommitSize': 'a string1', + 'HeapReserveSize': 'a string1', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreDefaultLibraryNames': 'file1;file2', + 'IgnoreEmbeddedIDL': 'true', + 'IgnoreImportLibrary': 'true', + 'ImportLibrary': 'a_file_name', + 'KeyContainer': 'a_file_name', + 'KeyFile': 'a_file_name', + 'LargeAddressAware': '2', + 'LinkIncremental': '2', + 'LinkLibraryDependencies': 'true', + 'LinkTimeCodeGeneration': '2', + 'ManifestFile': 'a_file_name', + 'MapExports': 'true', + 'MapFileName': 'a_file_name', + 'MergedIDLBaseFileName': 'a_file_name', + 'MergeSections': 'a string1', + 'MidlCommandFile': 'a_file_name', + 'ModuleDefinitionFile': 'a_file_name', + 'OptimizeForWindows98': '1', + 'OptimizeReferences': '2', + 'OutputFile': 'a_file_name', + 'PerUserRedirection': 'true', + 'Profile': 'true', + 'ProfileGuidedDatabase': 'a_file_name', + 'ProgramDatabaseFile': 'a_file_name', + 'RandomizedBaseAddress': '2', + 'RegisterOutput': 'true', + 'ResourceOnlyDLL': 'true', + 'SetChecksum': 'true', + 'ShowProgress': '2', + 'StackCommitSize': 'a string1', + 'StackReserveSize': 'a string1', + 'StripPrivateSymbols': 'a_file_name', + 'SubSystem': '2', + 'SupportUnloadOfDelayLoadedDLL': 'true', + 'SuppressStartupBanner': 'true', + 'SwapRunFromCD': 'true', + 'SwapRunFromNet': 'true', + 'TargetMachine': '2', + 'TerminalServerAware': '2', + 'TurnOffAssemblyGeneration': 'true', + 'TypeLibraryFile': 'a_file_name', + 'TypeLibraryResourceID': '33', + 'UACExecutionLevel': '2', + 'UACUIAccess': 'true', + 'UseLibraryDependencyInputs': 'true', + 'UseUnicodeResponseFiles': 'true', + 'Version': 'a string1'}, + 'VCMIDLTool': { + 
'AdditionalIncludeDirectories': 'folder1;folder2', + 'AdditionalOptions': 'a string1', + 'CPreprocessOptions': 'a string1', + 'DefaultCharType': '1', + 'DLLDataFileName': 'a_file_name', + 'EnableErrorChecks': '1', + 'ErrorCheckAllocations': 'true', + 'ErrorCheckBounds': 'true', + 'ErrorCheckEnumRange': 'true', + 'ErrorCheckRefPointers': 'true', + 'ErrorCheckStubData': 'true', + 'GenerateStublessProxies': 'true', + 'GenerateTypeLibrary': 'true', + 'HeaderFileName': 'a_file_name', + 'IgnoreStandardIncludePath': 'true', + 'InterfaceIdentifierFileName': 'a_file_name', + 'MkTypLibCompatible': 'true', + 'notgood': 'bogus', + 'OutputDirectory': 'a string1', + 'PreprocessorDefinitions': 'string1;string2', + 'ProxyFileName': 'a_file_name', + 'RedirectOutputAndErrors': 'a_file_name', + 'StructMemberAlignment': '1', + 'SuppressStartupBanner': 'true', + 'TargetEnvironment': '1', + 'TypeLibraryName': 'a_file_name', + 'UndefinePreprocessorDefinitions': 'string1;string2', + 'ValidateParameters': 'true', + 'WarnAsError': 'true', + 'WarningLevel': '1'}, + 'VCResourceCompilerTool': { + 'AdditionalOptions': 'a string1', + 'AdditionalIncludeDirectories': 'folder1;folder2', + 'Culture': '1003', + 'IgnoreStandardIncludePath': 'true', + 'notgood2': 'bogus', + 'PreprocessorDefinitions': 'string1;string2', + 'ResourceOutputFileName': 'a string1', + 'ShowProgress': 'true', + 'SuppressStartupBanner': 'true', + 'UndefinePreprocessorDefinitions': 'string1;string2'}, + 'VCLibrarianTool': { + 'AdditionalDependencies': 'file1;file2', + 'AdditionalLibraryDirectories': 'folder1;folder2', + 'AdditionalOptions': 'a string1', + 'ExportNamedFunctions': 'string1;string2', + 'ForceSymbolReferences': 'a string1', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreSpecificDefaultLibraries': 'file1;file2', + 'LinkLibraryDependencies': 'true', + 'ModuleDefinitionFile': 'a_file_name', + 'OutputFile': 'a_file_name', + 'SuppressStartupBanner': 'true', + 'UseUnicodeResponseFiles': 'true'}, + 'VCManifestTool': { + 'AdditionalManifestFiles': 'file1;file2', + 'AdditionalOptions': 'a string1', + 'AssemblyIdentity': 'a string1', + 'ComponentFileName': 'a_file_name', + 'DependencyInformationFile': 'a_file_name', + 'GenerateCatalogFiles': 'true', + 'InputResourceManifests': 'a string1', + 'ManifestResourceFile': 'a_file_name', + 'OutputManifestFile': 'a_file_name', + 'RegistrarScriptFile': 'a_file_name', + 'ReplacementsFile': 'a_file_name', + 'SuppressStartupBanner': 'true', + 'TypeLibraryFile': 'a_file_name', + 'UpdateFileHashes': 'truel', + 'UpdateFileHashesSearchPath': 'a_file_name', + 'UseFAT32Workaround': 'true', + 'UseUnicodeResponseFiles': 'true', + 'VerboseOutput': 'true'}}, + self.stderr) + self._ExpectedWarnings([ + 'Warning: for VCCLCompilerTool/BasicRuntimeChecks, ' + 'index value (5) not in expected range [0, 4)', + 'Warning: for VCCLCompilerTool/BrowseInformation, ' + "invalid literal for int() with base 10: 'fdkslj'", + 'Warning: for VCCLCompilerTool/CallingConvention, ' + 'index value (-1) not in expected range [0, 4)', + 'Warning: for VCCLCompilerTool/DebugInformationFormat, ' + 'converted value for 2 not specified.', + 'Warning: unrecognized setting VCCLCompilerTool/Enableprefast', + 'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ', + 'Warning: for VCLinkerTool/TargetMachine, ' + 'converted value for 2 not specified.', + 'Warning: unrecognized setting VCMIDLTool/notgood', + 'Warning: unrecognized setting VCResourceCompilerTool/notgood2', + 'Warning: for VCManifestTool/UpdateFileHashes, ' + "expected bool; got 'truel'" + 
'']) + + def testValidateMSBuildSettings_settings(self): + """Tests that for invalid MSBuild settings.""" + MSVSSettings.ValidateMSBuildSettings( + {'ClCompile': { + 'AdditionalIncludeDirectories': 'folder1;folder2', + 'AdditionalOptions': ['string1', 'string2'], + 'AdditionalUsingDirectories': 'folder1;folder2', + 'AssemblerListingLocation': 'a_file_name', + 'AssemblerOutput': 'NoListing', + 'BasicRuntimeChecks': 'StackFrameRuntimeCheck', + 'BrowseInformation': 'false', + 'BrowseInformationFile': 'a_file_name', + 'BufferSecurityCheck': 'true', + 'BuildingInIDE': 'true', + 'CallingConvention': 'Cdecl', + 'CompileAs': 'CompileAsC', + 'CompileAsManaged': 'true', + 'CreateHotpatchableImage': 'true', + 'DebugInformationFormat': 'ProgramDatabase', + 'DisableLanguageExtensions': 'true', + 'DisableSpecificWarnings': 'string1;string2', + 'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions', + 'EnableFiberSafeOptimizations': 'true', + 'EnablePREfast': 'true', + 'Enableprefast': 'bogus', + 'ErrorReporting': 'Prompt', + 'ExceptionHandling': 'SyncCThrow', + 'ExpandAttributedSource': 'true', + 'FavorSizeOrSpeed': 'Neither', + 'FloatingPointExceptions': 'true', + 'FloatingPointModel': 'Precise', + 'ForceConformanceInForLoopScope': 'true', + 'ForcedIncludeFiles': 'file1;file2', + 'ForcedUsingFiles': 'file1;file2', + 'FunctionLevelLinking': 'false', + 'GenerateXMLDocumentationFiles': 'true', + 'IgnoreStandardIncludePath': 'true', + 'InlineFunctionExpansion': 'OnlyExplicitInline', + 'IntrinsicFunctions': 'false', + 'MinimalRebuild': 'true', + 'MultiProcessorCompilation': 'true', + 'ObjectFileName': 'a_file_name', + 'OmitDefaultLibName': 'true', + 'OmitFramePointers': 'true', + 'OpenMPSupport': 'true', + 'Optimization': 'Disabled', + 'PrecompiledHeader': 'NotUsing', + 'PrecompiledHeaderFile': 'a_file_name', + 'PrecompiledHeaderOutputFile': 'a_file_name', + 'PreprocessKeepComments': 'true', + 'PreprocessorDefinitions': 'string1;string2', + 'PreprocessOutputPath': 'a string1', + 'PreprocessSuppressLineNumbers': 'false', + 'PreprocessToFile': 'false', + 'ProcessorNumber': '33', + 'ProgramDataBaseFileName': 'a_file_name', + 'RuntimeLibrary': 'MultiThreaded', + 'RuntimeTypeInfo': 'true', + 'ShowIncludes': 'true', + 'SmallerTypeCheck': 'true', + 'StringPooling': 'true', + 'StructMemberAlignment': '1Byte', + 'SuppressStartupBanner': 'true', + 'TrackerLogDirectory': 'a_folder', + 'TreatSpecificWarningsAsErrors': 'string1;string2', + 'TreatWarningAsError': 'true', + 'TreatWChar_tAsBuiltInType': 'true', + 'UndefineAllPreprocessorDefinitions': 'true', + 'UndefinePreprocessorDefinitions': 'string1;string2', + 'UseFullPaths': 'true', + 'UseUnicodeForAssemblerListing': 'true', + 'WarningLevel': 'TurnOffAllWarnings', + 'WholeProgramOptimization': 'true', + 'XMLDocumentationFileName': 'a_file_name', + 'ZZXYZ': 'bogus'}, + 'Link': { + 'AdditionalDependencies': 'file1;file2', + 'AdditionalLibraryDirectories': 'folder1;folder2', + 'AdditionalManifestDependencies': 'file1;file2', + 'AdditionalOptions': 'a string1', + 'AddModuleNamesToAssembly': 'file1;file2', + 'AllowIsolation': 'true', + 'AssemblyDebug': '', + 'AssemblyLinkResource': 'file1;file2', + 'BaseAddress': 'a string1', + 'BuildingInIDE': 'true', + 'CLRImageType': 'ForceIJWImage', + 'CLRSupportLastError': 'Enabled', + 'CLRThreadAttribute': 'MTAThreadingAttribute', + 'CLRUnmanagedCodeCheck': 'true', + 'CreateHotPatchableImage': 'X86Image', + 'DataExecutionPrevention': 'false', + 'DelayLoadDLLs': 'file1;file2', + 'DelaySign': 'true', + 'Driver': 'NotSet', + 
'EmbedManagedResourceFile': 'file1;file2', + 'EnableCOMDATFolding': 'false', + 'EnableUAC': 'true', + 'EntryPointSymbol': 'a string1', + 'FixedBaseAddress': 'false', + 'ForceFileOutput': 'Enabled', + 'ForceSymbolReferences': 'file1;file2', + 'FunctionOrder': 'a_file_name', + 'GenerateDebugInformation': 'true', + 'GenerateMapFile': 'true', + 'HeapCommitSize': 'a string1', + 'HeapReserveSize': 'a string1', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreEmbeddedIDL': 'true', + 'IgnoreSpecificDefaultLibraries': 'a_file_list', + 'ImageHasSafeExceptionHandlers': 'true', + 'ImportLibrary': 'a_file_name', + 'KeyContainer': 'a_file_name', + 'KeyFile': 'a_file_name', + 'LargeAddressAware': 'false', + 'LinkDLL': 'true', + 'LinkErrorReporting': 'SendErrorReport', + 'LinkStatus': 'true', + 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration', + 'ManifestFile': 'a_file_name', + 'MapExports': 'true', + 'MapFileName': 'a_file_name', + 'MergedIDLBaseFileName': 'a_file_name', + 'MergeSections': 'a string1', + 'MidlCommandFile': 'a_file_name', + 'MinimumRequiredVersion': 'a string1', + 'ModuleDefinitionFile': 'a_file_name', + 'MSDOSStubFileName': 'a_file_name', + 'NoEntryPoint': 'true', + 'OptimizeReferences': 'false', + 'OutputFile': 'a_file_name', + 'PerUserRedirection': 'true', + 'PreventDllBinding': 'true', + 'Profile': 'true', + 'ProfileGuidedDatabase': 'a_file_name', + 'ProgramDatabaseFile': 'a_file_name', + 'RandomizedBaseAddress': 'false', + 'RegisterOutput': 'true', + 'SectionAlignment': '33', + 'SetChecksum': 'true', + 'ShowProgress': 'LinkVerboseREF', + 'SpecifySectionAttributes': 'a string1', + 'StackCommitSize': 'a string1', + 'StackReserveSize': 'a string1', + 'StripPrivateSymbols': 'a_file_name', + 'SubSystem': 'Console', + 'SupportNobindOfDelayLoadedDLL': 'true', + 'SupportUnloadOfDelayLoadedDLL': 'true', + 'SuppressStartupBanner': 'true', + 'SwapRunFromCD': 'true', + 'SwapRunFromNET': 'true', + 'TargetMachine': 'MachineX86', + 'TerminalServerAware': 'false', + 'TrackerLogDirectory': 'a_folder', + 'TreatLinkerWarningAsErrors': 'true', + 'TurnOffAssemblyGeneration': 'true', + 'TypeLibraryFile': 'a_file_name', + 'TypeLibraryResourceID': '33', + 'UACExecutionLevel': 'AsInvoker', + 'UACUIAccess': 'true', + 'Version': 'a string1'}, + 'ResourceCompile': { + 'AdditionalIncludeDirectories': 'folder1;folder2', + 'AdditionalOptions': 'a string1', + 'Culture': '0x236', + 'IgnoreStandardIncludePath': 'true', + 'NullTerminateStrings': 'true', + 'PreprocessorDefinitions': 'string1;string2', + 'ResourceOutputFileName': 'a string1', + 'ShowProgress': 'true', + 'SuppressStartupBanner': 'true', + 'TrackerLogDirectory': 'a_folder', + 'UndefinePreprocessorDefinitions': 'string1;string2'}, + 'Midl': { + 'AdditionalIncludeDirectories': 'folder1;folder2', + 'AdditionalOptions': 'a string1', + 'ApplicationConfigurationMode': 'true', + 'ClientStubFile': 'a_file_name', + 'CPreprocessOptions': 'a string1', + 'DefaultCharType': 'Signed', + 'DllDataFileName': 'a_file_name', + 'EnableErrorChecks': 'EnableCustom', + 'ErrorCheckAllocations': 'true', + 'ErrorCheckBounds': 'true', + 'ErrorCheckEnumRange': 'true', + 'ErrorCheckRefPointers': 'true', + 'ErrorCheckStubData': 'true', + 'GenerateClientFiles': 'Stub', + 'GenerateServerFiles': 'None', + 'GenerateStublessProxies': 'true', + 'GenerateTypeLibrary': 'true', + 'HeaderFileName': 'a_file_name', + 'IgnoreStandardIncludePath': 'true', + 'InterfaceIdentifierFileName': 'a_file_name', + 'LocaleID': '33', + 'MkTypLibCompatible': 'true', + 'OutputDirectory': 'a string1', + 
'PreprocessorDefinitions': 'string1;string2', + 'ProxyFileName': 'a_file_name', + 'RedirectOutputAndErrors': 'a_file_name', + 'ServerStubFile': 'a_file_name', + 'StructMemberAlignment': 'NotSet', + 'SuppressCompilerWarnings': 'true', + 'SuppressStartupBanner': 'true', + 'TargetEnvironment': 'Itanium', + 'TrackerLogDirectory': 'a_folder', + 'TypeLibFormat': 'NewFormat', + 'TypeLibraryName': 'a_file_name', + 'UndefinePreprocessorDefinitions': 'string1;string2', + 'ValidateAllParameters': 'true', + 'WarnAsError': 'true', + 'WarningLevel': '1'}, + 'Lib': { + 'AdditionalDependencies': 'file1;file2', + 'AdditionalLibraryDirectories': 'folder1;folder2', + 'AdditionalOptions': 'a string1', + 'DisplayLibrary': 'a string1', + 'ErrorReporting': 'PromptImmediately', + 'ExportNamedFunctions': 'string1;string2', + 'ForceSymbolReferences': 'a string1', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreSpecificDefaultLibraries': 'file1;file2', + 'LinkTimeCodeGeneration': 'true', + 'MinimumRequiredVersion': 'a string1', + 'ModuleDefinitionFile': 'a_file_name', + 'Name': 'a_file_name', + 'OutputFile': 'a_file_name', + 'RemoveObjects': 'file1;file2', + 'SubSystem': 'Console', + 'SuppressStartupBanner': 'true', + 'TargetMachine': 'MachineX86i', + 'TrackerLogDirectory': 'a_folder', + 'TreatLibWarningAsErrors': 'true', + 'UseUnicodeResponseFiles': 'true', + 'Verbose': 'true'}, + 'Manifest': { + 'AdditionalManifestFiles': 'file1;file2', + 'AdditionalOptions': 'a string1', + 'AssemblyIdentity': 'a string1', + 'ComponentFileName': 'a_file_name', + 'EnableDPIAwareness': 'fal', + 'GenerateCatalogFiles': 'truel', + 'GenerateCategoryTags': 'true', + 'InputResourceManifests': 'a string1', + 'ManifestFromManagedAssembly': 'a_file_name', + 'notgood3': 'bogus', + 'OutputManifestFile': 'a_file_name', + 'OutputResourceManifests': 'a string1', + 'RegistrarScriptFile': 'a_file_name', + 'ReplacementsFile': 'a_file_name', + 'SuppressDependencyElement': 'true', + 'SuppressStartupBanner': 'true', + 'TrackerLogDirectory': 'a_folder', + 'TypeLibraryFile': 'a_file_name', + 'UpdateFileHashes': 'true', + 'UpdateFileHashesSearchPath': 'a_file_name', + 'VerboseOutput': 'true'}, + 'ProjectReference': { + 'LinkLibraryDependencies': 'true', + 'UseLibraryDependencyInputs': 'true'}, + 'ManifestResourceCompile': { + 'ResourceOutputFileName': 'a_file_name'}, + '': { + 'EmbedManifest': 'true', + 'GenerateManifest': 'true', + 'IgnoreImportLibrary': 'true', + 'LinkIncremental': 'false'}}, + self.stderr) + self._ExpectedWarnings([ + 'Warning: unrecognized setting ClCompile/Enableprefast', + 'Warning: unrecognized setting ClCompile/ZZXYZ', + 'Warning: unrecognized setting Manifest/notgood3', + 'Warning: for Manifest/GenerateCatalogFiles, ' + "expected bool; got 'truel'", + 'Warning: for Lib/TargetMachine, unrecognized enumerated value ' + 'MachineX86i', + "Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"]) + + def testConvertToMSBuildSettings_empty(self): + """Tests an empty conversion.""" + msvs_settings = {} + expected_msbuild_settings = {} + actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( + msvs_settings, + self.stderr) + self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) + self._ExpectedWarnings([]) + + def testConvertToMSBuildSettings_minimal(self): + """Tests a minimal conversion.""" + msvs_settings = { + 'VCCLCompilerTool': { + 'AdditionalIncludeDirectories': 'dir1', + 'AdditionalOptions': '/foo', + 'BasicRuntimeChecks': '0', + }, + 'VCLinkerTool': { + 'LinkTimeCodeGeneration': '1', + 
'ErrorReporting': '1', + 'DataExecutionPrevention': '2', + }, + } + expected_msbuild_settings = { + 'ClCompile': { + 'AdditionalIncludeDirectories': 'dir1', + 'AdditionalOptions': '/foo', + 'BasicRuntimeChecks': 'Default', + }, + 'Link': { + 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration', + 'LinkErrorReporting': 'PromptImmediately', + 'DataExecutionPrevention': 'true', + }, + } + actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( + msvs_settings, + self.stderr) + self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) + self._ExpectedWarnings([]) + + def testConvertToMSBuildSettings_warnings(self): + """Tests conversion that generates warnings.""" + msvs_settings = { + 'VCCLCompilerTool': { + 'AdditionalIncludeDirectories': '1', + 'AdditionalOptions': '2', + # These are incorrect values: + 'BasicRuntimeChecks': '12', + 'BrowseInformation': '21', + 'UsePrecompiledHeader': '13', + 'GeneratePreprocessedFile': '14'}, + 'VCLinkerTool': { + # These are incorrect values: + 'Driver': '10', + 'LinkTimeCodeGeneration': '31', + 'ErrorReporting': '21', + 'FixedBaseAddress': '6'}, + 'VCResourceCompilerTool': { + # Custom + 'Culture': '1003'}} + expected_msbuild_settings = { + 'ClCompile': { + 'AdditionalIncludeDirectories': '1', + 'AdditionalOptions': '2'}, + 'Link': {}, + 'ResourceCompile': { + # Custom + 'Culture': '0x03eb'}} + actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( + msvs_settings, + self.stderr) + self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) + self._ExpectedWarnings([ + 'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to ' + 'MSBuild, index value (12) not in expected range [0, 4)', + 'Warning: while converting VCCLCompilerTool/BrowseInformation to ' + 'MSBuild, index value (21) not in expected range [0, 3)', + 'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to ' + 'MSBuild, index value (13) not in expected range [0, 3)', + 'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to ' + 'MSBuild, value must be one of [0, 1, 2]; got 14', + + 'Warning: while converting VCLinkerTool/Driver to ' + 'MSBuild, index value (10) not in expected range [0, 4)', + 'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to ' + 'MSBuild, index value (31) not in expected range [0, 5)', + 'Warning: while converting VCLinkerTool/ErrorReporting to ' + 'MSBuild, index value (21) not in expected range [0, 3)', + 'Warning: while converting VCLinkerTool/FixedBaseAddress to ' + 'MSBuild, index value (6) not in expected range [0, 3)', + ]) + + def testConvertToMSBuildSettings_full_synthetic(self): + """Tests conversion of all the MSBuild settings.""" + msvs_settings = { + 'VCCLCompilerTool': { + 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string', + 'AdditionalUsingDirectories': 'folder1;folder2;folder3', + 'AssemblerListingLocation': 'a_file_name', + 'AssemblerOutput': '0', + 'BasicRuntimeChecks': '1', + 'BrowseInformation': '2', + 'BrowseInformationFile': 'a_file_name', + 'BufferSecurityCheck': 'true', + 'CallingConvention': '0', + 'CompileAs': '1', + 'DebugInformationFormat': '4', + 'DefaultCharIsUnsigned': 'true', + 'Detect64BitPortabilityProblems': 'true', + 'DisableLanguageExtensions': 'true', + 'DisableSpecificWarnings': 'd1;d2;d3', + 'EnableEnhancedInstructionSet': '0', + 'EnableFiberSafeOptimizations': 'true', + 'EnableFunctionLevelLinking': 'true', + 'EnableIntrinsicFunctions': 'true', + 'EnablePREfast': 'true', + 'ErrorReporting': '1', + 
'ExceptionHandling': '2', + 'ExpandAttributedSource': 'true', + 'FavorSizeOrSpeed': '0', + 'FloatingPointExceptions': 'true', + 'FloatingPointModel': '1', + 'ForceConformanceInForLoopScope': 'true', + 'ForcedIncludeFiles': 'file1;file2;file3', + 'ForcedUsingFiles': 'file1;file2;file3', + 'GeneratePreprocessedFile': '1', + 'GenerateXMLDocumentationFiles': 'true', + 'IgnoreStandardIncludePath': 'true', + 'InlineFunctionExpansion': '2', + 'KeepComments': 'true', + 'MinimalRebuild': 'true', + 'ObjectFile': 'a_file_name', + 'OmitDefaultLibName': 'true', + 'OmitFramePointers': 'true', + 'OpenMP': 'true', + 'Optimization': '3', + 'PrecompiledHeaderFile': 'a_file_name', + 'PrecompiledHeaderThrough': 'a_file_name', + 'PreprocessorDefinitions': 'd1;d2;d3', + 'ProgramDataBaseFileName': 'a_file_name', + 'RuntimeLibrary': '0', + 'RuntimeTypeInfo': 'true', + 'ShowIncludes': 'true', + 'SmallerTypeCheck': 'true', + 'StringPooling': 'true', + 'StructMemberAlignment': '1', + 'SuppressStartupBanner': 'true', + 'TreatWChar_tAsBuiltInType': 'true', + 'UndefineAllPreprocessorDefinitions': 'true', + 'UndefinePreprocessorDefinitions': 'd1;d2;d3', + 'UseFullPaths': 'true', + 'UsePrecompiledHeader': '1', + 'UseUnicodeResponseFiles': 'true', + 'WarnAsError': 'true', + 'WarningLevel': '2', + 'WholeProgramOptimization': 'true', + 'XMLDocumentationFileName': 'a_file_name'}, + 'VCLinkerTool': { + 'AdditionalDependencies': 'file1;file2;file3', + 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', + 'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3', + 'AdditionalManifestDependencies': 'file1;file2;file3', + 'AdditionalOptions': 'a_string', + 'AddModuleNamesToAssembly': 'file1;file2;file3', + 'AllowIsolation': 'true', + 'AssemblyDebug': '0', + 'AssemblyLinkResource': 'file1;file2;file3', + 'BaseAddress': 'a_string', + 'CLRImageType': '1', + 'CLRThreadAttribute': '2', + 'CLRUnmanagedCodeCheck': 'true', + 'DataExecutionPrevention': '0', + 'DelayLoadDLLs': 'file1;file2;file3', + 'DelaySign': 'true', + 'Driver': '1', + 'EmbedManagedResourceFile': 'file1;file2;file3', + 'EnableCOMDATFolding': '0', + 'EnableUAC': 'true', + 'EntryPointSymbol': 'a_string', + 'ErrorReporting': '0', + 'FixedBaseAddress': '1', + 'ForceSymbolReferences': 'file1;file2;file3', + 'FunctionOrder': 'a_file_name', + 'GenerateDebugInformation': 'true', + 'GenerateManifest': 'true', + 'GenerateMapFile': 'true', + 'HeapCommitSize': 'a_string', + 'HeapReserveSize': 'a_string', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreDefaultLibraryNames': 'file1;file2;file3', + 'IgnoreEmbeddedIDL': 'true', + 'IgnoreImportLibrary': 'true', + 'ImportLibrary': 'a_file_name', + 'KeyContainer': 'a_file_name', + 'KeyFile': 'a_file_name', + 'LargeAddressAware': '2', + 'LinkIncremental': '1', + 'LinkLibraryDependencies': 'true', + 'LinkTimeCodeGeneration': '2', + 'ManifestFile': 'a_file_name', + 'MapExports': 'true', + 'MapFileName': 'a_file_name', + 'MergedIDLBaseFileName': 'a_file_name', + 'MergeSections': 'a_string', + 'MidlCommandFile': 'a_file_name', + 'ModuleDefinitionFile': 'a_file_name', + 'OptimizeForWindows98': '1', + 'OptimizeReferences': '0', + 'OutputFile': 'a_file_name', + 'PerUserRedirection': 'true', + 'Profile': 'true', + 'ProfileGuidedDatabase': 'a_file_name', + 'ProgramDatabaseFile': 'a_file_name', + 'RandomizedBaseAddress': '1', + 'RegisterOutput': 'true', + 'ResourceOnlyDLL': 'true', + 'SetChecksum': 'true', + 'ShowProgress': '0', + 'StackCommitSize': 'a_string', + 'StackReserveSize': 'a_string', + 'StripPrivateSymbols': 
'a_file_name', + 'SubSystem': '2', + 'SupportUnloadOfDelayLoadedDLL': 'true', + 'SuppressStartupBanner': 'true', + 'SwapRunFromCD': 'true', + 'SwapRunFromNet': 'true', + 'TargetMachine': '3', + 'TerminalServerAware': '2', + 'TurnOffAssemblyGeneration': 'true', + 'TypeLibraryFile': 'a_file_name', + 'TypeLibraryResourceID': '33', + 'UACExecutionLevel': '1', + 'UACUIAccess': 'true', + 'UseLibraryDependencyInputs': 'false', + 'UseUnicodeResponseFiles': 'true', + 'Version': 'a_string'}, + 'VCResourceCompilerTool': { + 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string', + 'Culture': '1003', + 'IgnoreStandardIncludePath': 'true', + 'PreprocessorDefinitions': 'd1;d2;d3', + 'ResourceOutputFileName': 'a_string', + 'ShowProgress': 'true', + 'SuppressStartupBanner': 'true', + 'UndefinePreprocessorDefinitions': 'd1;d2;d3'}, + 'VCMIDLTool': { + 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string', + 'CPreprocessOptions': 'a_string', + 'DefaultCharType': '0', + 'DLLDataFileName': 'a_file_name', + 'EnableErrorChecks': '2', + 'ErrorCheckAllocations': 'true', + 'ErrorCheckBounds': 'true', + 'ErrorCheckEnumRange': 'true', + 'ErrorCheckRefPointers': 'true', + 'ErrorCheckStubData': 'true', + 'GenerateStublessProxies': 'true', + 'GenerateTypeLibrary': 'true', + 'HeaderFileName': 'a_file_name', + 'IgnoreStandardIncludePath': 'true', + 'InterfaceIdentifierFileName': 'a_file_name', + 'MkTypLibCompatible': 'true', + 'OutputDirectory': 'a_string', + 'PreprocessorDefinitions': 'd1;d2;d3', + 'ProxyFileName': 'a_file_name', + 'RedirectOutputAndErrors': 'a_file_name', + 'StructMemberAlignment': '3', + 'SuppressStartupBanner': 'true', + 'TargetEnvironment': '1', + 'TypeLibraryName': 'a_file_name', + 'UndefinePreprocessorDefinitions': 'd1;d2;d3', + 'ValidateParameters': 'true', + 'WarnAsError': 'true', + 'WarningLevel': '4'}, + 'VCLibrarianTool': { + 'AdditionalDependencies': 'file1;file2;file3', + 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', + 'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string', + 'ExportNamedFunctions': 'd1;d2;d3', + 'ForceSymbolReferences': 'a_string', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3', + 'LinkLibraryDependencies': 'true', + 'ModuleDefinitionFile': 'a_file_name', + 'OutputFile': 'a_file_name', + 'SuppressStartupBanner': 'true', + 'UseUnicodeResponseFiles': 'true'}, + 'VCManifestTool': { + 'AdditionalManifestFiles': 'file1;file2;file3', + 'AdditionalOptions': 'a_string', + 'AssemblyIdentity': 'a_string', + 'ComponentFileName': 'a_file_name', + 'DependencyInformationFile': 'a_file_name', + 'EmbedManifest': 'true', + 'GenerateCatalogFiles': 'true', + 'InputResourceManifests': 'a_string', + 'ManifestResourceFile': 'my_name', + 'OutputManifestFile': 'a_file_name', + 'RegistrarScriptFile': 'a_file_name', + 'ReplacementsFile': 'a_file_name', + 'SuppressStartupBanner': 'true', + 'TypeLibraryFile': 'a_file_name', + 'UpdateFileHashes': 'true', + 'UpdateFileHashesSearchPath': 'a_file_name', + 'UseFAT32Workaround': 'true', + 'UseUnicodeResponseFiles': 'true', + 'VerboseOutput': 'true'}} + expected_msbuild_settings = { + 'ClCompile': { + 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string /J', + 'AdditionalUsingDirectories': 'folder1;folder2;folder3', + 'AssemblerListingLocation': 'a_file_name', + 'AssemblerOutput': 'NoListing', + 'BasicRuntimeChecks': 
'StackFrameRuntimeCheck', + 'BrowseInformation': 'true', + 'BrowseInformationFile': 'a_file_name', + 'BufferSecurityCheck': 'true', + 'CallingConvention': 'Cdecl', + 'CompileAs': 'CompileAsC', + 'DebugInformationFormat': 'EditAndContinue', + 'DisableLanguageExtensions': 'true', + 'DisableSpecificWarnings': 'd1;d2;d3', + 'EnableEnhancedInstructionSet': 'NotSet', + 'EnableFiberSafeOptimizations': 'true', + 'EnablePREfast': 'true', + 'ErrorReporting': 'Prompt', + 'ExceptionHandling': 'Async', + 'ExpandAttributedSource': 'true', + 'FavorSizeOrSpeed': 'Neither', + 'FloatingPointExceptions': 'true', + 'FloatingPointModel': 'Strict', + 'ForceConformanceInForLoopScope': 'true', + 'ForcedIncludeFiles': 'file1;file2;file3', + 'ForcedUsingFiles': 'file1;file2;file3', + 'FunctionLevelLinking': 'true', + 'GenerateXMLDocumentationFiles': 'true', + 'IgnoreStandardIncludePath': 'true', + 'InlineFunctionExpansion': 'AnySuitable', + 'IntrinsicFunctions': 'true', + 'MinimalRebuild': 'true', + 'ObjectFileName': 'a_file_name', + 'OmitDefaultLibName': 'true', + 'OmitFramePointers': 'true', + 'OpenMPSupport': 'true', + 'Optimization': 'Full', + 'PrecompiledHeader': 'Create', + 'PrecompiledHeaderFile': 'a_file_name', + 'PrecompiledHeaderOutputFile': 'a_file_name', + 'PreprocessKeepComments': 'true', + 'PreprocessorDefinitions': 'd1;d2;d3', + 'PreprocessSuppressLineNumbers': 'false', + 'PreprocessToFile': 'true', + 'ProgramDataBaseFileName': 'a_file_name', + 'RuntimeLibrary': 'MultiThreaded', + 'RuntimeTypeInfo': 'true', + 'ShowIncludes': 'true', + 'SmallerTypeCheck': 'true', + 'StringPooling': 'true', + 'StructMemberAlignment': '1Byte', + 'SuppressStartupBanner': 'true', + 'TreatWarningAsError': 'true', + 'TreatWChar_tAsBuiltInType': 'true', + 'UndefineAllPreprocessorDefinitions': 'true', + 'UndefinePreprocessorDefinitions': 'd1;d2;d3', + 'UseFullPaths': 'true', + 'WarningLevel': 'Level2', + 'WholeProgramOptimization': 'true', + 'XMLDocumentationFileName': 'a_file_name'}, + 'Link': { + 'AdditionalDependencies': 'file1;file2;file3', + 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', + 'AdditionalManifestDependencies': 'file1;file2;file3', + 'AdditionalOptions': 'a_string', + 'AddModuleNamesToAssembly': 'file1;file2;file3', + 'AllowIsolation': 'true', + 'AssemblyDebug': '', + 'AssemblyLinkResource': 'file1;file2;file3', + 'BaseAddress': 'a_string', + 'CLRImageType': 'ForceIJWImage', + 'CLRThreadAttribute': 'STAThreadingAttribute', + 'CLRUnmanagedCodeCheck': 'true', + 'DataExecutionPrevention': '', + 'DelayLoadDLLs': 'file1;file2;file3', + 'DelaySign': 'true', + 'Driver': 'Driver', + 'EmbedManagedResourceFile': 'file1;file2;file3', + 'EnableCOMDATFolding': '', + 'EnableUAC': 'true', + 'EntryPointSymbol': 'a_string', + 'FixedBaseAddress': 'false', + 'ForceSymbolReferences': 'file1;file2;file3', + 'FunctionOrder': 'a_file_name', + 'GenerateDebugInformation': 'true', + 'GenerateMapFile': 'true', + 'HeapCommitSize': 'a_string', + 'HeapReserveSize': 'a_string', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreEmbeddedIDL': 'true', + 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3', + 'ImportLibrary': 'a_file_name', + 'KeyContainer': 'a_file_name', + 'KeyFile': 'a_file_name', + 'LargeAddressAware': 'true', + 'LinkErrorReporting': 'NoErrorReport', + 'LinkTimeCodeGeneration': 'PGInstrument', + 'ManifestFile': 'a_file_name', + 'MapExports': 'true', + 'MapFileName': 'a_file_name', + 'MergedIDLBaseFileName': 'a_file_name', + 'MergeSections': 'a_string', + 'MidlCommandFile': 'a_file_name', + 
'ModuleDefinitionFile': 'a_file_name', + 'NoEntryPoint': 'true', + 'OptimizeReferences': '', + 'OutputFile': 'a_file_name', + 'PerUserRedirection': 'true', + 'Profile': 'true', + 'ProfileGuidedDatabase': 'a_file_name', + 'ProgramDatabaseFile': 'a_file_name', + 'RandomizedBaseAddress': 'false', + 'RegisterOutput': 'true', + 'SetChecksum': 'true', + 'ShowProgress': 'NotSet', + 'StackCommitSize': 'a_string', + 'StackReserveSize': 'a_string', + 'StripPrivateSymbols': 'a_file_name', + 'SubSystem': 'Windows', + 'SupportUnloadOfDelayLoadedDLL': 'true', + 'SuppressStartupBanner': 'true', + 'SwapRunFromCD': 'true', + 'SwapRunFromNET': 'true', + 'TargetMachine': 'MachineARM', + 'TerminalServerAware': 'true', + 'TurnOffAssemblyGeneration': 'true', + 'TypeLibraryFile': 'a_file_name', + 'TypeLibraryResourceID': '33', + 'UACExecutionLevel': 'HighestAvailable', + 'UACUIAccess': 'true', + 'Version': 'a_string'}, + 'ResourceCompile': { + 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string', + 'Culture': '0x03eb', + 'IgnoreStandardIncludePath': 'true', + 'PreprocessorDefinitions': 'd1;d2;d3', + 'ResourceOutputFileName': 'a_string', + 'ShowProgress': 'true', + 'SuppressStartupBanner': 'true', + 'UndefinePreprocessorDefinitions': 'd1;d2;d3'}, + 'Midl': { + 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string', + 'CPreprocessOptions': 'a_string', + 'DefaultCharType': 'Unsigned', + 'DllDataFileName': 'a_file_name', + 'EnableErrorChecks': 'All', + 'ErrorCheckAllocations': 'true', + 'ErrorCheckBounds': 'true', + 'ErrorCheckEnumRange': 'true', + 'ErrorCheckRefPointers': 'true', + 'ErrorCheckStubData': 'true', + 'GenerateStublessProxies': 'true', + 'GenerateTypeLibrary': 'true', + 'HeaderFileName': 'a_file_name', + 'IgnoreStandardIncludePath': 'true', + 'InterfaceIdentifierFileName': 'a_file_name', + 'MkTypLibCompatible': 'true', + 'OutputDirectory': 'a_string', + 'PreprocessorDefinitions': 'd1;d2;d3', + 'ProxyFileName': 'a_file_name', + 'RedirectOutputAndErrors': 'a_file_name', + 'StructMemberAlignment': '4', + 'SuppressStartupBanner': 'true', + 'TargetEnvironment': 'Win32', + 'TypeLibraryName': 'a_file_name', + 'UndefinePreprocessorDefinitions': 'd1;d2;d3', + 'ValidateAllParameters': 'true', + 'WarnAsError': 'true', + 'WarningLevel': '4'}, + 'Lib': { + 'AdditionalDependencies': 'file1;file2;file3', + 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', + 'AdditionalOptions': 'a_string', + 'ExportNamedFunctions': 'd1;d2;d3', + 'ForceSymbolReferences': 'a_string', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3', + 'ModuleDefinitionFile': 'a_file_name', + 'OutputFile': 'a_file_name', + 'SuppressStartupBanner': 'true', + 'UseUnicodeResponseFiles': 'true'}, + 'Manifest': { + 'AdditionalManifestFiles': 'file1;file2;file3', + 'AdditionalOptions': 'a_string', + 'AssemblyIdentity': 'a_string', + 'ComponentFileName': 'a_file_name', + 'GenerateCatalogFiles': 'true', + 'InputResourceManifests': 'a_string', + 'OutputManifestFile': 'a_file_name', + 'RegistrarScriptFile': 'a_file_name', + 'ReplacementsFile': 'a_file_name', + 'SuppressStartupBanner': 'true', + 'TypeLibraryFile': 'a_file_name', + 'UpdateFileHashes': 'true', + 'UpdateFileHashesSearchPath': 'a_file_name', + 'VerboseOutput': 'true'}, + 'ManifestResourceCompile': { + 'ResourceOutputFileName': 'my_name'}, + 'ProjectReference': { + 'LinkLibraryDependencies': 'true', + 'UseLibraryDependencyInputs': 'false'}, + '': { + 'EmbedManifest': 
'true', + 'GenerateManifest': 'true', + 'IgnoreImportLibrary': 'true', + 'LinkIncremental': 'false'}} + actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( + msvs_settings, + self.stderr) + self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) + self._ExpectedWarnings([]) + + def testConvertToMSBuildSettings_actual(self): + """Tests the conversion of an actual project. + + A VS2008 project with most of the options defined was created through the + VS2008 IDE. It was then converted to VS2010. The tool settings found in + the .vcproj and .vcxproj files were converted to the two dictionaries + msvs_settings and expected_msbuild_settings. + + Note that for many settings, the VS2010 converter adds macros like + %(AdditionalIncludeDirectories) to make sure than inherited values are + included. Since the Gyp projects we generate do not use inheritance, + we removed these macros. They were: + ClCompile: + AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)' + AdditionalOptions: ' %(AdditionalOptions)' + AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)' + DisableSpecificWarnings: ';%(DisableSpecificWarnings)', + ForcedIncludeFiles: ';%(ForcedIncludeFiles)', + ForcedUsingFiles: ';%(ForcedUsingFiles)', + PreprocessorDefinitions: ';%(PreprocessorDefinitions)', + UndefinePreprocessorDefinitions: + ';%(UndefinePreprocessorDefinitions)', + Link: + AdditionalDependencies: ';%(AdditionalDependencies)', + AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)', + AdditionalManifestDependencies: + ';%(AdditionalManifestDependencies)', + AdditionalOptions: ' %(AdditionalOptions)', + AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)', + AssemblyLinkResource: ';%(AssemblyLinkResource)', + DelayLoadDLLs: ';%(DelayLoadDLLs)', + EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)', + ForceSymbolReferences: ';%(ForceSymbolReferences)', + IgnoreSpecificDefaultLibraries: + ';%(IgnoreSpecificDefaultLibraries)', + ResourceCompile: + AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)', + AdditionalOptions: ' %(AdditionalOptions)', + PreprocessorDefinitions: ';%(PreprocessorDefinitions)', + Manifest: + AdditionalManifestFiles: ';%(AdditionalManifestFiles)', + AdditionalOptions: ' %(AdditionalOptions)', + InputResourceManifests: ';%(InputResourceManifests)', + """ + msvs_settings = { + 'VCCLCompilerTool': { + 'AdditionalIncludeDirectories': 'dir1', + 'AdditionalOptions': '/more', + 'AdditionalUsingDirectories': 'test', + 'AssemblerListingLocation': '$(IntDir)\\a', + 'AssemblerOutput': '1', + 'BasicRuntimeChecks': '3', + 'BrowseInformation': '1', + 'BrowseInformationFile': '$(IntDir)\\e', + 'BufferSecurityCheck': 'false', + 'CallingConvention': '1', + 'CompileAs': '1', + 'DebugInformationFormat': '4', + 'DefaultCharIsUnsigned': 'true', + 'Detect64BitPortabilityProblems': 'true', + 'DisableLanguageExtensions': 'true', + 'DisableSpecificWarnings': 'abc', + 'EnableEnhancedInstructionSet': '1', + 'EnableFiberSafeOptimizations': 'true', + 'EnableFunctionLevelLinking': 'true', + 'EnableIntrinsicFunctions': 'true', + 'EnablePREfast': 'true', + 'ErrorReporting': '2', + 'ExceptionHandling': '2', + 'ExpandAttributedSource': 'true', + 'FavorSizeOrSpeed': '2', + 'FloatingPointExceptions': 'true', + 'FloatingPointModel': '1', + 'ForceConformanceInForLoopScope': 'false', + 'ForcedIncludeFiles': 'def', + 'ForcedUsingFiles': 'ge', + 'GeneratePreprocessedFile': '2', + 'GenerateXMLDocumentationFiles': 'true', + 'IgnoreStandardIncludePath': 'true', + 
'InlineFunctionExpansion': '1', + 'KeepComments': 'true', + 'MinimalRebuild': 'true', + 'ObjectFile': '$(IntDir)\\b', + 'OmitDefaultLibName': 'true', + 'OmitFramePointers': 'true', + 'OpenMP': 'true', + 'Optimization': '3', + 'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche', + 'PrecompiledHeaderThrough': 'StdAfx.hd', + 'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE', + 'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb', + 'RuntimeLibrary': '3', + 'RuntimeTypeInfo': 'false', + 'ShowIncludes': 'true', + 'SmallerTypeCheck': 'true', + 'StringPooling': 'true', + 'StructMemberAlignment': '3', + 'SuppressStartupBanner': 'false', + 'TreatWChar_tAsBuiltInType': 'false', + 'UndefineAllPreprocessorDefinitions': 'true', + 'UndefinePreprocessorDefinitions': 'wer', + 'UseFullPaths': 'true', + 'UsePrecompiledHeader': '0', + 'UseUnicodeResponseFiles': 'false', + 'WarnAsError': 'true', + 'WarningLevel': '3', + 'WholeProgramOptimization': 'true', + 'XMLDocumentationFileName': '$(IntDir)\\c'}, + 'VCLinkerTool': { + 'AdditionalDependencies': 'zx', + 'AdditionalLibraryDirectories': 'asd', + 'AdditionalManifestDependencies': 's2', + 'AdditionalOptions': '/mor2', + 'AddModuleNamesToAssembly': 'd1', + 'AllowIsolation': 'false', + 'AssemblyDebug': '1', + 'AssemblyLinkResource': 'd5', + 'BaseAddress': '23423', + 'CLRImageType': '3', + 'CLRThreadAttribute': '1', + 'CLRUnmanagedCodeCheck': 'true', + 'DataExecutionPrevention': '0', + 'DelayLoadDLLs': 'd4', + 'DelaySign': 'true', + 'Driver': '2', + 'EmbedManagedResourceFile': 'd2', + 'EnableCOMDATFolding': '1', + 'EnableUAC': 'false', + 'EntryPointSymbol': 'f5', + 'ErrorReporting': '2', + 'FixedBaseAddress': '1', + 'ForceSymbolReferences': 'd3', + 'FunctionOrder': 'fssdfsd', + 'GenerateDebugInformation': 'true', + 'GenerateManifest': 'false', + 'GenerateMapFile': 'true', + 'HeapCommitSize': '13', + 'HeapReserveSize': '12', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreDefaultLibraryNames': 'flob;flok', + 'IgnoreEmbeddedIDL': 'true', + 'IgnoreImportLibrary': 'true', + 'ImportLibrary': 'f4', + 'KeyContainer': 'f7', + 'KeyFile': 'f6', + 'LargeAddressAware': '2', + 'LinkIncremental': '0', + 'LinkLibraryDependencies': 'false', + 'LinkTimeCodeGeneration': '1', + 'ManifestFile': + '$(IntDir)\\$(TargetFileName).2intermediate.manifest', + 'MapExports': 'true', + 'MapFileName': 'd5', + 'MergedIDLBaseFileName': 'f2', + 'MergeSections': 'f5', + 'MidlCommandFile': 'f1', + 'ModuleDefinitionFile': 'sdsd', + 'OptimizeForWindows98': '2', + 'OptimizeReferences': '2', + 'OutputFile': '$(OutDir)\\$(ProjectName)2.exe', + 'PerUserRedirection': 'true', + 'Profile': 'true', + 'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd', + 'ProgramDatabaseFile': 'Flob.pdb', + 'RandomizedBaseAddress': '1', + 'RegisterOutput': 'true', + 'ResourceOnlyDLL': 'true', + 'SetChecksum': 'false', + 'ShowProgress': '1', + 'StackCommitSize': '15', + 'StackReserveSize': '14', + 'StripPrivateSymbols': 'd3', + 'SubSystem': '1', + 'SupportUnloadOfDelayLoadedDLL': 'true', + 'SuppressStartupBanner': 'false', + 'SwapRunFromCD': 'true', + 'SwapRunFromNet': 'true', + 'TargetMachine': '1', + 'TerminalServerAware': '1', + 'TurnOffAssemblyGeneration': 'true', + 'TypeLibraryFile': 'f3', + 'TypeLibraryResourceID': '12', + 'UACExecutionLevel': '2', + 'UACUIAccess': 'true', + 'UseLibraryDependencyInputs': 'true', + 'UseUnicodeResponseFiles': 'false', + 'Version': '333'}, + 'VCResourceCompilerTool': { + 'AdditionalIncludeDirectories': 'f3', + 'AdditionalOptions': '/more3', + 'Culture': '3084', + 
'IgnoreStandardIncludePath': 'true', + 'PreprocessorDefinitions': '_UNICODE;UNICODE2', + 'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res', + 'ShowProgress': 'true'}, + 'VCManifestTool': { + 'AdditionalManifestFiles': 'sfsdfsd', + 'AdditionalOptions': 'afdsdafsd', + 'AssemblyIdentity': 'sddfdsadfsa', + 'ComponentFileName': 'fsdfds', + 'DependencyInformationFile': '$(IntDir)\\mt.depdfd', + 'EmbedManifest': 'false', + 'GenerateCatalogFiles': 'true', + 'InputResourceManifests': 'asfsfdafs', + 'ManifestResourceFile': + '$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf', + 'OutputManifestFile': '$(TargetPath).manifestdfs', + 'RegistrarScriptFile': 'sdfsfd', + 'ReplacementsFile': 'sdffsd', + 'SuppressStartupBanner': 'false', + 'TypeLibraryFile': 'sfsd', + 'UpdateFileHashes': 'true', + 'UpdateFileHashesSearchPath': 'sfsd', + 'UseFAT32Workaround': 'true', + 'UseUnicodeResponseFiles': 'false', + 'VerboseOutput': 'true'}} + expected_msbuild_settings = { + 'ClCompile': { + 'AdditionalIncludeDirectories': 'dir1', + 'AdditionalOptions': '/more /J', + 'AdditionalUsingDirectories': 'test', + 'AssemblerListingLocation': '$(IntDir)a', + 'AssemblerOutput': 'AssemblyCode', + 'BasicRuntimeChecks': 'EnableFastChecks', + 'BrowseInformation': 'true', + 'BrowseInformationFile': '$(IntDir)e', + 'BufferSecurityCheck': 'false', + 'CallingConvention': 'FastCall', + 'CompileAs': 'CompileAsC', + 'DebugInformationFormat': 'EditAndContinue', + 'DisableLanguageExtensions': 'true', + 'DisableSpecificWarnings': 'abc', + 'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions', + 'EnableFiberSafeOptimizations': 'true', + 'EnablePREfast': 'true', + 'ErrorReporting': 'Queue', + 'ExceptionHandling': 'Async', + 'ExpandAttributedSource': 'true', + 'FavorSizeOrSpeed': 'Size', + 'FloatingPointExceptions': 'true', + 'FloatingPointModel': 'Strict', + 'ForceConformanceInForLoopScope': 'false', + 'ForcedIncludeFiles': 'def', + 'ForcedUsingFiles': 'ge', + 'FunctionLevelLinking': 'true', + 'GenerateXMLDocumentationFiles': 'true', + 'IgnoreStandardIncludePath': 'true', + 'InlineFunctionExpansion': 'OnlyExplicitInline', + 'IntrinsicFunctions': 'true', + 'MinimalRebuild': 'true', + 'ObjectFileName': '$(IntDir)b', + 'OmitDefaultLibName': 'true', + 'OmitFramePointers': 'true', + 'OpenMPSupport': 'true', + 'Optimization': 'Full', + 'PrecompiledHeader': 'NotUsing', # Actual conversion gives '' + 'PrecompiledHeaderFile': 'StdAfx.hd', + 'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche', + 'PreprocessKeepComments': 'true', + 'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE', + 'PreprocessSuppressLineNumbers': 'true', + 'PreprocessToFile': 'true', + 'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb', + 'RuntimeLibrary': 'MultiThreadedDebugDLL', + 'RuntimeTypeInfo': 'false', + 'ShowIncludes': 'true', + 'SmallerTypeCheck': 'true', + 'StringPooling': 'true', + 'StructMemberAlignment': '4Bytes', + 'SuppressStartupBanner': 'false', + 'TreatWarningAsError': 'true', + 'TreatWChar_tAsBuiltInType': 'false', + 'UndefineAllPreprocessorDefinitions': 'true', + 'UndefinePreprocessorDefinitions': 'wer', + 'UseFullPaths': 'true', + 'WarningLevel': 'Level3', + 'WholeProgramOptimization': 'true', + 'XMLDocumentationFileName': '$(IntDir)c'}, + 'Link': { + 'AdditionalDependencies': 'zx', + 'AdditionalLibraryDirectories': 'asd', + 'AdditionalManifestDependencies': 's2', + 'AdditionalOptions': '/mor2', + 'AddModuleNamesToAssembly': 'd1', + 'AllowIsolation': 'false', + 'AssemblyDebug': 'true', + 'AssemblyLinkResource': 'd5', + 'BaseAddress': '23423', + 
'CLRImageType': 'ForceSafeILImage', + 'CLRThreadAttribute': 'MTAThreadingAttribute', + 'CLRUnmanagedCodeCheck': 'true', + 'DataExecutionPrevention': '', + 'DelayLoadDLLs': 'd4', + 'DelaySign': 'true', + 'Driver': 'UpOnly', + 'EmbedManagedResourceFile': 'd2', + 'EnableCOMDATFolding': 'false', + 'EnableUAC': 'false', + 'EntryPointSymbol': 'f5', + 'FixedBaseAddress': 'false', + 'ForceSymbolReferences': 'd3', + 'FunctionOrder': 'fssdfsd', + 'GenerateDebugInformation': 'true', + 'GenerateMapFile': 'true', + 'HeapCommitSize': '13', + 'HeapReserveSize': '12', + 'IgnoreAllDefaultLibraries': 'true', + 'IgnoreEmbeddedIDL': 'true', + 'IgnoreSpecificDefaultLibraries': 'flob;flok', + 'ImportLibrary': 'f4', + 'KeyContainer': 'f7', + 'KeyFile': 'f6', + 'LargeAddressAware': 'true', + 'LinkErrorReporting': 'QueueForNextLogin', + 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration', + 'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest', + 'MapExports': 'true', + 'MapFileName': 'd5', + 'MergedIDLBaseFileName': 'f2', + 'MergeSections': 'f5', + 'MidlCommandFile': 'f1', + 'ModuleDefinitionFile': 'sdsd', + 'NoEntryPoint': 'true', + 'OptimizeReferences': 'true', + 'OutputFile': '$(OutDir)$(ProjectName)2.exe', + 'PerUserRedirection': 'true', + 'Profile': 'true', + 'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd', + 'ProgramDatabaseFile': 'Flob.pdb', + 'RandomizedBaseAddress': 'false', + 'RegisterOutput': 'true', + 'SetChecksum': 'false', + 'ShowProgress': 'LinkVerbose', + 'StackCommitSize': '15', + 'StackReserveSize': '14', + 'StripPrivateSymbols': 'd3', + 'SubSystem': 'Console', + 'SupportUnloadOfDelayLoadedDLL': 'true', + 'SuppressStartupBanner': 'false', + 'SwapRunFromCD': 'true', + 'SwapRunFromNET': 'true', + 'TargetMachine': 'MachineX86', + 'TerminalServerAware': 'false', + 'TurnOffAssemblyGeneration': 'true', + 'TypeLibraryFile': 'f3', + 'TypeLibraryResourceID': '12', + 'UACExecutionLevel': 'RequireAdministrator', + 'UACUIAccess': 'true', + 'Version': '333'}, + 'ResourceCompile': { + 'AdditionalIncludeDirectories': 'f3', + 'AdditionalOptions': '/more3', + 'Culture': '0x0c0c', + 'IgnoreStandardIncludePath': 'true', + 'PreprocessorDefinitions': '_UNICODE;UNICODE2', + 'ResourceOutputFileName': '$(IntDir)%(Filename)3.res', + 'ShowProgress': 'true'}, + 'Manifest': { + 'AdditionalManifestFiles': 'sfsdfsd', + 'AdditionalOptions': 'afdsdafsd', + 'AssemblyIdentity': 'sddfdsadfsa', + 'ComponentFileName': 'fsdfds', + 'GenerateCatalogFiles': 'true', + 'InputResourceManifests': 'asfsfdafs', + 'OutputManifestFile': '$(TargetPath).manifestdfs', + 'RegistrarScriptFile': 'sdfsfd', + 'ReplacementsFile': 'sdffsd', + 'SuppressStartupBanner': 'false', + 'TypeLibraryFile': 'sfsd', + 'UpdateFileHashes': 'true', + 'UpdateFileHashesSearchPath': 'sfsd', + 'VerboseOutput': 'true'}, + 'ProjectReference': { + 'LinkLibraryDependencies': 'false', + 'UseLibraryDependencyInputs': 'true'}, + '': { + 'EmbedManifest': 'false', + 'GenerateManifest': 'false', + 'IgnoreImportLibrary': 'true', + 'LinkIncremental': '' + }, + 'ManifestResourceCompile': { + 'ResourceOutputFileName': + '$(IntDir)$(TargetFileName).embed.manifest.resfdsf'} + } + actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( + msvs_settings, + self.stderr) + self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) + self._ExpectedWarnings([]) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSToolFile.py b/deps/libuv/build/gyp/pylib/gyp/MSVSToolFile.py new file mode 100644 index 
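# A hypothetical usage sketch, not part of the patch above: this is the
# conversion API that the MSVSSettings tests above exercise. It assumes gyp's
# pylib directory is on sys.path; the setting values are examples only.
import sys
from gyp import MSVSSettings

msvs_settings = {
    'VCCLCompilerTool': {
        'AdditionalIncludeDirectories': 'dir1',
        'BasicRuntimeChecks': '3',   # numeric index in the .vcproj format
    },
    'VCLinkerTool': {
        'ErrorReporting': '1',
    },
}
# Unrecognized or out-of-range values produce warnings on the stream passed
# as the second argument, exactly as the tests above assert.
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings,
                                                         sys.stderr)
# Per the conversions shown in the tests above: BasicRuntimeChecks '3' becomes
# 'EnableFastChecks', and VCLinkerTool/ErrorReporting '1' becomes
# Link/LinkErrorReporting 'PromptImmediately'.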
00000000..74e529a1 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSToolFile.py @@ -0,0 +1,58 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Visual Studio project reader/writer.""" + +import gyp.common +import gyp.easy_xml as easy_xml + + +class Writer(object): + """Visual Studio XML tool file writer.""" + + def __init__(self, tool_file_path, name): + """Initializes the tool file. + + Args: + tool_file_path: Path to the tool file. + name: Name of the tool file. + """ + self.tool_file_path = tool_file_path + self.name = name + self.rules_section = ['Rules'] + + def AddCustomBuildRule(self, name, cmd, description, + additional_dependencies, + outputs, extensions): + """Adds a rule to the tool file. + + Args: + name: Name of the rule. + description: Description of the rule. + cmd: Command line of the rule. + additional_dependencies: other files which may trigger the rule. + outputs: outputs of the rule. + extensions: extensions handled by the rule. + """ + rule = ['CustomBuildRule', + {'Name': name, + 'ExecutionDescription': description, + 'CommandLine': cmd, + 'Outputs': ';'.join(outputs), + 'FileExtensions': ';'.join(extensions), + 'AdditionalDependencies': + ';'.join(additional_dependencies) + }] + self.rules_section.append(rule) + + def WriteIfChanged(self): + """Writes the tool file.""" + content = ['VisualStudioToolFile', + {'Version': '8.00', + 'Name': self.name + }, + self.rules_section + ] + easy_xml.WriteXmlIfChanged(content, self.tool_file_path, + encoding="Windows-1252") diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSUserFile.py b/deps/libuv/build/gyp/pylib/gyp/MSVSUserFile.py new file mode 100644 index 00000000..6c07e9a8 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSUserFile.py @@ -0,0 +1,147 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Visual Studio user preferences file writer.""" + +import os +import re +import socket # for gethostname + +import gyp.common +import gyp.easy_xml as easy_xml + + +#------------------------------------------------------------------------------ + +def _FindCommandInPath(command): + """If there are no slashes in the command given, this function + searches the PATH env to find the given command, and converts it + to an absolute path. We have to do this because MSVS is looking + for an actual file to launch a debugger on, not just a command + line. Note that this happens at GYP time, so anything needing to + be built needs to have a full path.""" + if '/' in command or '\\' in command: + # If the command already has path elements (either relative or + # absolute), then assume it is constructed properly. + return command + else: + # Search through the path list and find an existing file that + # we can access. + paths = os.environ.get('PATH','').split(os.pathsep) + for path in paths: + item = os.path.join(path, command) + if os.path.isfile(item) and os.access(item, os.X_OK): + return item + return command + +def _QuoteWin32CommandLineArgs(args): + new_args = [] + for arg in args: + # Replace all double-quotes with double-double-quotes to escape + # them for cmd shell, and then quote the whole thing if there + # are any. + if arg.find('"') != -1: + arg = '""'.join(arg.split('"')) + arg = '"%s"' % arg + + # Otherwise, if there are any spaces, quote the whole arg. 
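# A hypothetical usage sketch for the tool-file writer defined above in
# MSVSToolFile.py (not part of the upstream sources; the rule name, command,
# and extensions are invented). It emits a VS2008-style .rules file that
# describes a custom build step.
from gyp import MSVSToolFile

tool_file = MSVSToolFile.Writer('my_rules.rules', 'my_rules')
tool_file.AddCustomBuildRule(
    name='compile_foo',
    cmd='foo_compiler.exe "$(InputPath)" -o "$(OutDir)\\$(InputName).cc"',
    description='Compiling $(InputName).foo',
    additional_dependencies=['foo_compiler.exe'],
    outputs=['$(OutDir)\\$(InputName).cc'],
    extensions=['foo'])
tool_file.WriteIfChanged()  # only rewrites my_rules.rules if the XML changed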
+ elif re.search(r'[ \t\n]', arg): + arg = '"%s"' % arg + new_args.append(arg) + return new_args + +class Writer(object): + """Visual Studio XML user user file writer.""" + + def __init__(self, user_file_path, version, name): + """Initializes the user file. + + Args: + user_file_path: Path to the user file. + version: Version info. + name: Name of the user file. + """ + self.user_file_path = user_file_path + self.version = version + self.name = name + self.configurations = {} + + def AddConfig(self, name): + """Adds a configuration to the project. + + Args: + name: Configuration name. + """ + self.configurations[name] = ['Configuration', {'Name': name}] + + def AddDebugSettings(self, config_name, command, environment = {}, + working_directory=""): + """Adds a DebugSettings node to the user file for a particular config. + + Args: + command: command line to run. First element in the list is the + executable. All elements of the command will be quoted if + necessary. + working_directory: other files which may trigger the rule. (optional) + """ + command = _QuoteWin32CommandLineArgs(command) + + abs_command = _FindCommandInPath(command[0]) + + if environment and isinstance(environment, dict): + env_list = ['%s="%s"' % (key, val) + for (key,val) in environment.iteritems()] + environment = ' '.join(env_list) + else: + environment = '' + + n_cmd = ['DebugSettings', + {'Command': abs_command, + 'WorkingDirectory': working_directory, + 'CommandArguments': " ".join(command[1:]), + 'RemoteMachine': socket.gethostname(), + 'Environment': environment, + 'EnvironmentMerge': 'true', + # Currently these are all "dummy" values that we're just setting + # in the default manner that MSVS does it. We could use some of + # these to add additional capabilities, I suppose, but they might + # not have parity with other platforms then. + 'Attach': 'false', + 'DebuggerType': '3', # 'auto' debugger + 'Remote': '1', + 'RemoteCommand': '', + 'HttpUrl': '', + 'PDBPath': '', + 'SQLDebugging': '', + 'DebuggerFlavor': '0', + 'MPIRunCommand': '', + 'MPIRunArguments': '', + 'MPIRunWorkingDirectory': '', + 'ApplicationCommand': '', + 'ApplicationArguments': '', + 'ShimCommand': '', + 'MPIAcceptMode': '', + 'MPIAcceptFilter': '' + }] + + # Find the config, and add it if it doesn't exist. + if config_name not in self.configurations: + self.AddConfig(config_name) + + # Add the DebugSettings onto the appropriate config. + self.configurations[config_name].append(n_cmd) + + def WriteIfChanged(self): + """Writes the user file.""" + configs = ['Configurations'] + for config, spec in sorted(self.configurations.iteritems()): + configs.append(spec) + + content = ['VisualStudioUserFile', + {'Version': self.version.ProjectVersion(), + 'Name': self.name + }, + configs] + easy_xml.WriteXmlIfChanged(content, self.user_file_path, + encoding="Windows-1252") diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSUtil.py b/deps/libuv/build/gyp/pylib/gyp/MSVSUtil.py new file mode 100644 index 00000000..96dea6c2 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSUtil.py @@ -0,0 +1,271 @@ +# Copyright (c) 2013 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Utility functions shared amongst the Windows generators.""" + +import copy +import os + + +# A dictionary mapping supported target types to extensions. 
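# A hypothetical usage sketch for the MSVSUserFile.Writer defined above (not
# part of the upstream sources). WriteIfChanged() only needs an object with a
# ProjectVersion() method, so a small stub stands in for gyp.MSVSVersion here;
# the command, environment, and paths are invented.
from gyp import MSVSUserFile

class _StubVersion(object):
  def ProjectVersion(self):
    return '9.00'   # VS2008-style project version

user_file = MSVSUserFile.Writer('example.vcproj.user', _StubVersion(),
                                'example')
user_file.AddDebugSettings('Debug|Win32',
                           ['python.exe', 'run_tests.py', '--verbose'],
                           environment={'GYP_DEFINES': 'component=static'},
                           working_directory='$(ProjectDir)')
user_file.WriteIfChanged()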
+TARGET_TYPE_EXT = { + 'executable': 'exe', + 'loadable_module': 'dll', + 'shared_library': 'dll', + 'static_library': 'lib', + 'windows_driver': 'sys', +} + + +def _GetLargePdbShimCcPath(): + """Returns the path of the large_pdb_shim.cc file.""" + this_dir = os.path.abspath(os.path.dirname(__file__)) + src_dir = os.path.abspath(os.path.join(this_dir, '..', '..')) + win_data_dir = os.path.join(src_dir, 'data', 'win') + large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc') + return large_pdb_shim_cc + + +def _DeepCopySomeKeys(in_dict, keys): + """Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|. + + Arguments: + in_dict: The dictionary to copy. + keys: The keys to be copied. If a key is in this list and doesn't exist in + |in_dict| this is not an error. + Returns: + The partially deep-copied dictionary. + """ + d = {} + for key in keys: + if key not in in_dict: + continue + d[key] = copy.deepcopy(in_dict[key]) + return d + + +def _SuffixName(name, suffix): + """Add a suffix to the end of a target. + + Arguments: + name: name of the target (foo#target) + suffix: the suffix to be added + Returns: + Target name with suffix added (foo_suffix#target) + """ + parts = name.rsplit('#', 1) + parts[0] = '%s_%s' % (parts[0], suffix) + return '#'.join(parts) + + +def _ShardName(name, number): + """Add a shard number to the end of a target. + + Arguments: + name: name of the target (foo#target) + number: shard number + Returns: + Target name with shard added (foo_1#target) + """ + return _SuffixName(name, str(number)) + + +def ShardTargets(target_list, target_dicts): + """Shard some targets apart to work around the linkers limits. + + Arguments: + target_list: List of target pairs: 'base/base.gyp:base'. + target_dicts: Dict of target properties keyed on target pair. + Returns: + Tuple of the new sharded versions of the inputs. + """ + # Gather the targets to shard, and how many pieces. + targets_to_shard = {} + for t in target_dicts: + shards = int(target_dicts[t].get('msvs_shard', 0)) + if shards: + targets_to_shard[t] = shards + # Shard target_list. + new_target_list = [] + for t in target_list: + if t in targets_to_shard: + for i in range(targets_to_shard[t]): + new_target_list.append(_ShardName(t, i)) + else: + new_target_list.append(t) + # Shard target_dict. + new_target_dicts = {} + for t in target_dicts: + if t in targets_to_shard: + for i in range(targets_to_shard[t]): + name = _ShardName(t, i) + new_target_dicts[name] = copy.copy(target_dicts[t]) + new_target_dicts[name]['target_name'] = _ShardName( + new_target_dicts[name]['target_name'], i) + sources = new_target_dicts[name].get('sources', []) + new_sources = [] + for pos in range(i, len(sources), targets_to_shard[t]): + new_sources.append(sources[pos]) + new_target_dicts[name]['sources'] = new_sources + else: + new_target_dicts[t] = target_dicts[t] + # Shard dependencies. + for t in sorted(new_target_dicts): + for deptype in ('dependencies', 'dependencies_original'): + dependencies = copy.copy(new_target_dicts[t].get(deptype, [])) + new_dependencies = [] + for d in dependencies: + if d in targets_to_shard: + for i in range(targets_to_shard[d]): + new_dependencies.append(_ShardName(d, i)) + else: + new_dependencies.append(d) + new_target_dicts[t][deptype] = new_dependencies + + return (new_target_list, new_target_dicts) + + +def _GetPdbPath(target_dict, config_name, vars): + """Returns the path to the PDB file that will be generated by a given + configuration. 
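# A hypothetical illustration of the sharding helpers defined above in
# MSVSUtil.py (target names and sources are invented). A target that sets
# 'msvs_shard': 2 is replaced by foo_0/foo_1, and its sources are dealt out
# round-robin between the shards.
from gyp.MSVSUtil import ShardTargets

target_list = ['base/base.gyp:foo#target']
target_dicts = {
    'base/base.gyp:foo#target': {
        'target_name': 'foo',
        'msvs_shard': 2,
        'sources': ['a.cc', 'b.cc', 'c.cc', 'd.cc'],
    },
}
new_list, new_dicts = ShardTargets(target_list, target_dicts)
# new_list == ['base/base.gyp:foo_0#target', 'base/base.gyp:foo_1#target']
# new_dicts['base/base.gyp:foo_0#target']['sources'] == ['a.cc', 'c.cc']
# new_dicts['base/base.gyp:foo_1#target']['sources'] == ['b.cc', 'd.cc']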
+ + The lookup proceeds as follows: + - Look for an explicit path in the VCLinkerTool configuration block. + - Look for an 'msvs_large_pdb_path' variable. + - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is + specified. + - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'. + + Arguments: + target_dict: The target dictionary to be searched. + config_name: The name of the configuration of interest. + vars: A dictionary of common GYP variables with generator-specific values. + Returns: + The path of the corresponding PDB file. + """ + config = target_dict['configurations'][config_name] + msvs = config.setdefault('msvs_settings', {}) + + linker = msvs.get('VCLinkerTool', {}) + + pdb_path = linker.get('ProgramDatabaseFile') + if pdb_path: + return pdb_path + + variables = target_dict.get('variables', {}) + pdb_path = variables.get('msvs_large_pdb_path', None) + if pdb_path: + return pdb_path + + + pdb_base = target_dict.get('product_name', target_dict['target_name']) + pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']]) + pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base + + return pdb_path + + +def InsertLargePdbShims(target_list, target_dicts, vars): + """Insert a shim target that forces the linker to use 4KB pagesize PDBs. + + This is a workaround for targets with PDBs greater than 1GB in size, the + limit for the 1KB pagesize PDBs created by the linker by default. + + Arguments: + target_list: List of target pairs: 'base/base.gyp:base'. + target_dicts: Dict of target properties keyed on target pair. + vars: A dictionary of common GYP variables with generator-specific values. + Returns: + Tuple of the shimmed version of the inputs. + """ + # Determine which targets need shimming. + targets_to_shim = [] + for t in target_dicts: + target_dict = target_dicts[t] + + # We only want to shim targets that have msvs_large_pdb enabled. + if not int(target_dict.get('msvs_large_pdb', 0)): + continue + # This is intended for executable, shared_library and loadable_module + # targets where every configuration is set up to produce a PDB output. + # If any of these conditions is not true then the shim logic will fail + # below. + targets_to_shim.append(t) + + large_pdb_shim_cc = _GetLargePdbShimCcPath() + + for t in targets_to_shim: + target_dict = target_dicts[t] + target_name = target_dict.get('target_name') + + base_dict = _DeepCopySomeKeys(target_dict, + ['configurations', 'default_configuration', 'toolset']) + + # This is the dict for copying the source file (part of the GYP tree) + # to the intermediate directory of the project. This is necessary because + # we can't always build a relative path to the shim source file (on Windows + # GYP and the project may be on different drives), and Ninja hates absolute + # paths (it ends up generating the .obj and .obj.d alongside the source + # file, polluting GYPs tree). + copy_suffix = 'large_pdb_copy' + copy_target_name = target_name + '_' + copy_suffix + full_copy_target_name = _SuffixName(t, copy_suffix) + shim_cc_basename = os.path.basename(large_pdb_shim_cc) + shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name + shim_cc_path = shim_cc_dir + '/' + shim_cc_basename + copy_dict = copy.deepcopy(base_dict) + copy_dict['target_name'] = copy_target_name + copy_dict['type'] = 'none' + copy_dict['sources'] = [ large_pdb_shim_cc ] + copy_dict['copies'] = [{ + 'destination': shim_cc_dir, + 'files': [ large_pdb_shim_cc ] + }] + + # This is the dict for the PDB generating shim target. 
It depends on the + # copy target. + shim_suffix = 'large_pdb_shim' + shim_target_name = target_name + '_' + shim_suffix + full_shim_target_name = _SuffixName(t, shim_suffix) + shim_dict = copy.deepcopy(base_dict) + shim_dict['target_name'] = shim_target_name + shim_dict['type'] = 'static_library' + shim_dict['sources'] = [ shim_cc_path ] + shim_dict['dependencies'] = [ full_copy_target_name ] + + # Set up the shim to output its PDB to the same location as the final linker + # target. + for config_name, config in shim_dict.get('configurations').iteritems(): + pdb_path = _GetPdbPath(target_dict, config_name, vars) + + # A few keys that we don't want to propagate. + for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']: + config.pop(key, None) + + msvs = config.setdefault('msvs_settings', {}) + + # Update the compiler directives in the shim target. + compiler = msvs.setdefault('VCCLCompilerTool', {}) + compiler['DebugInformationFormat'] = '3' + compiler['ProgramDataBaseFileName'] = pdb_path + + # Set the explicit PDB path in the appropriate configuration of the + # original target. + config = target_dict['configurations'][config_name] + msvs = config.setdefault('msvs_settings', {}) + linker = msvs.setdefault('VCLinkerTool', {}) + linker['GenerateDebugInformation'] = 'true' + linker['ProgramDatabaseFile'] = pdb_path + + # Add the new targets. They must go to the beginning of the list so that + # the dependency generation works as expected in ninja. + target_list.insert(0, full_copy_target_name) + target_list.insert(0, full_shim_target_name) + target_dicts[full_copy_target_name] = copy_dict + target_dicts[full_shim_target_name] = shim_dict + + # Update the original target to depend on the shim target. + target_dict.setdefault('dependencies', []).append(full_shim_target_name) + + return (target_list, target_dicts) diff --git a/deps/libuv/build/gyp/pylib/gyp/MSVSVersion.py b/deps/libuv/build/gyp/pylib/gyp/MSVSVersion.py new file mode 100644 index 00000000..edaf6eed --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/MSVSVersion.py @@ -0,0 +1,453 @@ +# Copyright (c) 2013 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
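# A hypothetical sketch of the large-PDB shim insertion defined in
# MSVSUtil.py above (not part of the upstream sources; the target name and
# paths are invented). A target that sets 'msvs_large_pdb' gains a copy
# target and a static-library shim, and its linker settings are pinned to an
# explicit .pdb path.
from gyp import MSVSUtil

generator_vars = {'PRODUCT_DIR': 'out/Release',
                  'SHARED_INTERMEDIATE_DIR': 'out/Release/gen'}
target_list = ['demo/demo.gyp:demo#target']
target_dicts = {
    'demo/demo.gyp:demo#target': {
        'target_name': 'demo',
        'type': 'executable',
        'msvs_large_pdb': 1,
        'configurations': {'Release': {}},
    },
}
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
    target_list, target_dicts, generator_vars)
# demo_large_pdb_shim and demo_large_pdb_copy are prepended to target_list,
# and the Release config of 'demo' now sets VCLinkerTool/ProgramDatabaseFile
# to 'out/Release/demo.exe.pdb' with GenerateDebugInformation enabled.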
+ +"""Handle version information related to Visual Stuio.""" + +import errno +import os +import re +import subprocess +import sys +import gyp +import glob + + +class VisualStudioVersion(object): + """Information regarding a version of Visual Studio.""" + + def __init__(self, short_name, description, + solution_version, project_version, flat_sln, uses_vcxproj, + path, sdk_based, default_toolset=None): + self.short_name = short_name + self.description = description + self.solution_version = solution_version + self.project_version = project_version + self.flat_sln = flat_sln + self.uses_vcxproj = uses_vcxproj + self.path = path + self.sdk_based = sdk_based + self.default_toolset = default_toolset + + def ShortName(self): + return self.short_name + + def Description(self): + """Get the full description of the version.""" + return self.description + + def SolutionVersion(self): + """Get the version number of the sln files.""" + return self.solution_version + + def ProjectVersion(self): + """Get the version number of the vcproj or vcxproj files.""" + return self.project_version + + def FlatSolution(self): + return self.flat_sln + + def UsesVcxproj(self): + """Returns true if this version uses a vcxproj file.""" + return self.uses_vcxproj + + def ProjectExtension(self): + """Returns the file extension for the project.""" + return self.uses_vcxproj and '.vcxproj' or '.vcproj' + + def Path(self): + """Returns the path to Visual Studio installation.""" + return self.path + + def ToolPath(self, tool): + """Returns the path to a given compiler tool. """ + return os.path.normpath(os.path.join(self.path, "VC/bin", tool)) + + def DefaultToolset(self): + """Returns the msbuild toolset version that will be used in the absence + of a user override.""" + return self.default_toolset + + def _SetupScriptInternal(self, target_arch): + """Returns a command (with arguments) to be used to set up the + environment.""" + # If WindowsSDKDir is set and SetEnv.Cmd exists then we are using the + # depot_tools build tools and should run SetEnv.Cmd to set up the + # environment. The check for WindowsSDKDir alone is not sufficient because + # this is set by running vcvarsall.bat. + assert target_arch in ('x86', 'x64') + sdk_dir = os.environ.get('WindowsSDKDir') + if sdk_dir: + setup_path = os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')) + if self.sdk_based and sdk_dir and os.path.exists(setup_path): + return [setup_path, '/' + target_arch] + else: + # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls + # vcvars32, which it can only find if VS??COMNTOOLS is set, which it + # isn't always. + if target_arch == 'x86': + if self.short_name >= '2013' and self.short_name[-1] != 'e' and ( + os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or + os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'): + # VS2013 and later, non-Express have a x64-x86 cross that we want + # to prefer. + return [os.path.normpath( + os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86'] + # Otherwise, the standard x86 compiler. + return [os.path.normpath( + os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))] + else: + assert target_arch == 'x64' + arg = 'x86_amd64' + # Use the 64-on-64 compiler if we're not using an express + # edition and we're running on a 64bit OS. 
+ if self.short_name[-1] != 'e' and ( + os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or + os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'): + arg = 'amd64' + return [os.path.normpath( + os.path.join(self.path, 'VC/vcvarsall.bat')), arg] + + def SetupScript(self, target_arch): + script_data = self._SetupScriptInternal(target_arch) + script_path = script_data[0] + if not os.path.exists(script_path): + raise Exception('%s is missing - make sure VC++ tools are installed.' % + script_path) + return script_data + + +def _RegistryQueryBase(sysdir, key, value): + """Use reg.exe to read a particular key. + + While ideally we might use the win32 module, we would like gyp to be + python neutral, so for instance cygwin python lacks this module. + + Arguments: + sysdir: The system subdirectory to attempt to launch reg.exe from. + key: The registry key to read from. + value: The particular value to read. + Return: + stdout from reg.exe, or None for failure. + """ + # Skip if not on Windows or Python Win32 setup issue + if sys.platform not in ('win32', 'cygwin'): + return None + # Setup params to pass to and attempt to launch reg.exe + cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'), + 'query', key] + if value: + cmd.extend(['/v', value]) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # Obtain the stdout from reg.exe, reading to the end so p.returncode is valid + # Note that the error text may be in [1] in some cases + text = p.communicate()[0] + # Check return code from reg.exe; officially 0==success and 1==error + if p.returncode: + return None + return text + + +def _RegistryQuery(key, value=None): + r"""Use reg.exe to read a particular key through _RegistryQueryBase. + + First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If + that fails, it falls back to System32. Sysnative is available on Vista and + up and available on Windows Server 2003 and XP through KB patch 942589. Note + that Sysnative will always fail if using 64-bit python due to it being a + virtual directory and System32 will work correctly in the first place. + + KB 942589 - http://support.microsoft.com/kb/942589/en-us. + + Arguments: + key: The registry key. + value: The particular registry value to read (optional). + Return: + stdout from reg.exe, or None for failure. + """ + text = None + try: + text = _RegistryQueryBase('Sysnative', key, value) + except OSError, e: + if e.errno == errno.ENOENT: + text = _RegistryQueryBase('System32', key, value) + else: + raise + return text + + +def _RegistryGetValueUsingWinReg(key, value): + """Use the _winreg module to obtain the value of a registry key. + + Args: + key: The registry key. + value: The particular registry value to read. + Return: + contents of the registry key's value, or None on failure. Throws + ImportError if _winreg is unavailable. + """ + import _winreg + try: + root, subkey = key.split('\\', 1) + assert root == 'HKLM' # Only need HKLM for now. + with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey: + return _winreg.QueryValueEx(hkey, value)[0] + except WindowsError: + return None + + +def _RegistryGetValue(key, value): + """Use _winreg or reg.exe to obtain the value of a registry key. + + Using _winreg is preferable because it solves an issue on some corporate + environments where access to reg.exe is locked down. However, we still need + to fallback to reg.exe for the case where the _winreg module is not available + (for example in cygwin python). 
+ + Args: + key: The registry key. + value: The particular registry value to read. + Return: + contents of the registry key's value, or None on failure. + """ + try: + return _RegistryGetValueUsingWinReg(key, value) + except ImportError: + pass + + # Fallback to reg.exe if we fail to import _winreg. + text = _RegistryQuery(key, value) + if not text: + return None + # Extract value. + match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text) + if not match: + return None + return match.group(1) + + +def _CreateVersion(name, path, sdk_based=False): + """Sets up MSVS project generation. + + Setup is based off the GYP_MSVS_VERSION environment variable or whatever is + autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is + passed in that doesn't match a value in versions python will throw a error. + """ + if path: + path = os.path.normpath(path) + versions = { + '2015': VisualStudioVersion('2015', + 'Visual Studio 2015', + solution_version='12.00', + project_version='14.0', + flat_sln=False, + uses_vcxproj=True, + path=path, + sdk_based=sdk_based, + default_toolset='v140'), + '2013': VisualStudioVersion('2013', + 'Visual Studio 2013', + solution_version='13.00', + project_version='12.0', + flat_sln=False, + uses_vcxproj=True, + path=path, + sdk_based=sdk_based, + default_toolset='v120'), + '2013e': VisualStudioVersion('2013e', + 'Visual Studio 2013', + solution_version='13.00', + project_version='12.0', + flat_sln=True, + uses_vcxproj=True, + path=path, + sdk_based=sdk_based, + default_toolset='v120'), + '2012': VisualStudioVersion('2012', + 'Visual Studio 2012', + solution_version='12.00', + project_version='4.0', + flat_sln=False, + uses_vcxproj=True, + path=path, + sdk_based=sdk_based, + default_toolset='v110'), + '2012e': VisualStudioVersion('2012e', + 'Visual Studio 2012', + solution_version='12.00', + project_version='4.0', + flat_sln=True, + uses_vcxproj=True, + path=path, + sdk_based=sdk_based, + default_toolset='v110'), + '2010': VisualStudioVersion('2010', + 'Visual Studio 2010', + solution_version='11.00', + project_version='4.0', + flat_sln=False, + uses_vcxproj=True, + path=path, + sdk_based=sdk_based), + '2010e': VisualStudioVersion('2010e', + 'Visual C++ Express 2010', + solution_version='11.00', + project_version='4.0', + flat_sln=True, + uses_vcxproj=True, + path=path, + sdk_based=sdk_based), + '2008': VisualStudioVersion('2008', + 'Visual Studio 2008', + solution_version='10.00', + project_version='9.00', + flat_sln=False, + uses_vcxproj=False, + path=path, + sdk_based=sdk_based), + '2008e': VisualStudioVersion('2008e', + 'Visual Studio 2008', + solution_version='10.00', + project_version='9.00', + flat_sln=True, + uses_vcxproj=False, + path=path, + sdk_based=sdk_based), + '2005': VisualStudioVersion('2005', + 'Visual Studio 2005', + solution_version='9.00', + project_version='8.00', + flat_sln=False, + uses_vcxproj=False, + path=path, + sdk_based=sdk_based), + '2005e': VisualStudioVersion('2005e', + 'Visual Studio 2005', + solution_version='9.00', + project_version='8.00', + flat_sln=True, + uses_vcxproj=False, + path=path, + sdk_based=sdk_based), + } + return versions[str(name)] + + +def _ConvertToCygpath(path): + """Convert to cygwin path if we are using cygwin.""" + if sys.platform == 'cygwin': + p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE) + path = p.communicate()[0].strip() + return path + + +def _DetectVisualStudioVersions(versions_to_check, force_express): + """Collect the list of installed visual studio versions. 
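# A hypothetical sketch (the install path is invented): the version objects
# built by _CreateVersion above carry everything the MSVS generator needs to
# know about one Visual Studio release.
from gyp import MSVSVersion

vs2010 = MSVSVersion._CreateVersion(
    '2010', r'C:\Program Files (x86)\Microsoft Visual Studio 10.0')
vs2010.UsesVcxproj()       # True: 2010 and later generate MSBuild projects
vs2010.ProjectExtension()  # '.vcxproj'
vs2010.SolutionVersion()   # '11.00', written into the generated .sln header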
+ + Returns: + A list of visual studio versions installed in descending order of + usage preference. + Base this on the registry and a quick check if devenv.exe exists. + Only versions 8-10 are considered. + Possibilities are: + 2005(e) - Visual Studio 2005 (8) + 2008(e) - Visual Studio 2008 (9) + 2010(e) - Visual Studio 2010 (10) + 2012(e) - Visual Studio 2012 (11) + 2013(e) - Visual Studio 2013 (12) + 2015 - Visual Studio 2015 (14) + Where (e) is e for express editions of MSVS and blank otherwise. + """ + version_to_year = { + '8.0': '2005', + '9.0': '2008', + '10.0': '2010', + '11.0': '2012', + '12.0': '2013', + '14.0': '2015', + } + versions = [] + for version in versions_to_check: + # Old method of searching for which VS version is installed + # We don't use the 2010-encouraged-way because we also want to get the + # path to the binaries, which it doesn't offer. + keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version, + r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version, + r'HKLM\Software\Microsoft\VCExpress\%s' % version, + r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version] + for index in range(len(keys)): + path = _RegistryGetValue(keys[index], 'InstallDir') + if not path: + continue + path = _ConvertToCygpath(path) + # Check for full. + full_path = os.path.join(path, 'devenv.exe') + express_path = os.path.join(path, '*express.exe') + if not force_express and os.path.exists(full_path): + # Add this one. + versions.append(_CreateVersion(version_to_year[version], + os.path.join(path, '..', '..'))) + # Check for express. + elif glob.glob(express_path): + # Add this one. + versions.append(_CreateVersion(version_to_year[version] + 'e', + os.path.join(path, '..', '..'))) + + # The old method above does not work when only SDK is installed. + keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7', + r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7'] + for index in range(len(keys)): + path = _RegistryGetValue(keys[index], version) + if not path: + continue + path = _ConvertToCygpath(path) + if version != '14.0': # There is no Express edition for 2015. + versions.append(_CreateVersion(version_to_year[version] + 'e', + os.path.join(path, '..'), sdk_based=True)) + + return versions + + +def SelectVisualStudioVersion(version='auto', allow_fallback=True): + """Select which version of Visual Studio projects to generate. + + Arguments: + version: Hook to allow caller to force a particular version (vs auto). + Returns: + An object representing a visual studio project format version. + """ + # In auto mode, check environment variable for override. + if version == 'auto': + version = os.environ.get('GYP_MSVS_VERSION', 'auto') + version_map = { + 'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'), + '2005': ('8.0',), + '2005e': ('8.0',), + '2008': ('9.0',), + '2008e': ('9.0',), + '2010': ('10.0',), + '2010e': ('10.0',), + '2012': ('11.0',), + '2012e': ('11.0',), + '2013': ('12.0',), + '2013e': ('12.0',), + '2015': ('14.0',), + } + override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH') + if override_path: + msvs_version = os.environ.get('GYP_MSVS_VERSION') + if not msvs_version: + raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be ' + 'set to a particular version (e.g. 
2010e).') + return _CreateVersion(msvs_version, override_path, sdk_based=True) + version = str(version) + versions = _DetectVisualStudioVersions(version_map[version], 'e' in version) + if not versions: + if not allow_fallback: + raise ValueError('Could not locate Visual Studio installation.') + if version == 'auto': + # Default to 2005 if we couldn't find anything + return _CreateVersion('2005', None) + else: + return _CreateVersion(version, None) + return versions[0] diff --git a/deps/libuv/build/gyp/pylib/gyp/__init__.py b/deps/libuv/build/gyp/pylib/gyp/__init__.py new file mode 100755 index 00000000..668f38b6 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/__init__.py @@ -0,0 +1,548 @@ +#!/usr/bin/env python + +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import copy +import gyp.input +import optparse +import os.path +import re +import shlex +import sys +import traceback +from gyp.common import GypError + +# Default debug modes for GYP +debug = {} + +# List of "official" debug modes, but you can use anything you like. +DEBUG_GENERAL = 'general' +DEBUG_VARIABLES = 'variables' +DEBUG_INCLUDES = 'includes' + + +def DebugOutput(mode, message, *args): + if 'all' in gyp.debug or mode in gyp.debug: + ctx = ('unknown', 0, 'unknown') + try: + f = traceback.extract_stack(limit=2) + if f: + ctx = f[0][:3] + except: + pass + if args: + message %= args + print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]), + ctx[1], ctx[2], message) + +def FindBuildFiles(): + extension = '.gyp' + files = os.listdir(os.getcwd()) + build_files = [] + for file in files: + if file.endswith(extension): + build_files.append(file) + return build_files + + +def Load(build_files, format, default_variables={}, + includes=[], depth='.', params=None, check=False, + circular_check=True, duplicate_basename_check=True): + """ + Loads one or more specified build files. + default_variables and includes will be copied before use. + Returns the generator for the specified format and the + data returned by loading the specified build files. + """ + if params is None: + params = {} + + if '-' in format: + format, params['flavor'] = format.split('-', 1) + + default_variables = copy.copy(default_variables) + + # Default variables provided by this program and its modules should be + # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace, + # avoiding collisions with user and automatic variables. + default_variables['GENERATOR'] = format + default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '') + + # Format can be a custom python file, or by default the name of a module + # within gyp.generator. + if format.endswith('.py'): + generator_name = os.path.splitext(format)[0] + path, generator_name = os.path.split(generator_name) + + # Make sure the path to the custom generator is in sys.path + # Don't worry about removing it once we are done. Keeping the path + # to each generator that is used in sys.path is likely harmless and + # arguably a good idea. + path = os.path.abspath(path) + if path not in sys.path: + sys.path.insert(0, path) + else: + generator_name = 'gyp.generator.' + format + + # These parameters are passed in order (as opposed to by key) + # because ActivePython cannot handle key parameters to __import__. 
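+  # For example, format 'msvs' imports gyp.generator.msvs here; a format
+  # given as a '.py' file (say, a hypothetical path/to/mygen.py) was already
+  # resolved to its bare module name above.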
+ generator = __import__(generator_name, globals(), locals(), generator_name) + for (key, val) in generator.generator_default_variables.items(): + default_variables.setdefault(key, val) + + # Give the generator the opportunity to set additional variables based on + # the params it will receive in the output phase. + if getattr(generator, 'CalculateVariables', None): + generator.CalculateVariables(default_variables, params) + + # Give the generator the opportunity to set generator_input_info based on + # the params it will receive in the output phase. + if getattr(generator, 'CalculateGeneratorInputInfo', None): + generator.CalculateGeneratorInputInfo(params) + + # Fetch the generator specific info that gets fed to input, we use getattr + # so we can default things and the generators only have to provide what + # they need. + generator_input_info = { + 'non_configuration_keys': + getattr(generator, 'generator_additional_non_configuration_keys', []), + 'path_sections': + getattr(generator, 'generator_additional_path_sections', []), + 'extra_sources_for_rules': + getattr(generator, 'generator_extra_sources_for_rules', []), + 'generator_supports_multiple_toolsets': + getattr(generator, 'generator_supports_multiple_toolsets', False), + 'generator_wants_static_library_dependencies_adjusted': + getattr(generator, + 'generator_wants_static_library_dependencies_adjusted', True), + 'generator_wants_sorted_dependencies': + getattr(generator, 'generator_wants_sorted_dependencies', False), + 'generator_filelist_paths': + getattr(generator, 'generator_filelist_paths', None), + } + + # Process the input specific to this generator. + result = gyp.input.Load(build_files, default_variables, includes[:], + depth, generator_input_info, check, circular_check, + duplicate_basename_check, + params['parallel'], params['root_targets']) + return [generator] + result + +def NameValueListToDict(name_value_list): + """ + Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary + of the pairs. If a string is simply NAME, then the value in the dictionary + is set to True. If VALUE can be converted to an integer, it is. + """ + result = { } + for item in name_value_list: + tokens = item.split('=', 1) + if len(tokens) == 2: + # If we can make it an int, use that, otherwise, use the string. + try: + token_value = int(tokens[1]) + except ValueError: + token_value = tokens[1] + # Set the variable to the supplied value. + result[tokens[0]] = token_value + else: + # No value supplied, treat it as a boolean and set it. + result[tokens[0]] = True + return result + +def ShlexEnv(env_name): + flags = os.environ.get(env_name, []) + if flags: + flags = shlex.split(flags) + return flags + +def FormatOpt(opt, value): + if opt.startswith('--'): + return '%s=%s' % (opt, value) + return opt + value + +def RegenerateAppendFlag(flag, values, predicate, env_name, options): + """Regenerate a list of command line flags, for an option of action='append'. + + The |env_name|, if given, is checked in the environment and used to generate + an initial list of options, then the options that were specified on the + command line (given in |values|) are appended. This matches the handling of + environment variables and command line flags where command line flags override + the environment, while not requiring the environment to be set when the flags + are used again. 
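+
+  For example, with GYP_DEFINES='foo=1' in the environment and -Dbar=2 given
+  on the command line, the regenerated -D flags come out as
+  ['-Dfoo=1', '-Dbar=2'].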
+ """ + flags = [] + if options.use_environment and env_name: + for flag_value in ShlexEnv(env_name): + value = FormatOpt(flag, predicate(flag_value)) + if value in flags: + flags.remove(value) + flags.append(value) + if values: + for flag_value in values: + flags.append(FormatOpt(flag, predicate(flag_value))) + return flags + +def RegenerateFlags(options): + """Given a parsed options object, and taking the environment variables into + account, returns a list of flags that should regenerate an equivalent options + object (even in the absence of the environment variables.) + + Any path options will be normalized relative to depth. + + The format flag is not included, as it is assumed the calling generator will + set that as appropriate. + """ + def FixPath(path): + path = gyp.common.FixIfRelativePath(path, options.depth) + if not path: + return os.path.curdir + return path + + def Noop(value): + return value + + # We always want to ignore the environment when regenerating, to avoid + # duplicate or changed flags in the environment at the time of regeneration. + flags = ['--ignore-environment'] + for name, metadata in options._regeneration_metadata.iteritems(): + opt = metadata['opt'] + value = getattr(options, name) + value_predicate = metadata['type'] == 'path' and FixPath or Noop + action = metadata['action'] + env_name = metadata['env_name'] + if action == 'append': + flags.extend(RegenerateAppendFlag(opt, value, value_predicate, + env_name, options)) + elif action in ('store', None): # None is a synonym for 'store'. + if value: + flags.append(FormatOpt(opt, value_predicate(value))) + elif options.use_environment and env_name and os.environ.get(env_name): + flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name)))) + elif action in ('store_true', 'store_false'): + if ((action == 'store_true' and value) or + (action == 'store_false' and not value)): + flags.append(opt) + elif options.use_environment and env_name: + print >>sys.stderr, ('Warning: environment regeneration unimplemented ' + 'for %s flag %r env_name %r' % (action, opt, + env_name)) + else: + print >>sys.stderr, ('Warning: regeneration unimplemented for action %r ' + 'flag %r' % (action, opt)) + + return flags + +class RegeneratableOptionParser(optparse.OptionParser): + def __init__(self): + self.__regeneratable_options = {} + optparse.OptionParser.__init__(self) + + def add_option(self, *args, **kw): + """Add an option to the parser. + + This accepts the same arguments as OptionParser.add_option, plus the + following: + regenerate: can be set to False to prevent this option from being included + in regeneration. + env_name: name of environment variable that additional values for this + option come from. + type: adds type='path', to tell the regenerator that the values of + this option need to be made relative to options.depth + """ + env_name = kw.pop('env_name', None) + if 'dest' in kw and kw.pop('regenerate', True): + dest = kw['dest'] + + # The path type is needed for regenerating, for optparse we can just treat + # it as a string. 
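+      # For example, --depth and --include use type='path' so that their
+      # values are made relative to options.depth when regeneration flags
+      # are emitted.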
+ type = kw.get('type') + if type == 'path': + kw['type'] = 'string' + + self.__regeneratable_options[dest] = { + 'action': kw.get('action'), + 'type': type, + 'env_name': env_name, + 'opt': args[0], + } + + optparse.OptionParser.add_option(self, *args, **kw) + + def parse_args(self, *args): + values, args = optparse.OptionParser.parse_args(self, *args) + values._regeneration_metadata = self.__regeneratable_options + return values, args + +def gyp_main(args): + my_name = os.path.basename(sys.argv[0]) + + parser = RegeneratableOptionParser() + usage = 'usage: %s [options ...] [build_file ...]' + parser.set_usage(usage.replace('%s', '%prog')) + parser.add_option('--build', dest='configs', action='append', + help='configuration for build after project generation') + parser.add_option('--check', dest='check', action='store_true', + help='check format of gyp files') + parser.add_option('--config-dir', dest='config_dir', action='store', + env_name='GYP_CONFIG_DIR', default=None, + help='The location for configuration files like ' + 'include.gypi.') + parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE', + action='append', default=[], help='turn on a debugging ' + 'mode for debugging GYP. Supported modes are "variables", ' + '"includes" and "general" or "all" for all of them.') + parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL', + env_name='GYP_DEFINES', + help='sets variable VAR to value VAL') + parser.add_option('--depth', dest='depth', metavar='PATH', type='path', + help='set DEPTH gyp variable to a relative path to PATH') + parser.add_option('-f', '--format', dest='formats', action='append', + env_name='GYP_GENERATORS', regenerate=False, + help='output formats to generate') + parser.add_option('-G', dest='generator_flags', action='append', default=[], + metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS', + help='sets generator flag FLAG to VAL') + parser.add_option('--generator-output', dest='generator_output', + action='store', default=None, metavar='DIR', type='path', + env_name='GYP_GENERATOR_OUTPUT', + help='puts generated build files under DIR') + parser.add_option('--ignore-environment', dest='use_environment', + action='store_false', default=True, regenerate=False, + help='do not read options from environment variables') + parser.add_option('-I', '--include', dest='includes', action='append', + metavar='INCLUDE', type='path', + help='files to include in all loaded .gyp files') + # --no-circular-check disables the check for circular relationships between + # .gyp files. These relationships should not exist, but they've only been + # observed to be harmful with the Xcode generator. Chromium's .gyp files + # currently have some circular relationships on non-Mac platforms, so this + # option allows the strict behavior to be used on Macs and the lenient + # behavior to be used elsewhere. + # TODO(mark): Remove this option when http://crbug.com/35878 is fixed. + parser.add_option('--no-circular-check', dest='circular_check', + action='store_false', default=True, regenerate=False, + help="don't check for circular relationships between files") + # --no-duplicate-basename-check disables the check for duplicate basenames + # in a static_library/shared_library project. Visual C++ 2008 generator + # doesn't support this configuration. Libtool on Mac also generates warnings + # when duplicate basenames are passed into Make generator on Mac. + # TODO(yukawa): Remove this option when these legacy generators are + # deprecated. 
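+  # (A duplicate basename means e.g. foo/util.cc and bar/util.cc both listed
+  # in the sources of a single library target.)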
+ parser.add_option('--no-duplicate-basename-check', + dest='duplicate_basename_check', action='store_false', + default=True, regenerate=False, + help="don't check for duplicate basenames") + parser.add_option('--no-parallel', action='store_true', default=False, + help='Disable multiprocessing') + parser.add_option('-S', '--suffix', dest='suffix', default='', + help='suffix to add to generated files') + parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store', + default=None, metavar='DIR', type='path', + help='directory to use as the root of the source tree') + parser.add_option('-R', '--root-target', dest='root_targets', + action='append', metavar='TARGET', + help='include only TARGET and its deep dependencies') + + options, build_files_arg = parser.parse_args(args) + build_files = build_files_arg + + # Set up the configuration directory (defaults to ~/.gyp) + if not options.config_dir: + home = None + home_dot_gyp = None + if options.use_environment: + home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None) + if home_dot_gyp: + home_dot_gyp = os.path.expanduser(home_dot_gyp) + + if not home_dot_gyp: + home_vars = ['HOME'] + if sys.platform in ('cygwin', 'win32'): + home_vars.append('USERPROFILE') + for home_var in home_vars: + home = os.getenv(home_var) + if home != None: + home_dot_gyp = os.path.join(home, '.gyp') + if not os.path.exists(home_dot_gyp): + home_dot_gyp = None + else: + break + else: + home_dot_gyp = os.path.expanduser(options.config_dir) + + if home_dot_gyp and not os.path.exists(home_dot_gyp): + home_dot_gyp = None + + if not options.formats: + # If no format was given on the command line, then check the env variable. + generate_formats = [] + if options.use_environment: + generate_formats = os.environ.get('GYP_GENERATORS', []) + if generate_formats: + generate_formats = re.split(r'[\s,]', generate_formats) + if generate_formats: + options.formats = generate_formats + else: + # Nothing in the variable, default based on platform. + if sys.platform == 'darwin': + options.formats = ['xcode'] + elif sys.platform in ('win32', 'cygwin'): + options.formats = ['msvs'] + else: + options.formats = ['make'] + + if not options.generator_output and options.use_environment: + g_o = os.environ.get('GYP_GENERATOR_OUTPUT') + if g_o: + options.generator_output = g_o + + options.parallel = not options.no_parallel + + for mode in options.debug: + gyp.debug[mode] = 1 + + # Do an extra check to avoid work when we're not debugging. + if DEBUG_GENERAL in gyp.debug: + DebugOutput(DEBUG_GENERAL, 'running with these options:') + for option, value in sorted(options.__dict__.items()): + if option[0] == '_': + continue + if isinstance(value, basestring): + DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value) + else: + DebugOutput(DEBUG_GENERAL, " %s: %s", option, value) + + if not build_files: + build_files = FindBuildFiles() + if not build_files: + raise GypError((usage + '\n\n%s: error: no build_file') % + (my_name, my_name)) + + # TODO(mark): Chromium-specific hack! + # For Chromium, the gyp "depth" variable should always be a relative path + # to Chromium's top-level "src" directory. If no depth variable was set + # on the command line, try to find a "src" directory by looking at the + # absolute path to each build file's directory. The first "src" component + # found will be treated as though it were the path used for --depth. 
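+  # For example, a build file at <somewhere>/chromium/src/net/net.gyp yields
+  # a depth of <somewhere>/chromium/src.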
+ if not options.depth: + for build_file in build_files: + build_file_dir = os.path.abspath(os.path.dirname(build_file)) + build_file_dir_components = build_file_dir.split(os.path.sep) + components_len = len(build_file_dir_components) + for index in xrange(components_len - 1, -1, -1): + if build_file_dir_components[index] == 'src': + options.depth = os.path.sep.join(build_file_dir_components) + break + del build_file_dir_components[index] + + # If the inner loop found something, break without advancing to another + # build file. + if options.depth: + break + + if not options.depth: + raise GypError('Could not automatically locate src directory. This is' + 'a temporary Chromium feature that will be removed. Use' + '--depth as a workaround.') + + # If toplevel-dir is not set, we assume that depth is the root of our source + # tree. + if not options.toplevel_dir: + options.toplevel_dir = options.depth + + # -D on the command line sets variable defaults - D isn't just for define, + # it's for default. Perhaps there should be a way to force (-F?) a + # variable's value so that it can't be overridden by anything else. + cmdline_default_variables = {} + defines = [] + if options.use_environment: + defines += ShlexEnv('GYP_DEFINES') + if options.defines: + defines += options.defines + cmdline_default_variables = NameValueListToDict(defines) + if DEBUG_GENERAL in gyp.debug: + DebugOutput(DEBUG_GENERAL, + "cmdline_default_variables: %s", cmdline_default_variables) + + # Set up includes. + includes = [] + + # If ~/.gyp/include.gypi exists, it'll be forcibly included into every + # .gyp file that's loaded, before anything else is included. + if home_dot_gyp != None: + default_include = os.path.join(home_dot_gyp, 'include.gypi') + if os.path.exists(default_include): + print 'Using overrides found in ' + default_include + includes.append(default_include) + + # Command-line --include files come after the default include. + if options.includes: + includes.extend(options.includes) + + # Generator flags should be prefixed with the target generator since they + # are global across all generator runs. + gen_flags = [] + if options.use_environment: + gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS') + if options.generator_flags: + gen_flags += options.generator_flags + generator_flags = NameValueListToDict(gen_flags) + if DEBUG_GENERAL in gyp.debug.keys(): + DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags) + + # Generate all requested formats (use a set in case we got one format request + # twice) + for format in set(options.formats): + params = {'options': options, + 'build_files': build_files, + 'generator_flags': generator_flags, + 'cwd': os.getcwd(), + 'build_files_arg': build_files_arg, + 'gyp_binary': sys.argv[0], + 'home_dot_gyp': home_dot_gyp, + 'parallel': options.parallel, + 'root_targets': options.root_targets, + 'target_arch': cmdline_default_variables.get('target_arch', '')} + + # Start with the default variables from the command line. + [generator, flat_list, targets, data] = Load( + build_files, format, cmdline_default_variables, includes, options.depth, + params, options.check, options.circular_check, + options.duplicate_basename_check) + + # TODO(mark): Pass |data| for now because the generator needs a list of + # build files that came in. In the future, maybe it should just accept + # a list, and not the whole data dict. + # NOTE: flat_list is the flattened dependency graph specifying the order + # that targets may be built. 
Build systems that operate serially or that + # need to have dependencies defined before dependents reference them should + # generate targets in the order specified in flat_list. + generator.GenerateOutput(flat_list, targets, data, params) + + if options.configs: + valid_configs = targets[flat_list[0]]['configurations'].keys() + for conf in options.configs: + if conf not in valid_configs: + raise GypError('Invalid config specified via --build: %s' % conf) + generator.PerformBuild(data, options.configs, params) + + # Done + return 0 + + +def main(args): + try: + return gyp_main(args) + except GypError, e: + sys.stderr.write("gyp: %s\n" % e) + return 1 + +# NOTE: setuptools generated console_scripts calls function with no arguments +def script_main(): + return main(sys.argv[1:]) + +if __name__ == '__main__': + sys.exit(script_main()) diff --git a/deps/libuv/build/gyp/pylib/gyp/common.py b/deps/libuv/build/gyp/pylib/gyp/common.py new file mode 100644 index 00000000..a1e1db5f --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/common.py @@ -0,0 +1,615 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +from __future__ import with_statement + +import collections +import errno +import filecmp +import os.path +import re +import tempfile +import sys + + +# A minimal memoizing decorator. It'll blow up if the args aren't immutable, +# among other "problems". +class memoize(object): + def __init__(self, func): + self.func = func + self.cache = {} + def __call__(self, *args): + try: + return self.cache[args] + except KeyError: + result = self.func(*args) + self.cache[args] = result + return result + + +class GypError(Exception): + """Error class representing an error, which is to be presented + to the user. The main entry point will catch and display this. + """ + pass + + +def ExceptionAppend(e, msg): + """Append a message to the given exception's message.""" + if not e.args: + e.args = (msg,) + elif len(e.args) == 1: + e.args = (str(e.args[0]) + ' ' + msg,) + else: + e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:] + + +def FindQualifiedTargets(target, qualified_list): + """ + Given a list of qualified targets, return the qualified targets for the + specified |target|. + """ + return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target] + + +def ParseQualifiedTarget(target): + # Splits a qualified target into a build file, target name and toolset. + + # NOTE: rsplit is used to disambiguate the Windows drive letter separator. + target_split = target.rsplit(':', 1) + if len(target_split) == 2: + [build_file, target] = target_split + else: + build_file = None + + target_split = target.rsplit('#', 1) + if len(target_split) == 2: + [target, toolset] = target_split + else: + toolset = None + + return [build_file, target, toolset] + + +def ResolveTarget(build_file, target, toolset): + # This function resolves a target into a canonical form: + # - a fully defined build file, either absolute or relative to the current + # directory + # - a target name + # - a toolset + # + # build_file is the file relative to which 'target' is defined. + # target is the qualified target. + # toolset is the default toolset for that target. + [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target) + + if parsed_build_file: + if build_file: + # If a relative path, parsed_build_file is relative to the directory + # containing build_file. 
If build_file is not in the current directory, + # parsed_build_file is not a usable path as-is. Resolve it by + # interpreting it as relative to build_file. If parsed_build_file is + # absolute, it is usable as a path regardless of the current directory, + # and os.path.join will return it as-is. + build_file = os.path.normpath(os.path.join(os.path.dirname(build_file), + parsed_build_file)) + # Further (to handle cases like ../cwd), make it relative to cwd) + if not os.path.isabs(build_file): + build_file = RelativePath(build_file, '.') + else: + build_file = parsed_build_file + + if parsed_toolset: + toolset = parsed_toolset + + return [build_file, target, toolset] + + +def BuildFile(fully_qualified_target): + # Extracts the build file from the fully qualified target. + return ParseQualifiedTarget(fully_qualified_target)[0] + + +def GetEnvironFallback(var_list, default): + """Look up a key in the environment, with fallback to secondary keys + and finally falling back to a default value.""" + for var in var_list: + if var in os.environ: + return os.environ[var] + return default + + +def QualifiedTarget(build_file, target, toolset): + # "Qualified" means the file that a target was defined in and the target + # name, separated by a colon, suffixed by a # and the toolset name: + # /path/to/file.gyp:target_name#toolset + fully_qualified = build_file + ':' + target + if toolset: + fully_qualified = fully_qualified + '#' + toolset + return fully_qualified + + +@memoize +def RelativePath(path, relative_to, follow_path_symlink=True): + # Assuming both |path| and |relative_to| are relative to the current + # directory, returns a relative path that identifies path relative to + # relative_to. + # If |follow_symlink_path| is true (default) and |path| is a symlink, then + # this method returns a path to the real file represented by |path|. If it is + # false, this method returns a path to the symlink. If |path| is not a + # symlink, this option has no effect. + + # Convert to normalized (and therefore absolute paths). + if follow_path_symlink: + path = os.path.realpath(path) + else: + path = os.path.abspath(path) + relative_to = os.path.realpath(relative_to) + + # On Windows, we can't create a relative path to a different drive, so just + # use the absolute path. + if sys.platform == 'win32': + if (os.path.splitdrive(path)[0].lower() != + os.path.splitdrive(relative_to)[0].lower()): + return path + + # Split the paths into components. + path_split = path.split(os.path.sep) + relative_to_split = relative_to.split(os.path.sep) + + # Determine how much of the prefix the two paths share. + prefix_len = len(os.path.commonprefix([path_split, relative_to_split])) + + # Put enough ".." components to back up out of relative_to to the common + # prefix, and then append the part of path_split after the common prefix. + relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \ + path_split[prefix_len:] + + if len(relative_split) == 0: + # The paths were the same. + return '' + + # Turn it back into a string and we're done. + return os.path.join(*relative_split) + + +@memoize +def InvertRelativePath(path, toplevel_dir=None): + """Given a path like foo/bar that is relative to toplevel_dir, return + the inverse relative path back to the toplevel_dir. + + E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path))) + should always produce the empty string, unless the path contains symlinks. + """ + if not path: + return path + toplevel_dir = '.' 
if toplevel_dir is None else toplevel_dir + return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path)) + + +def FixIfRelativePath(path, relative_to): + # Like RelativePath but returns |path| unchanged if it is absolute. + if os.path.isabs(path): + return path + return RelativePath(path, relative_to) + + +def UnrelativePath(path, relative_to): + # Assuming that |relative_to| is relative to the current directory, and |path| + # is a path relative to the dirname of |relative_to|, returns a path that + # identifies |path| relative to the current directory. + rel_dir = os.path.dirname(relative_to) + return os.path.normpath(os.path.join(rel_dir, path)) + + +# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at +# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02 +# and the documentation for various shells. + +# _quote is a pattern that should match any argument that needs to be quoted +# with double-quotes by EncodePOSIXShellArgument. It matches the following +# characters appearing anywhere in an argument: +# \t, \n, space parameter separators +# # comments +# $ expansions (quoted to always expand within one argument) +# % called out by IEEE 1003.1 XCU.2.2 +# & job control +# ' quoting +# (, ) subshell execution +# *, ?, [ pathname expansion +# ; command delimiter +# <, >, | redirection +# = assignment +# {, } brace expansion (bash) +# ~ tilde expansion +# It also matches the empty string, because "" (or '') is the only way to +# represent an empty string literal argument to a POSIX shell. +# +# This does not match the characters in _escape, because those need to be +# backslash-escaped regardless of whether they appear in a double-quoted +# string. +_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$') + +# _escape is a pattern that should match any character that needs to be +# escaped with a backslash, whether or not the argument matched the _quote +# pattern. _escape is used with re.sub to backslash anything in _escape's +# first match group, hence the (parentheses) in the regular expression. +# +# _escape matches the following characters appearing anywhere in an argument: +# " to prevent POSIX shells from interpreting this character for quoting +# \ to prevent POSIX shells from interpreting this character for escaping +# ` to prevent POSIX shells from interpreting this character for command +# substitution +# Missing from this list is $, because the desired behavior of +# EncodePOSIXShellArgument is to permit parameter (variable) expansion. +# +# Also missing from this list is !, which bash will interpret as the history +# expansion character when history is enabled. bash does not enable history +# by default in non-interactive shells, so this is not thought to be a problem. +# ! was omitted from this list because bash interprets "\!" as a literal string +# including the backslash character (avoiding history expansion but retaining +# the backslash), which would not be correct for argument encoding. Handling +# this case properly would also be problematic because bash allows the history +# character to be changed with the histchars shell variable. Fortunately, +# as history is not enabled in non-interactive shells and +# EncodePOSIXShellArgument is only expected to encode for non-interactive +# shells, there is no room for error here by ignoring !. +_escape = re.compile(r'(["\\`])') + +def EncodePOSIXShellArgument(argument): + """Encodes |argument| suitably for consumption by POSIX shells. 
+ + argument may be quoted and escaped as necessary to ensure that POSIX shells + treat the returned value as a literal representing the argument passed to + this function. Parameter (variable) expansions beginning with $ are allowed + to remain intact without escaping the $, to allow the argument to contain + references to variables to be expanded by the shell. + """ + + if not isinstance(argument, str): + argument = str(argument) + + if _quote.search(argument): + quote = '"' + else: + quote = '' + + encoded = quote + re.sub(_escape, r'\\\1', argument) + quote + + return encoded + + +def EncodePOSIXShellList(list): + """Encodes |list| suitably for consumption by POSIX shells. + + Returns EncodePOSIXShellArgument for each item in list, and joins them + together using the space character as an argument separator. + """ + + encoded_arguments = [] + for argument in list: + encoded_arguments.append(EncodePOSIXShellArgument(argument)) + return ' '.join(encoded_arguments) + + +def DeepDependencyTargets(target_dicts, roots): + """Returns the recursive list of target dependencies.""" + dependencies = set() + pending = set(roots) + while pending: + # Pluck out one. + r = pending.pop() + # Skip if visited already. + if r in dependencies: + continue + # Add it. + dependencies.add(r) + # Add its children. + spec = target_dicts[r] + pending.update(set(spec.get('dependencies', []))) + pending.update(set(spec.get('dependencies_original', []))) + return list(dependencies - set(roots)) + + +def BuildFileTargets(target_list, build_file): + """From a target_list, returns the subset from the specified build_file. + """ + return [p for p in target_list if BuildFile(p) == build_file] + + +def AllTargets(target_list, target_dicts, build_file): + """Returns all targets (direct and dependencies) for the specified build_file. + """ + bftargets = BuildFileTargets(target_list, build_file) + deptargets = DeepDependencyTargets(target_dicts, bftargets) + return bftargets + deptargets + + +def WriteOnDiff(filename): + """Write to a file only if the new contents differ. + + Arguments: + filename: name of the file to potentially write to. + Returns: + A file like object which will write to temporary file and only overwrite + the target if it differs (on close). + """ + + class Writer(object): + """Wrapper around file which only covers the target if it differs.""" + def __init__(self): + # Pick temporary file. + tmp_fd, self.tmp_path = tempfile.mkstemp( + suffix='.tmp', + prefix=os.path.split(filename)[1] + '.gyp.', + dir=os.path.split(filename)[0]) + try: + self.tmp_file = os.fdopen(tmp_fd, 'wb') + except Exception: + # Don't leave turds behind. + os.unlink(self.tmp_path) + raise + + def __getattr__(self, attrname): + # Delegate everything else to self.tmp_file + return getattr(self.tmp_file, attrname) + + def close(self): + try: + # Close tmp file. + self.tmp_file.close() + # Determine if different. + same = False + try: + same = filecmp.cmp(self.tmp_path, filename, False) + except OSError, e: + if e.errno != errno.ENOENT: + raise + + if same: + # The new file is identical to the old one, just get rid of the new + # one. + os.unlink(self.tmp_path) + else: + # The new file is different from the old one, or there is no old one. + # Rename the new file to the permanent name. + # + # tempfile.mkstemp uses an overly restrictive mode, resulting in a + # file that can only be read by the owner, regardless of the umask. 
+ # There's no reason to not respect the umask here, which means that + # an extra hoop is required to fetch it and reset the new file's mode. + # + # No way to get the umask without setting a new one? Set a safe one + # and then set it back to the old value. + umask = os.umask(077) + os.umask(umask) + os.chmod(self.tmp_path, 0666 & ~umask) + if sys.platform == 'win32' and os.path.exists(filename): + # NOTE: on windows (but not cygwin) rename will not replace an + # existing file, so it must be preceded with a remove. Sadly there + # is no way to make the switch atomic. + os.remove(filename) + os.rename(self.tmp_path, filename) + except Exception: + # Don't leave turds behind. + os.unlink(self.tmp_path) + raise + + return Writer() + + +def EnsureDirExists(path): + """Make sure the directory for |path| exists.""" + try: + os.makedirs(os.path.dirname(path)) + except OSError: + pass + + +def GetFlavor(params): + """Returns |params.flavor| if it's set, the system's default flavor else.""" + flavors = { + 'cygwin': 'win', + 'win32': 'win', + 'darwin': 'mac', + } + + if 'flavor' in params: + return params['flavor'] + if sys.platform in flavors: + return flavors[sys.platform] + if sys.platform.startswith('sunos'): + return 'solaris' + if sys.platform.startswith('freebsd'): + return 'freebsd' + if sys.platform.startswith('openbsd'): + return 'openbsd' + if sys.platform.startswith('netbsd'): + return 'netbsd' + if sys.platform.startswith('aix'): + return 'aix' + + return 'linux' + + +def CopyTool(flavor, out_path, generator_flags={}): + """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it + to |out_path|.""" + # aix and solaris just need flock emulation. mac and win use more complicated + # support scripts. + prefix = { + 'aix': 'flock', + 'solaris': 'flock', + 'mac': 'mac', + 'win': 'win' + }.get(flavor, None) + if not prefix: + return + + # Slurp input file. + source_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix) + with open(source_path) as source_file: + source = source_file.readlines() + + # Set custom header flags. + header = '# Generated by gyp. Do not edit.\n' + mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None) + if flavor == 'mac' and mac_toolchain_dir: + header += "import os;\nos.environ['DEVELOPER_DIR']='%s'\n" \ + % mac_toolchain_dir + + # Add header and write it out. + tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix) + with open(tool_path, 'w') as tool_file: + tool_file.write( + ''.join([source[0], header] + source[1:])) + + # Make file executable. + os.chmod(tool_path, 0755) + + +# From Alex Martelli, +# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 +# ASPN: Python Cookbook: Remove duplicates from a sequence +# First comment, dated 2001/10/13. +# (Also in the printed Python Cookbook.) + +def uniquer(seq, idfun=None): + if idfun is None: + idfun = lambda x: x + seen = {} + result = [] + for item in seq: + marker = idfun(item) + if marker in seen: continue + seen[marker] = 1 + result.append(item) + return result + + +# Based on http://code.activestate.com/recipes/576694/. 
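+# A set that remembers insertion order: keys are stored in a dict for O(1)
+# membership tests and in a doubly linked list (with a sentinel node) to
+# preserve the order in which they were added.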
+class OrderedSet(collections.MutableSet): + def __init__(self, iterable=None): + self.end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.map = {} # key --> [key, prev, next] + if iterable is not None: + self |= iterable + + def __len__(self): + return len(self.map) + + def __contains__(self, key): + return key in self.map + + def add(self, key): + if key not in self.map: + end = self.end + curr = end[1] + curr[2] = end[1] = self.map[key] = [key, curr, end] + + def discard(self, key): + if key in self.map: + key, prev_item, next_item = self.map.pop(key) + prev_item[2] = next_item + next_item[1] = prev_item + + def __iter__(self): + end = self.end + curr = end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + # The second argument is an addition that causes a pylint warning. + def pop(self, last=True): # pylint: disable=W0221 + if not self: + raise KeyError('set is empty') + key = self.end[1][0] if last else self.end[2][0] + self.discard(key) + return key + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self)) + + def __eq__(self, other): + if isinstance(other, OrderedSet): + return len(self) == len(other) and list(self) == list(other) + return set(self) == set(other) + + # Extensions to the recipe. + def update(self, iterable): + for i in iterable: + if i not in self: + self.add(i) + + +class CycleError(Exception): + """An exception raised when an unexpected cycle is detected.""" + def __init__(self, nodes): + self.nodes = nodes + def __str__(self): + return 'CycleError: cycle involving: ' + str(self.nodes) + + +def TopologicallySorted(graph, get_edges): + r"""Topologically sort based on a user provided edge definition. + + Args: + graph: A list of node names. + get_edges: A function mapping from node name to a hashable collection + of node names which this node has outgoing edges to. + Returns: + A list containing all of the node in graph in topological order. + It is assumed that calling get_edges once for each node and caching is + cheaper than repeatedly calling get_edges. + Raises: + CycleError in the event of a cycle. + Example: + graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'} + def GetEdges(node): + return re.findall(r'\$\(([^))]\)', graph[node]) + print TopologicallySorted(graph.keys(), GetEdges) + ==> + ['a', 'c', b'] + """ + get_edges = memoize(get_edges) + visited = set() + visiting = set() + ordered_nodes = [] + def Visit(node): + if node in visiting: + raise CycleError(visiting) + if node in visited: + return + visited.add(node) + visiting.add(node) + for neighbor in get_edges(node): + Visit(neighbor) + visiting.remove(node) + ordered_nodes.insert(0, node) + for node in sorted(graph): + Visit(node) + return ordered_nodes + +def CrossCompileRequested(): + # TODO: figure out how to not build extra host objects in the + # non-cross-compile case when this is enabled, and enable unconditionally. 
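+  # Setting any of the variables below (e.g. CC_host and CC_target pointing
+  # at different compilers) is treated as a request to build with separate
+  # host and target toolsets.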
+ return (os.environ.get('GYP_CROSSCOMPILE') or + os.environ.get('AR_host') or + os.environ.get('CC_host') or + os.environ.get('CXX_host') or + os.environ.get('AR_target') or + os.environ.get('CC_target') or + os.environ.get('CXX_target')) diff --git a/deps/libuv/build/gyp/pylib/gyp/common_test.py b/deps/libuv/build/gyp/pylib/gyp/common_test.py new file mode 100755 index 00000000..ad6f9a14 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/common_test.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python + +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Unit tests for the common.py file.""" + +import gyp.common +import unittest +import sys + + +class TestTopologicallySorted(unittest.TestCase): + def test_Valid(self): + """Test that sorting works on a valid graph with one possible order.""" + graph = { + 'a': ['b', 'c'], + 'b': [], + 'c': ['d'], + 'd': ['b'], + } + def GetEdge(node): + return tuple(graph[node]) + self.assertEqual( + gyp.common.TopologicallySorted(graph.keys(), GetEdge), + ['a', 'c', 'd', 'b']) + + def test_Cycle(self): + """Test that an exception is thrown on a cyclic graph.""" + graph = { + 'a': ['b'], + 'b': ['c'], + 'c': ['d'], + 'd': ['a'], + } + def GetEdge(node): + return tuple(graph[node]) + self.assertRaises( + gyp.common.CycleError, gyp.common.TopologicallySorted, + graph.keys(), GetEdge) + + +class TestGetFlavor(unittest.TestCase): + """Test that gyp.common.GetFlavor works as intended""" + original_platform = '' + + def setUp(self): + self.original_platform = sys.platform + + def tearDown(self): + sys.platform = self.original_platform + + def assertFlavor(self, expected, argument, param): + sys.platform = argument + self.assertEqual(expected, gyp.common.GetFlavor(param)) + + def test_platform_default(self): + self.assertFlavor('freebsd', 'freebsd9' , {}) + self.assertFlavor('freebsd', 'freebsd10', {}) + self.assertFlavor('openbsd', 'openbsd5' , {}) + self.assertFlavor('solaris', 'sunos5' , {}); + self.assertFlavor('solaris', 'sunos' , {}); + self.assertFlavor('linux' , 'linux2' , {}); + self.assertFlavor('linux' , 'linux3' , {}); + + def test_param(self): + self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'}) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/libuv/build/gyp/pylib/gyp/easy_xml.py b/deps/libuv/build/gyp/pylib/gyp/easy_xml.py new file mode 100644 index 00000000..bf949b6a --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/easy_xml.py @@ -0,0 +1,157 @@ +# Copyright (c) 2011 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import re +import os + + +def XmlToString(content, encoding='utf-8', pretty=False): + """ Writes the XML content to disk, touching the file only if it has changed. + + Visual Studio files have a lot of pre-defined structures. This function makes + it easy to represent these structures as Python data structures, instead of + having to create a lot of function calls. + + Each XML element of the content is represented as a list composed of: + 1. The name of the element, a string, + 2. The attributes of the element, a dictionary (optional), and + 3+. The content of the element, if any. Strings are simple text nodes and + lists are child elements. + + Example 1: + + becomes + ['test'] + + Example 2: + + This is + it! 
+ + + becomes + ['myelement', {'a':'value1', 'b':'value2'}, + ['childtype', 'This is'], + ['childtype', 'it!'], + ] + + Args: + content: The structured content to be converted. + encoding: The encoding to report on the first XML line. + pretty: True if we want pretty printing with indents and new lines. + + Returns: + The XML content as a string. + """ + # We create a huge list of all the elements of the file. + xml_parts = ['' % encoding] + if pretty: + xml_parts.append('\n') + _ConstructContentList(xml_parts, content, pretty) + + # Convert it to a string + return ''.join(xml_parts) + + +def _ConstructContentList(xml_parts, specification, pretty, level=0): + """ Appends the XML parts corresponding to the specification. + + Args: + xml_parts: A list of XML parts to be appended to. + specification: The specification of the element. See EasyXml docs. + pretty: True if we want pretty printing with indents and new lines. + level: Indentation level. + """ + # The first item in a specification is the name of the element. + if pretty: + indentation = ' ' * level + new_line = '\n' + else: + indentation = '' + new_line = '' + name = specification[0] + if not isinstance(name, str): + raise Exception('The first item of an EasyXml specification should be ' + 'a string. Specification was ' + str(specification)) + xml_parts.append(indentation + '<' + name) + + # Optionally in second position is a dictionary of the attributes. + rest = specification[1:] + if rest and isinstance(rest[0], dict): + for at, val in sorted(rest[0].iteritems()): + xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True))) + rest = rest[1:] + if rest: + xml_parts.append('>') + all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True) + multi_line = not all_strings + if multi_line and new_line: + xml_parts.append(new_line) + for child_spec in rest: + # If it's a string, append a text node. + # Otherwise recurse over that child definition + if isinstance(child_spec, str): + xml_parts.append(_XmlEscape(child_spec)) + else: + _ConstructContentList(xml_parts, child_spec, pretty, level + 1) + if multi_line and indentation: + xml_parts.append(indentation) + xml_parts.append('%s' % (name, new_line)) + else: + xml_parts.append('/>%s' % new_line) + + +def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False, + win32=False): + """ Writes the XML content to disk, touching the file only if it has changed. + + Args: + content: The structured content to be written. + path: Location of the file. + encoding: The encoding to report on the first line of the XML file. + pretty: True if we want pretty printing with indents and new lines. 
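+    win32: True when the file is written for Windows tools; newlines are
+        converted to \r\n if the host platform does not already use them.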
+ """ + xml_string = XmlToString(content, encoding, pretty) + if win32 and os.linesep != '\r\n': + xml_string = xml_string.replace('\n', '\r\n') + + # Get the old content + try: + f = open(path, 'r') + existing = f.read() + f.close() + except: + existing = None + + # It has changed, write it + if existing != xml_string: + f = open(path, 'w') + f.write(xml_string) + f.close() + + +_xml_escape_map = { + '"': '"', + "'": ''', + '<': '<', + '>': '>', + '&': '&', + '\n': ' ', + '\r': ' ', +} + + +_xml_escape_re = re.compile( + "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys()))) + + +def _XmlEscape(value, attr=False): + """ Escape a string for inclusion in XML.""" + def replace(match): + m = match.string[match.start() : match.end()] + # don't replace single quotes in attrs + if attr and m == "'": + return m + return _xml_escape_map[m] + return _xml_escape_re.sub(replace, value) diff --git a/deps/libuv/build/gyp/pylib/gyp/easy_xml_test.py b/deps/libuv/build/gyp/pylib/gyp/easy_xml_test.py new file mode 100755 index 00000000..df643549 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/easy_xml_test.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python + +# Copyright (c) 2011 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" Unit tests for the easy_xml.py file. """ + +import gyp.easy_xml as easy_xml +import unittest +import StringIO + + +class TestSequenceFunctions(unittest.TestCase): + + def setUp(self): + self.stderr = StringIO.StringIO() + + def test_EasyXml_simple(self): + self.assertEqual( + easy_xml.XmlToString(['test']), + '') + + self.assertEqual( + easy_xml.XmlToString(['test'], encoding='Windows-1252'), + '') + + def test_EasyXml_simple_with_attributes(self): + self.assertEqual( + easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]), + '') + + def test_EasyXml_escaping(self): + original = '\'"\r&\nfoo' + converted = '<test>\'" & foo' + converted_apos = converted.replace("'", ''') + self.assertEqual( + easy_xml.XmlToString(['test3', {'a': original}, original]), + '%s' % + (converted, converted_apos)) + + def test_EasyXml_pretty(self): + self.assertEqual( + easy_xml.XmlToString( + ['test3', + ['GrandParent', + ['Parent1', + ['Child'] + ], + ['Parent2'] + ] + ], + pretty=True), + '\n' + '\n' + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + ' \n' + '\n') + + + def test_EasyXml_complex(self): + # We want to create: + target = ( + '' + '' + '' + '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}' + 'Win32Proj' + 'automated_ui_tests' + '' + '' + '' + 'Application' + 'Unicode' + '' + '') + + xml = easy_xml.XmlToString( + ['Project', + ['PropertyGroup', {'Label': 'Globals'}, + ['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'], + ['Keyword', 'Win32Proj'], + ['RootNamespace', 'automated_ui_tests'] + ], + ['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}], + ['PropertyGroup', + {'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'", + 'Label': 'Configuration'}, + ['ConfigurationType', 'Application'], + ['CharacterSet', 'Unicode'] + ] + ]) + self.assertEqual(xml, target) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/libuv/build/gyp/pylib/gyp/flock_tool.py b/deps/libuv/build/gyp/pylib/gyp/flock_tool.py new file mode 100755 index 00000000..b38d8660 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/flock_tool.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# Copyright (c) 2011 Google Inc. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""These functions are executed via gyp-flock-tool when using the Makefile +generator. Used on systems that don't have a built-in flock.""" + +import fcntl +import os +import struct +import subprocess +import sys + + +def main(args): + executor = FlockTool() + executor.Dispatch(args) + + +class FlockTool(object): + """This class emulates the 'flock' command.""" + def Dispatch(self, args): + """Dispatches a string command to a method.""" + if len(args) < 1: + raise Exception("Not enough arguments") + + method = "Exec%s" % self._CommandifyName(args[0]) + getattr(self, method)(*args[1:]) + + def _CommandifyName(self, name_string): + """Transforms a tool name like copy-info-plist to CopyInfoPlist""" + return name_string.title().replace('-', '') + + def ExecFlock(self, lockfile, *cmd_list): + """Emulates the most basic behavior of Linux's flock(1).""" + # Rely on exception handling to report errors. + # Note that the stock python on SunOS has a bug + # where fcntl.flock(fd, LOCK_EX) always fails + # with EBADF, that's why we use this F_SETLK + # hack instead. + fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666) + if sys.platform.startswith('aix'): + # Python on AIX is compiled with LARGEFILE support, which changes the + # struct size. + op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0) + else: + op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0) + fcntl.fcntl(fd, fcntl.F_SETLK, op) + return subprocess.call(cmd_list) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/__init__.py b/deps/libuv/build/gyp/pylib/gyp/generator/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/analyzer.py b/deps/libuv/build/gyp/pylib/gyp/generator/analyzer.py new file mode 100644 index 00000000..921c1a6b --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/analyzer.py @@ -0,0 +1,741 @@ +# Copyright (c) 2014 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +This script is intended for use as a GYP_GENERATOR. It takes as input (by way of +the generator flag config_path) the path of a json file that dictates the files +and targets to search for. The following keys are supported: +files: list of paths (relative) of the files to search for. +test_targets: unqualified target names to search for. Any target in this list +that depends upon a file in |files| is output regardless of the type of target +or chain of dependencies. +additional_compile_targets: Unqualified targets to search for in addition to +test_targets. Targets in the combined list that depend upon a file in |files| +are not necessarily output. For example, if the target is of type none then the +target is not output (but one of the descendants of the target will be). + +The following is output: +error: only supplied if there is an error. +compile_targets: minimal set of targets that directly or indirectly (for + targets of type none) depend on the files in |files| and is one of the + supplied targets or a target that one of the supplied targets depends on. + The expectation is this set of targets is passed into a build step. This list + always contains the output of test_targets as well. 
+test_targets: set of targets from the supplied |test_targets| that either + directly or indirectly depend upon a file in |files|. This list if useful + if additional processing needs to be done for certain targets after the + build, such as running tests. +status: outputs one of three values: none of the supplied files were found, + one of the include files changed so that it should be assumed everything + changed (in this case test_targets and compile_targets are not output) or at + least one file was found. +invalid_targets: list of supplied targets that were not found. + +Example: +Consider a graph like the following: + A D + / \ +B C +A depends upon both B and C, A is of type none and B and C are executables. +D is an executable, has no dependencies and nothing depends on it. +If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and +files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then +the following is output: +|compile_targets| = ["B"] B must built as it depends upon the changed file b.cc +and the supplied target A depends upon it. A is not output as a build_target +as it is of type none with no rules and actions. +|test_targets| = ["B"] B directly depends upon the change file b.cc. + +Even though the file d.cc, which D depends upon, has changed D is not output +as it was not supplied by way of |additional_compile_targets| or |test_targets|. + +If the generator flag analyzer_output_path is specified, output is written +there. Otherwise output is written to stdout. + +In Gyp the "all" target is shorthand for the root targets in the files passed +to gyp. For example, if file "a.gyp" contains targets "a1" and +"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency +on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2". +Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not +directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp +then the "all" target includes "b1" and "b2". +""" + +import gyp.common +import gyp.ninja_syntax as ninja_syntax +import json +import os +import posixpath +import sys + +debug = False + +found_dependency_string = 'Found dependency' +no_dependency_string = 'No dependencies' +# Status when it should be assumed that everything has changed. +all_changed_string = 'Found dependency (all)' + +# MatchStatus is used indicate if and how a target depends upon the supplied +# sources. +# The target's sources contain one of the supplied paths. +MATCH_STATUS_MATCHES = 1 +# The target has a dependency on another target that contains one of the +# supplied paths. +MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2 +# The target's sources weren't in the supplied paths and none of the target's +# dependencies depend upon a target that matched. +MATCH_STATUS_DOESNT_MATCH = 3 +# The target doesn't contain the source, but the dependent targets have not yet +# been visited to determine a more specific status yet. +MATCH_STATUS_TBD = 4 + +generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested() + +generator_wants_static_library_dependencies_adjusted = False + +generator_default_variables = { +} +for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', + 'LIB_DIR', 'SHARED_LIB_DIR']: + generator_default_variables[dirname] = '!!!' 
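+# The '!!!' placeholder keeps these path expansions recognizable so that
+# _AddSources() below can skip them when collecting source files.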
+ +for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', + 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', + 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', + 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', + 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', + 'CONFIGURATION_NAME']: + generator_default_variables[unused] = '' + + +def _ToGypPath(path): + """Converts a path to the format used by gyp.""" + if os.sep == '\\' and os.altsep == '/': + return path.replace('\\', '/') + return path + + +def _ResolveParent(path, base_path_components): + """Resolves |path|, which starts with at least one '../'. Returns an empty + string if the path shouldn't be considered. See _AddSources() for a + description of |base_path_components|.""" + depth = 0 + while path.startswith('../'): + depth += 1 + path = path[3:] + # Relative includes may go outside the source tree. For example, an action may + # have inputs in /usr/include, which are not in the source tree. + if depth > len(base_path_components): + return '' + if depth == len(base_path_components): + return path + return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \ + '/' + path + + +def _AddSources(sources, base_path, base_path_components, result): + """Extracts valid sources from |sources| and adds them to |result|. Each + source file is relative to |base_path|, but may contain '..'. To make + resolving '..' easier |base_path_components| contains each of the + directories in |base_path|. Additionally each source may contain variables. + Such sources are ignored as it is assumed dependencies on them are expressed + and tracked in some other means.""" + # NOTE: gyp paths are always posix style. + for source in sources: + if not len(source) or source.startswith('!!!') or source.startswith('$'): + continue + # variable expansion may lead to //. + org_source = source + source = source[0] + source[1:].replace('//', '/') + if source.startswith('../'): + source = _ResolveParent(source, base_path_components) + if len(source): + result.append(source) + continue + result.append(base_path + source) + if debug: + print 'AddSource', org_source, result[len(result) - 1] + + +def _ExtractSourcesFromAction(action, base_path, base_path_components, + results): + if 'inputs' in action: + _AddSources(action['inputs'], base_path, base_path_components, results) + + +def _ToLocalPath(toplevel_dir, path): + """Converts |path| to a path relative to |toplevel_dir|.""" + if path == toplevel_dir: + return '' + if path.startswith(toplevel_dir + '/'): + return path[len(toplevel_dir) + len('/'):] + return path + + +def _ExtractSources(target, target_dict, toplevel_dir): + # |target| is either absolute or relative and in the format of the OS. Gyp + # source paths are always posix. Convert |target| to a posix path relative to + # |toplevel_dir_|. This is done to make it easy to build source paths. + base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target))) + base_path_components = base_path.split('/') + + # Add a trailing '/' so that _AddSources() can easily build paths. + if len(base_path): + base_path += '/' + + if debug: + print 'ExtractSources', target, base_path + + results = [] + if 'sources' in target_dict: + _AddSources(target_dict['sources'], base_path, base_path_components, + results) + # Include the inputs from any actions. Any changes to these affect the + # resulting output. 
+ if 'actions' in target_dict: + for action in target_dict['actions']: + _ExtractSourcesFromAction(action, base_path, base_path_components, + results) + if 'rules' in target_dict: + for rule in target_dict['rules']: + _ExtractSourcesFromAction(rule, base_path, base_path_components, results) + + return results + + +class Target(object): + """Holds information about a particular target: + deps: set of Targets this Target depends upon. This is not recursive, only the + direct dependent Targets. + match_status: one of the MatchStatus values. + back_deps: set of Targets that have a dependency on this Target. + visited: used during iteration to indicate whether we've visited this target. + This is used for two iterations, once in building the set of Targets and + again in _GetBuildTargets(). + name: fully qualified name of the target. + requires_build: True if the target type is such that it needs to be built. + See _DoesTargetTypeRequireBuild for details. + added_to_compile_targets: used when determining if the target was added to the + set of targets that needs to be built. + in_roots: true if this target is a descendant of one of the root nodes. + is_executable: true if the type of target is executable. + is_static_library: true if the type of target is static_library. + is_or_has_linked_ancestor: true if the target does a link (eg executable), or + if there is a target in back_deps that does a link.""" + def __init__(self, name): + self.deps = set() + self.match_status = MATCH_STATUS_TBD + self.back_deps = set() + self.name = name + # TODO(sky): I don't like hanging this off Target. This state is specific + # to certain functions and should be isolated there. + self.visited = False + self.requires_build = False + self.added_to_compile_targets = False + self.in_roots = False + self.is_executable = False + self.is_static_library = False + self.is_or_has_linked_ancestor = False + + +class Config(object): + """Details what we're looking for + files: set of files to search for + targets: see file description for details.""" + def __init__(self): + self.files = [] + self.targets = set() + self.additional_compile_target_names = set() + self.test_target_names = set() + + def Init(self, params): + """Initializes Config. This is a separate method as it raises an exception + if there is a parse error.""" + generator_flags = params.get('generator_flags', {}) + config_path = generator_flags.get('config_path', None) + if not config_path: + return + try: + f = open(config_path, 'r') + config = json.load(f) + f.close() + except IOError: + raise Exception('Unable to open file ' + config_path) + except ValueError as e: + raise Exception('Unable to parse config file ' + config_path + str(e)) + if not isinstance(config, dict): + raise Exception('config_path must be a JSON file containing a dictionary') + self.files = config.get('files', []) + self.additional_compile_target_names = set( + config.get('additional_compile_targets', [])) + self.test_target_names = set(config.get('test_targets', [])) + + +def _WasBuildFileModified(build_file, data, files, toplevel_dir): + """Returns true if the build file |build_file| is either in |files| or + one of the files included by |build_file| is in |files|. |toplevel_dir| is + the root of the source tree.""" + if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files: + if debug: + print 'gyp file modified', build_file + return True + + # First element of included_files is the file itself. 
+ if len(data[build_file]['included_files']) <= 1: + return False + + for include_file in data[build_file]['included_files'][1:]: + # |included_files| are relative to the directory of the |build_file|. + rel_include_file = \ + _ToGypPath(gyp.common.UnrelativePath(include_file, build_file)) + if _ToLocalPath(toplevel_dir, rel_include_file) in files: + if debug: + print 'included gyp file modified, gyp_file=', build_file, \ + 'included file=', rel_include_file + return True + return False + + +def _GetOrCreateTargetByName(targets, target_name): + """Creates or returns the Target at targets[target_name]. If there is no + Target for |target_name| one is created. Returns a tuple of whether a new + Target was created and the Target.""" + if target_name in targets: + return False, targets[target_name] + target = Target(target_name) + targets[target_name] = target + return True, target + + +def _DoesTargetTypeRequireBuild(target_dict): + """Returns true if the target type is such that it needs to be built.""" + # If a 'none' target has rules or actions we assume it requires a build. + return bool(target_dict['type'] != 'none' or + target_dict.get('actions') or target_dict.get('rules')) + + +def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files, + build_files): + """Returns a tuple of the following: + . A dictionary mapping from fully qualified name to Target. + . A list of the targets that have a source file in |files|. + . Targets that constitute the 'all' target. See description at top of file + for details on the 'all' target. + This sets the |match_status| of the targets that contain any of the source + files in |files| to MATCH_STATUS_MATCHES. + |toplevel_dir| is the root of the source tree.""" + # Maps from target name to Target. + name_to_target = {} + + # Targets that matched. + matching_targets = [] + + # Queue of targets to visit. + targets_to_visit = target_list[:] + + # Maps from build file to a boolean indicating whether the build file is in + # |files|. + build_file_in_files = {} + + # Root targets across all files. + roots = set() + + # Set of Targets in |build_files|. + build_file_targets = set() + + while len(targets_to_visit) > 0: + target_name = targets_to_visit.pop() + created_target, target = _GetOrCreateTargetByName(name_to_target, + target_name) + if created_target: + roots.add(target) + elif target.visited: + continue + + target.visited = True + target.requires_build = _DoesTargetTypeRequireBuild( + target_dicts[target_name]) + target_type = target_dicts[target_name]['type'] + target.is_executable = target_type == 'executable' + target.is_static_library = target_type == 'static_library' + target.is_or_has_linked_ancestor = (target_type == 'executable' or + target_type == 'shared_library') + + build_file = gyp.common.ParseQualifiedTarget(target_name)[0] + if not build_file in build_file_in_files: + build_file_in_files[build_file] = \ + _WasBuildFileModified(build_file, data, files, toplevel_dir) + + if build_file in build_files: + build_file_targets.add(target) + + # If a build file (or any of its included files) is modified we assume all + # targets in the file are modified. 
+ if build_file_in_files[build_file]: + print 'matching target from modified build file', target_name + target.match_status = MATCH_STATUS_MATCHES + matching_targets.append(target) + else: + sources = _ExtractSources(target_name, target_dicts[target_name], + toplevel_dir) + for source in sources: + if _ToGypPath(os.path.normpath(source)) in files: + print 'target', target_name, 'matches', source + target.match_status = MATCH_STATUS_MATCHES + matching_targets.append(target) + break + + # Add dependencies to visit as well as updating back pointers for deps. + for dep in target_dicts[target_name].get('dependencies', []): + targets_to_visit.append(dep) + + created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target, + dep) + if not created_dep_target: + roots.discard(dep_target) + + target.deps.add(dep_target) + dep_target.back_deps.add(target) + + return name_to_target, matching_targets, roots & build_file_targets + + +def _GetUnqualifiedToTargetMapping(all_targets, to_find): + """Returns a tuple of the following: + . mapping (dictionary) from unqualified name to Target for all the + Targets in |to_find|. + . any target names not found. If this is empty all targets were found.""" + result = {} + if not to_find: + return {}, [] + to_find = set(to_find) + for target_name in all_targets.keys(): + extracted = gyp.common.ParseQualifiedTarget(target_name) + if len(extracted) > 1 and extracted[1] in to_find: + to_find.remove(extracted[1]) + result[extracted[1]] = all_targets[target_name] + if not to_find: + return result, [] + return result, [x for x in to_find] + + +def _DoesTargetDependOnMatchingTargets(target): + """Returns true if |target| or any of its dependencies is one of the + targets containing the files supplied as input to analyzer. This updates + |matches| of the Targets as it recurses. + target: the Target to look for.""" + if target.match_status == MATCH_STATUS_DOESNT_MATCH: + return False + if target.match_status == MATCH_STATUS_MATCHES or \ + target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY: + return True + for dep in target.deps: + if _DoesTargetDependOnMatchingTargets(dep): + target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY + print '\t', target.name, 'matches by dep', dep.name + return True + target.match_status = MATCH_STATUS_DOESNT_MATCH + return False + + +def _GetTargetsDependingOnMatchingTargets(possible_targets): + """Returns the list of Targets in |possible_targets| that depend (either + directly on indirectly) on at least one of the targets containing the files + supplied as input to analyzer. + possible_targets: targets to search from.""" + found = [] + print 'Targets that matched by dependency:' + for target in possible_targets: + if _DoesTargetDependOnMatchingTargets(target): + found.append(target) + return found + + +def _AddCompileTargets(target, roots, add_if_no_ancestor, result): + """Recurses through all targets that depend on |target|, adding all targets + that need to be built (and are in |roots|) to |result|. + roots: set of root targets. + add_if_no_ancestor: If true and there are no ancestors of |target| then add + |target| to |result|. |target| must still be in |roots|. 
+ result: targets that need to be built are added here.""" + if target.visited: + return + + target.visited = True + target.in_roots = target in roots + + for back_dep_target in target.back_deps: + _AddCompileTargets(back_dep_target, roots, False, result) + target.added_to_compile_targets |= back_dep_target.added_to_compile_targets + target.in_roots |= back_dep_target.in_roots + target.is_or_has_linked_ancestor |= ( + back_dep_target.is_or_has_linked_ancestor) + + # Always add 'executable' targets. Even though they may be built by other + # targets that depend upon them it makes detection of what is going to be + # built easier. + # And always add static_libraries that have no dependencies on them from + # linkables. This is necessary as the other dependencies on them may be + # static libraries themselves, which are not compile time dependencies. + if target.in_roots and \ + (target.is_executable or + (not target.added_to_compile_targets and + (add_if_no_ancestor or target.requires_build)) or + (target.is_static_library and add_if_no_ancestor and + not target.is_or_has_linked_ancestor)): + print '\t\tadding to compile targets', target.name, 'executable', \ + target.is_executable, 'added_to_compile_targets', \ + target.added_to_compile_targets, 'add_if_no_ancestor', \ + add_if_no_ancestor, 'requires_build', target.requires_build, \ + 'is_static_library', target.is_static_library, \ + 'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor + result.add(target) + target.added_to_compile_targets = True + + +def _GetCompileTargets(matching_targets, supplied_targets): + """Returns the set of Targets that require a build. + matching_targets: targets that changed and need to be built. + supplied_targets: set of targets supplied to analyzer to search from.""" + result = set() + for target in matching_targets: + print 'finding compile targets for match', target.name + _AddCompileTargets(target, supplied_targets, True, result) + return result + + +def _WriteOutput(params, **values): + """Writes the output, either to stdout or a file is specified.""" + if 'error' in values: + print 'Error:', values['error'] + if 'status' in values: + print values['status'] + if 'targets' in values: + values['targets'].sort() + print 'Supplied targets that depend on changed files:' + for target in values['targets']: + print '\t', target + if 'invalid_targets' in values: + values['invalid_targets'].sort() + print 'The following targets were not found:' + for target in values['invalid_targets']: + print '\t', target + if 'build_targets' in values: + values['build_targets'].sort() + print 'Targets that require a build:' + for target in values['build_targets']: + print '\t', target + if 'compile_targets' in values: + values['compile_targets'].sort() + print 'Targets that need to be built:' + for target in values['compile_targets']: + print '\t', target + if 'test_targets' in values: + values['test_targets'].sort() + print 'Test targets:' + for target in values['test_targets']: + print '\t', target + + output_path = params.get('generator_flags', {}).get( + 'analyzer_output_path', None) + if not output_path: + print json.dumps(values) + return + try: + f = open(output_path, 'w') + f.write(json.dumps(values) + '\n') + f.close() + except IOError as e: + print 'Error writing to output file', output_path, str(e) + + +def _WasGypIncludeFileModified(params, files): + """Returns true if one of the files in |files| is in the set of included + files.""" + if params['options'].includes: + for include in 
params['options'].includes: + if _ToGypPath(os.path.normpath(include)) in files: + print 'Include file modified, assuming all changed', include + return True + return False + + +def _NamesNotIn(names, mapping): + """Returns a list of the values in |names| that are not in |mapping|.""" + return [name for name in names if name not in mapping] + + +def _LookupTargets(names, mapping): + """Returns a list of the mapping[name] for each value in |names| that is in + |mapping|.""" + return [mapping[name] for name in names if name in mapping] + + +def CalculateVariables(default_variables, params): + """Calculate additional variables for use in the build (called by gyp).""" + flavor = gyp.common.GetFlavor(params) + if flavor == 'mac': + default_variables.setdefault('OS', 'mac') + elif flavor == 'win': + default_variables.setdefault('OS', 'win') + # Copy additional generator configuration data from VS, which is shared + # by the Windows Ninja generator. + import gyp.generator.msvs as msvs_generator + generator_additional_non_configuration_keys = getattr(msvs_generator, + 'generator_additional_non_configuration_keys', []) + generator_additional_path_sections = getattr(msvs_generator, + 'generator_additional_path_sections', []) + + gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) + else: + operating_system = flavor + if flavor == 'android': + operating_system = 'linux' # Keep this legacy behavior for now. + default_variables.setdefault('OS', operating_system) + + +class TargetCalculator(object): + """Calculates the matching test_targets and matching compile_targets.""" + def __init__(self, files, additional_compile_target_names, test_target_names, + data, target_list, target_dicts, toplevel_dir, build_files): + self._additional_compile_target_names = set(additional_compile_target_names) + self._test_target_names = set(test_target_names) + self._name_to_target, self._changed_targets, self._root_targets = ( + _GenerateTargets(data, target_list, target_dicts, toplevel_dir, + frozenset(files), build_files)) + self._unqualified_mapping, self.invalid_targets = ( + _GetUnqualifiedToTargetMapping(self._name_to_target, + self._supplied_target_names_no_all())) + + def _supplied_target_names(self): + return self._additional_compile_target_names | self._test_target_names + + def _supplied_target_names_no_all(self): + """Returns the supplied test targets without 'all'.""" + result = self._supplied_target_names(); + result.discard('all') + return result + + def is_build_impacted(self): + """Returns true if the supplied files impact the build at all.""" + return self._changed_targets + + def find_matching_test_target_names(self): + """Returns the set of output test targets.""" + assert self.is_build_impacted() + # Find the test targets first. 'all' is special cased to mean all the + # root targets. To deal with all the supplied |test_targets| are expanded + # to include the root targets during lookup. If any of the root targets + # match, we remove it and replace it with 'all'. 
+ test_target_names_no_all = set(self._test_target_names) + test_target_names_no_all.discard('all') + test_targets_no_all = _LookupTargets(test_target_names_no_all, + self._unqualified_mapping) + test_target_names_contains_all = 'all' in self._test_target_names + if test_target_names_contains_all: + test_targets = [x for x in (set(test_targets_no_all) | + set(self._root_targets))] + else: + test_targets = [x for x in test_targets_no_all] + print 'supplied test_targets' + for target_name in self._test_target_names: + print '\t', target_name + print 'found test_targets' + for target in test_targets: + print '\t', target.name + print 'searching for matching test targets' + matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets) + matching_test_targets_contains_all = (test_target_names_contains_all and + set(matching_test_targets) & + set(self._root_targets)) + if matching_test_targets_contains_all: + # Remove any of the targets for all that were not explicitly supplied, + # 'all' is subsequentely added to the matching names below. + matching_test_targets = [x for x in (set(matching_test_targets) & + set(test_targets_no_all))] + print 'matched test_targets' + for target in matching_test_targets: + print '\t', target.name + matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1] + for target in matching_test_targets] + if matching_test_targets_contains_all: + matching_target_names.append('all') + print '\tall' + return matching_target_names + + def find_matching_compile_target_names(self): + """Returns the set of output compile targets.""" + assert self.is_build_impacted(); + # Compile targets are found by searching up from changed targets. + # Reset the visited status for _GetBuildTargets. + for target in self._name_to_target.itervalues(): + target.visited = False + + supplied_targets = _LookupTargets(self._supplied_target_names_no_all(), + self._unqualified_mapping) + if 'all' in self._supplied_target_names(): + supplied_targets = [x for x in (set(supplied_targets) | + set(self._root_targets))] + print 'Supplied test_targets & compile_targets' + for target in supplied_targets: + print '\t', target.name + print 'Finding compile targets' + compile_targets = _GetCompileTargets(self._changed_targets, + supplied_targets) + return [gyp.common.ParseQualifiedTarget(target.name)[1] + for target in compile_targets] + + +def GenerateOutput(target_list, target_dicts, data, params): + """Called by gyp as the final stage. 
Outputs results.""" + config = Config() + try: + config.Init(params) + + if not config.files: + raise Exception('Must specify files to analyze via config_path generator ' + 'flag') + + toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir)) + if debug: + print 'toplevel_dir', toplevel_dir + + if _WasGypIncludeFileModified(params, config.files): + result_dict = { 'status': all_changed_string, + 'test_targets': list(config.test_target_names), + 'compile_targets': list( + config.additional_compile_target_names | + config.test_target_names) } + _WriteOutput(params, **result_dict) + return + + calculator = TargetCalculator(config.files, + config.additional_compile_target_names, + config.test_target_names, data, + target_list, target_dicts, toplevel_dir, + params['build_files']) + if not calculator.is_build_impacted(): + result_dict = { 'status': no_dependency_string, + 'test_targets': [], + 'compile_targets': [] } + if calculator.invalid_targets: + result_dict['invalid_targets'] = calculator.invalid_targets + _WriteOutput(params, **result_dict) + return + + test_target_names = calculator.find_matching_test_target_names() + compile_target_names = calculator.find_matching_compile_target_names() + found_at_least_one_target = compile_target_names or test_target_names + result_dict = { 'test_targets': test_target_names, + 'status': found_dependency_string if + found_at_least_one_target else no_dependency_string, + 'compile_targets': list( + set(compile_target_names) | + set(test_target_names)) } + if calculator.invalid_targets: + result_dict['invalid_targets'] = calculator.invalid_targets + _WriteOutput(params, **result_dict) + + except Exception as e: + _WriteOutput(params, error=str(e)) diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/cmake.py b/deps/libuv/build/gyp/pylib/gyp/generator/cmake.py new file mode 100644 index 00000000..a2b96291 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/cmake.py @@ -0,0 +1,1248 @@ +# Copyright (c) 2013 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""cmake output module + +This module is under development and should be considered experimental. + +This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is +created for each configuration. + +This module's original purpose was to support editing in IDEs like KDevelop +which use CMake for project management. It is also possible to use CMake to +generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator +will convert the CMakeLists.txt to a code::blocks cbp for the editor to read, +but build using CMake. As a result QtCreator editor is unaware of compiler +defines. The generated CMakeLists.txt can also be used to build on Linux. There +is currently no support for building on platforms other than Linux. + +The generated CMakeLists.txt should properly compile all projects. However, +there is a mismatch between gyp and cmake with regard to linking. All attempts +are made to work around this, but CMake sometimes sees -Wl,--start-group as a +library and incorrectly repeats it. As a result the output of this generator +should not be relied on for building. + +When using with kdevelop, use version 4.4+. Previous versions of kdevelop will +not be able to find the header file directories described in the generated +CMakeLists.txt file. 
+""" + +import multiprocessing +import os +import signal +import string +import subprocess +import gyp.common +import gyp.xcode_emulation + +generator_default_variables = { + 'EXECUTABLE_PREFIX': '', + 'EXECUTABLE_SUFFIX': '', + 'STATIC_LIB_PREFIX': 'lib', + 'STATIC_LIB_SUFFIX': '.a', + 'SHARED_LIB_PREFIX': 'lib', + 'SHARED_LIB_SUFFIX': '.so', + 'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}', + 'LIB_DIR': '${obj}.${TOOLSET}', + 'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni', + 'SHARED_INTERMEDIATE_DIR': '${obj}/gen', + 'PRODUCT_DIR': '${builddir}', + 'RULE_INPUT_PATH': '${RULE_INPUT_PATH}', + 'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}', + 'RULE_INPUT_NAME': '${RULE_INPUT_NAME}', + 'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}', + 'RULE_INPUT_EXT': '${RULE_INPUT_EXT}', + 'CONFIGURATION_NAME': '${configuration}', +} + +FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}') + +generator_supports_multiple_toolsets = True +generator_wants_static_library_dependencies_adjusted = True + +COMPILABLE_EXTENSIONS = { + '.c': 'cc', + '.cc': 'cxx', + '.cpp': 'cxx', + '.cxx': 'cxx', + '.s': 's', # cc + '.S': 's', # cc +} + + +def RemovePrefix(a, prefix): + """Returns 'a' without 'prefix' if it starts with 'prefix'.""" + return a[len(prefix):] if a.startswith(prefix) else a + + +def CalculateVariables(default_variables, params): + """Calculate additional variables for use in the build (called by gyp).""" + default_variables.setdefault('OS', gyp.common.GetFlavor(params)) + + +def Compilable(filename): + """Return true if the file is compilable (should be in OBJS).""" + return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS) + + +def Linkable(filename): + """Return true if the file is linkable (should be on the link line).""" + return filename.endswith('.o') + + +def NormjoinPathForceCMakeSource(base_path, rel_path): + """Resolves rel_path against base_path and returns the result. + + If rel_path is an absolute path it is returned unchanged. + Otherwise it is resolved against base_path and normalized. + If the result is a relative path, it is forced to be relative to the + CMakeLists.txt. + """ + if os.path.isabs(rel_path): + return rel_path + if any([rel_path.startswith(var) for var in FULL_PATH_VARS]): + return rel_path + # TODO: do we need to check base_path for absolute variables as well? + return os.path.join('${CMAKE_CURRENT_LIST_DIR}', + os.path.normpath(os.path.join(base_path, rel_path))) + + +def NormjoinPath(base_path, rel_path): + """Resolves rel_path against base_path and returns the result. + TODO: what is this really used for? + If rel_path begins with '$' it is returned unchanged. + Otherwise it is resolved against base_path if relative, then normalized. + """ + if rel_path.startswith('$') and not rel_path.startswith('${configuration}'): + return rel_path + return os.path.normpath(os.path.join(base_path, rel_path)) + + +def CMakeStringEscape(a): + """Escapes the string 'a' for use inside a CMake string. 
+ + This means escaping + '\' otherwise it may be seen as modifying the next character + '"' otherwise it will end the string + ';' otherwise the string becomes a list + + The following do not need to be escaped + '#' when the lexer is in string state, this does not start a comment + + The following are yet unknown + '$' generator variables (like ${obj}) must not be escaped, + but text $ should be escaped + what is wanted is to know which $ come from generator variables + """ + return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"') + + +def SetFileProperty(output, source_name, property_name, values, sep): + """Given a set of source file, sets the given property on them.""" + output.write('set_source_files_properties(') + output.write(source_name) + output.write(' PROPERTIES ') + output.write(property_name) + output.write(' "') + for value in values: + output.write(CMakeStringEscape(value)) + output.write(sep) + output.write('")\n') + + +def SetFilesProperty(output, variable, property_name, values, sep): + """Given a set of source files, sets the given property on them.""" + output.write('set_source_files_properties(') + WriteVariable(output, variable) + output.write(' PROPERTIES ') + output.write(property_name) + output.write(' "') + for value in values: + output.write(CMakeStringEscape(value)) + output.write(sep) + output.write('")\n') + + +def SetTargetProperty(output, target_name, property_name, values, sep=''): + """Given a target, sets the given property.""" + output.write('set_target_properties(') + output.write(target_name) + output.write(' PROPERTIES ') + output.write(property_name) + output.write(' "') + for value in values: + output.write(CMakeStringEscape(value)) + output.write(sep) + output.write('")\n') + + +def SetVariable(output, variable_name, value): + """Sets a CMake variable.""" + output.write('set(') + output.write(variable_name) + output.write(' "') + output.write(CMakeStringEscape(value)) + output.write('")\n') + + +def SetVariableList(output, variable_name, values): + """Sets a CMake variable to a list.""" + if not values: + return SetVariable(output, variable_name, "") + if len(values) == 1: + return SetVariable(output, variable_name, values[0]) + output.write('list(APPEND ') + output.write(variable_name) + output.write('\n "') + output.write('"\n "'.join([CMakeStringEscape(value) for value in values])) + output.write('")\n') + + +def UnsetVariable(output, variable_name): + """Unsets a CMake variable.""" + output.write('unset(') + output.write(variable_name) + output.write(')\n') + + +def WriteVariable(output, variable_name, prepend=None): + if prepend: + output.write(prepend) + output.write('${') + output.write(variable_name) + output.write('}') + + +class CMakeTargetType(object): + def __init__(self, command, modifier, property_modifier): + self.command = command + self.modifier = modifier + self.property_modifier = property_modifier + + +cmake_target_type_from_gyp_target_type = { + 'executable': CMakeTargetType('add_executable', None, 'RUNTIME'), + 'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'), + 'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'), + 'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'), + 'none': CMakeTargetType('add_custom_target', 'SOURCES', None), +} + + +def StringToCMakeTargetName(a): + """Converts the given string 'a' to a valid CMake target name. + + All invalid characters are replaced by '_'. 
+ Invalid for cmake: ' ', '/', '(', ')', '"' + Invalid for make: ':' + Invalid for unknown reasons but cause failures: '.' + """ + return a.translate(string.maketrans(' /():."', '_______')) + + +def WriteActions(target_name, actions, extra_sources, extra_deps, + path_to_gyp, output): + """Write CMake for the 'actions' in the target. + + Args: + target_name: the name of the CMake target being generated. + actions: the Gyp 'actions' dict for this target. + extra_sources: [(, )] to append with generated source files. + extra_deps: [] to append with generated targets. + path_to_gyp: relative path from CMakeLists.txt being generated to + the Gyp file in which the target being generated is defined. + """ + for action in actions: + action_name = StringToCMakeTargetName(action['action_name']) + action_target_name = '%s__%s' % (target_name, action_name) + + inputs = action['inputs'] + inputs_name = action_target_name + '__input' + SetVariableList(output, inputs_name, + [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs]) + + outputs = action['outputs'] + cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out) + for out in outputs] + outputs_name = action_target_name + '__output' + SetVariableList(output, outputs_name, cmake_outputs) + + # Build up a list of outputs. + # Collect the output dirs we'll need. + dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir) + + if int(action.get('process_outputs_as_sources', False)): + extra_sources.extend(zip(cmake_outputs, outputs)) + + # add_custom_command + output.write('add_custom_command(OUTPUT ') + WriteVariable(output, outputs_name) + output.write('\n') + + if len(dirs) > 0: + for directory in dirs: + output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ') + output.write(directory) + output.write('\n') + + output.write(' COMMAND ') + output.write(gyp.common.EncodePOSIXShellList(action['action'])) + output.write('\n') + + output.write(' DEPENDS ') + WriteVariable(output, inputs_name) + output.write('\n') + + output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/') + output.write(path_to_gyp) + output.write('\n') + + output.write(' COMMENT ') + if 'message' in action: + output.write(action['message']) + else: + output.write(action_target_name) + output.write('\n') + + output.write(' VERBATIM\n') + output.write(')\n') + + # add_custom_target + output.write('add_custom_target(') + output.write(action_target_name) + output.write('\n DEPENDS ') + WriteVariable(output, outputs_name) + output.write('\n SOURCES ') + WriteVariable(output, inputs_name) + output.write('\n)\n') + + extra_deps.append(action_target_name) + + +def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source): + if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")): + if any([rule_source.startswith(var) for var in FULL_PATH_VARS]): + return rel_path + return NormjoinPathForceCMakeSource(base_path, rel_path) + + +def WriteRules(target_name, rules, extra_sources, extra_deps, + path_to_gyp, output): + """Write CMake for the 'rules' in the target. + + Args: + target_name: the name of the CMake target being generated. + actions: the Gyp 'actions' dict for this target. + extra_sources: [(, )] to append with generated source files. + extra_deps: [] to append with generated targets. + path_to_gyp: relative path from CMakeLists.txt being generated to + the Gyp file in which the target being generated is defined. 
+ """ + for rule in rules: + rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name']) + + inputs = rule.get('inputs', []) + inputs_name = rule_name + '__input' + SetVariableList(output, inputs_name, + [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs]) + outputs = rule['outputs'] + var_outputs = [] + + for count, rule_source in enumerate(rule.get('rule_sources', [])): + action_name = rule_name + '_' + str(count) + + rule_source_dirname, rule_source_basename = os.path.split(rule_source) + rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename) + + SetVariable(output, 'RULE_INPUT_PATH', rule_source) + SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname) + SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename) + SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root) + SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext) + + # Build up a list of outputs. + # Collect the output dirs we'll need. + dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir) + + # Create variables for the output, as 'local' variable will be unset. + these_outputs = [] + for output_index, out in enumerate(outputs): + output_name = action_name + '_' + str(output_index) + SetVariable(output, output_name, + NormjoinRulePathForceCMakeSource(path_to_gyp, out, + rule_source)) + if int(rule.get('process_outputs_as_sources', False)): + extra_sources.append(('${' + output_name + '}', out)) + these_outputs.append('${' + output_name + '}') + var_outputs.append('${' + output_name + '}') + + # add_custom_command + output.write('add_custom_command(OUTPUT\n') + for out in these_outputs: + output.write(' ') + output.write(out) + output.write('\n') + + for directory in dirs: + output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ') + output.write(directory) + output.write('\n') + + output.write(' COMMAND ') + output.write(gyp.common.EncodePOSIXShellList(rule['action'])) + output.write('\n') + + output.write(' DEPENDS ') + WriteVariable(output, inputs_name) + output.write(' ') + output.write(NormjoinPath(path_to_gyp, rule_source)) + output.write('\n') + + # CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives. + # The cwd is the current build directory. + output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/') + output.write(path_to_gyp) + output.write('\n') + + output.write(' COMMENT ') + if 'message' in rule: + output.write(rule['message']) + else: + output.write(action_name) + output.write('\n') + + output.write(' VERBATIM\n') + output.write(')\n') + + UnsetVariable(output, 'RULE_INPUT_PATH') + UnsetVariable(output, 'RULE_INPUT_DIRNAME') + UnsetVariable(output, 'RULE_INPUT_NAME') + UnsetVariable(output, 'RULE_INPUT_ROOT') + UnsetVariable(output, 'RULE_INPUT_EXT') + + # add_custom_target + output.write('add_custom_target(') + output.write(rule_name) + output.write(' DEPENDS\n') + for out in var_outputs: + output.write(' ') + output.write(out) + output.write('\n') + output.write('SOURCES ') + WriteVariable(output, inputs_name) + output.write('\n') + for rule_source in rule.get('rule_sources', []): + output.write(' ') + output.write(NormjoinPath(path_to_gyp, rule_source)) + output.write('\n') + output.write(')\n') + + extra_deps.append(rule_name) + + +def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output): + """Write CMake for the 'copies' in the target. + + Args: + target_name: the name of the CMake target being generated. + actions: the Gyp 'actions' dict for this target. 
+ extra_deps: [] to append with generated targets. + path_to_gyp: relative path from CMakeLists.txt being generated to + the Gyp file in which the target being generated is defined. + """ + copy_name = target_name + '__copies' + + # CMake gets upset with custom targets with OUTPUT which specify no output. + have_copies = any(copy['files'] for copy in copies) + if not have_copies: + output.write('add_custom_target(') + output.write(copy_name) + output.write(')\n') + extra_deps.append(copy_name) + return + + class Copy(object): + def __init__(self, ext, command): + self.cmake_inputs = [] + self.cmake_outputs = [] + self.gyp_inputs = [] + self.gyp_outputs = [] + self.ext = ext + self.inputs_name = None + self.outputs_name = None + self.command = command + + file_copy = Copy('', 'copy') + dir_copy = Copy('_dirs', 'copy_directory') + + for copy in copies: + files = copy['files'] + destination = copy['destination'] + for src in files: + path = os.path.normpath(src) + basename = os.path.split(path)[1] + dst = os.path.join(destination, basename) + + copy = file_copy if os.path.basename(src) else dir_copy + + copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src)) + copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst)) + copy.gyp_inputs.append(src) + copy.gyp_outputs.append(dst) + + for copy in (file_copy, dir_copy): + if copy.cmake_inputs: + copy.inputs_name = copy_name + '__input' + copy.ext + SetVariableList(output, copy.inputs_name, copy.cmake_inputs) + + copy.outputs_name = copy_name + '__output' + copy.ext + SetVariableList(output, copy.outputs_name, copy.cmake_outputs) + + # add_custom_command + output.write('add_custom_command(\n') + + output.write('OUTPUT') + for copy in (file_copy, dir_copy): + if copy.outputs_name: + WriteVariable(output, copy.outputs_name, ' ') + output.write('\n') + + for copy in (file_copy, dir_copy): + for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs): + # 'cmake -E copy src dst' will create the 'dst' directory if needed. 
+ output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command) + output.write(src) + output.write(' ') + output.write(dst) + output.write("\n") + + output.write('DEPENDS') + for copy in (file_copy, dir_copy): + if copy.inputs_name: + WriteVariable(output, copy.inputs_name, ' ') + output.write('\n') + + output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/') + output.write(path_to_gyp) + output.write('\n') + + output.write('COMMENT Copying for ') + output.write(target_name) + output.write('\n') + + output.write('VERBATIM\n') + output.write(')\n') + + # add_custom_target + output.write('add_custom_target(') + output.write(copy_name) + output.write('\n DEPENDS') + for copy in (file_copy, dir_copy): + if copy.outputs_name: + WriteVariable(output, copy.outputs_name, ' ') + output.write('\n SOURCES') + if file_copy.inputs_name: + WriteVariable(output, file_copy.inputs_name, ' ') + output.write('\n)\n') + + extra_deps.append(copy_name) + + +def CreateCMakeTargetBaseName(qualified_target): + """This is the name we would like the target to have.""" + _, gyp_target_name, gyp_target_toolset = ( + gyp.common.ParseQualifiedTarget(qualified_target)) + cmake_target_base_name = gyp_target_name + if gyp_target_toolset and gyp_target_toolset != 'target': + cmake_target_base_name += '_' + gyp_target_toolset + return StringToCMakeTargetName(cmake_target_base_name) + + +def CreateCMakeTargetFullName(qualified_target): + """An unambiguous name for the target.""" + gyp_file, gyp_target_name, gyp_target_toolset = ( + gyp.common.ParseQualifiedTarget(qualified_target)) + cmake_target_full_name = gyp_file + ':' + gyp_target_name + if gyp_target_toolset and gyp_target_toolset != 'target': + cmake_target_full_name += '_' + gyp_target_toolset + return StringToCMakeTargetName(cmake_target_full_name) + + +class CMakeNamer(object): + """Converts Gyp target names into CMake target names. + + CMake requires that target names be globally unique. One way to ensure + this is to fully qualify the names of the targets. Unfortunatly, this + ends up with all targets looking like "chrome_chrome_gyp_chrome" instead + of just "chrome". If this generator were only interested in building, it + would be possible to fully qualify all target names, then create + unqualified target names which depend on all qualified targets which + should have had that name. This is more or less what the 'make' generator + does with aliases. However, one goal of this generator is to create CMake + files for use with IDEs, and fully qualified names are not as user + friendly. + + Since target name collision is rare, we do the above only when required. + + Toolset variants are always qualified from the base, as this is required for + building. However, it also makes sense for an IDE, as it is possible for + defines to be different. 
+ """ + def __init__(self, target_list): + self.cmake_target_base_names_conficting = set() + + cmake_target_base_names_seen = set() + for qualified_target in target_list: + cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target) + + if cmake_target_base_name not in cmake_target_base_names_seen: + cmake_target_base_names_seen.add(cmake_target_base_name) + else: + self.cmake_target_base_names_conficting.add(cmake_target_base_name) + + def CreateCMakeTargetName(self, qualified_target): + base_name = CreateCMakeTargetBaseName(qualified_target) + if base_name in self.cmake_target_base_names_conficting: + return CreateCMakeTargetFullName(qualified_target) + return base_name + + +def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, + options, generator_flags, all_qualified_targets, flavor, + output): + # The make generator does this always. + # TODO: It would be nice to be able to tell CMake all dependencies. + circular_libs = generator_flags.get('circular', True) + + if not generator_flags.get('standalone', False): + output.write('\n#') + output.write(qualified_target) + output.write('\n') + + gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target) + rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir) + rel_gyp_dir = os.path.dirname(rel_gyp_file) + + # Relative path from build dir to top dir. + build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir) + # Relative path from build dir to gyp dir. + build_to_gyp = os.path.join(build_to_top, rel_gyp_dir) + + path_from_cmakelists_to_gyp = build_to_gyp + + spec = target_dicts.get(qualified_target, {}) + config = spec.get('configurations', {}).get(config_to_use, {}) + + xcode_settings = None + if flavor == 'mac': + xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) + + target_name = spec.get('target_name', '') + target_type = spec.get('type', '') + target_toolset = spec.get('toolset') + + cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type) + if cmake_target_type is None: + print ('Target %s has unknown target type %s, skipping.' % + ( target_name, target_type ) ) + return + + SetVariable(output, 'TARGET', target_name) + SetVariable(output, 'TOOLSET', target_toolset) + + cmake_target_name = namer.CreateCMakeTargetName(qualified_target) + + extra_sources = [] + extra_deps = [] + + # Actions must come first, since they can generate more OBJs for use below. + if 'actions' in spec: + WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps, + path_from_cmakelists_to_gyp, output) + + # Rules must be early like actions. + if 'rules' in spec: + WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps, + path_from_cmakelists_to_gyp, output) + + # Copies + if 'copies' in spec: + WriteCopies(cmake_target_name, spec['copies'], extra_deps, + path_from_cmakelists_to_gyp, output) + + # Target and sources + srcs = spec.get('sources', []) + + # Gyp separates the sheep from the goats based on file extensions. + # A full separation is done here because of flag handing (see below). 
+ s_sources = [] + c_sources = [] + cxx_sources = [] + linkable_sources = [] + other_sources = [] + for src in srcs: + _, ext = os.path.splitext(src) + src_type = COMPILABLE_EXTENSIONS.get(ext, None) + src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src); + + if src_type == 's': + s_sources.append(src_norm_path) + elif src_type == 'cc': + c_sources.append(src_norm_path) + elif src_type == 'cxx': + cxx_sources.append(src_norm_path) + elif Linkable(ext): + linkable_sources.append(src_norm_path) + else: + other_sources.append(src_norm_path) + + for extra_source in extra_sources: + src, real_source = extra_source + _, ext = os.path.splitext(real_source) + src_type = COMPILABLE_EXTENSIONS.get(ext, None) + + if src_type == 's': + s_sources.append(src) + elif src_type == 'cc': + c_sources.append(src) + elif src_type == 'cxx': + cxx_sources.append(src) + elif Linkable(ext): + linkable_sources.append(src) + else: + other_sources.append(src) + + s_sources_name = None + if s_sources: + s_sources_name = cmake_target_name + '__asm_srcs' + SetVariableList(output, s_sources_name, s_sources) + + c_sources_name = None + if c_sources: + c_sources_name = cmake_target_name + '__c_srcs' + SetVariableList(output, c_sources_name, c_sources) + + cxx_sources_name = None + if cxx_sources: + cxx_sources_name = cmake_target_name + '__cxx_srcs' + SetVariableList(output, cxx_sources_name, cxx_sources) + + linkable_sources_name = None + if linkable_sources: + linkable_sources_name = cmake_target_name + '__linkable_srcs' + SetVariableList(output, linkable_sources_name, linkable_sources) + + other_sources_name = None + if other_sources: + other_sources_name = cmake_target_name + '__other_srcs' + SetVariableList(output, other_sources_name, other_sources) + + # CMake gets upset when executable targets provide no sources. + # http://www.cmake.org/pipermail/cmake/2010-July/038461.html + dummy_sources_name = None + has_sources = (s_sources_name or + c_sources_name or + cxx_sources_name or + linkable_sources_name or + other_sources_name) + if target_type == 'executable' and not has_sources: + dummy_sources_name = cmake_target_name + '__dummy_srcs' + SetVariable(output, dummy_sources_name, + "${obj}.${TOOLSET}/${TARGET}/genc/dummy.c") + output.write('if(NOT EXISTS "') + WriteVariable(output, dummy_sources_name) + output.write('")\n') + output.write(' file(WRITE "') + WriteVariable(output, dummy_sources_name) + output.write('" "")\n') + output.write("endif()\n") + + + # CMake is opposed to setting linker directories and considers the practice + # of setting linker directories dangerous. Instead, it favors the use of + # find_library and passing absolute paths to target_link_libraries. + # However, CMake does provide the command link_directories, which adds + # link directories to targets defined after it is called. + # As a result, link_directories must come before the target definition. + # CMake unfortunately has no means of removing entries from LINK_DIRECTORIES. 
+ library_dirs = config.get('library_dirs') + if library_dirs is not None: + output.write('link_directories(') + for library_dir in library_dirs: + output.write(' ') + output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir)) + output.write('\n') + output.write(')\n') + + output.write(cmake_target_type.command) + output.write('(') + output.write(cmake_target_name) + + if cmake_target_type.modifier is not None: + output.write(' ') + output.write(cmake_target_type.modifier) + + if s_sources_name: + WriteVariable(output, s_sources_name, ' ') + if c_sources_name: + WriteVariable(output, c_sources_name, ' ') + if cxx_sources_name: + WriteVariable(output, cxx_sources_name, ' ') + if linkable_sources_name: + WriteVariable(output, linkable_sources_name, ' ') + if other_sources_name: + WriteVariable(output, other_sources_name, ' ') + if dummy_sources_name: + WriteVariable(output, dummy_sources_name, ' ') + + output.write(')\n') + + # Let CMake know if the 'all' target should depend on this target. + exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets + else 'FALSE') + SetTargetProperty(output, cmake_target_name, + 'EXCLUDE_FROM_ALL', exclude_from_all) + for extra_target_name in extra_deps: + SetTargetProperty(output, extra_target_name, + 'EXCLUDE_FROM_ALL', exclude_from_all) + + # Output name and location. + if target_type != 'none': + # Link as 'C' if there are no other files + if not c_sources and not cxx_sources: + SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C']) + + # Mark uncompiled sources as uncompiled. + if other_sources_name: + output.write('set_source_files_properties(') + WriteVariable(output, other_sources_name, '') + output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n') + + # Mark object sources as linkable. 
+ if linkable_sources_name: + output.write('set_source_files_properties(') + WriteVariable(output, other_sources_name, '') + output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n') + + # Output directory + target_output_directory = spec.get('product_dir') + if target_output_directory is None: + if target_type in ('executable', 'loadable_module'): + target_output_directory = generator_default_variables['PRODUCT_DIR'] + elif target_type == 'shared_library': + target_output_directory = '${builddir}/lib.${TOOLSET}' + elif spec.get('standalone_static_library', False): + target_output_directory = generator_default_variables['PRODUCT_DIR'] + else: + base_path = gyp.common.RelativePath(os.path.dirname(gyp_file), + options.toplevel_dir) + target_output_directory = '${obj}.${TOOLSET}' + target_output_directory = ( + os.path.join(target_output_directory, base_path)) + + cmake_target_output_directory = NormjoinPathForceCMakeSource( + path_from_cmakelists_to_gyp, + target_output_directory) + SetTargetProperty(output, + cmake_target_name, + cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY', + cmake_target_output_directory) + + # Output name + default_product_prefix = '' + default_product_name = target_name + default_product_ext = '' + if target_type == 'static_library': + static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX'] + default_product_name = RemovePrefix(default_product_name, + static_library_prefix) + default_product_prefix = static_library_prefix + default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX'] + + elif target_type in ('loadable_module', 'shared_library'): + shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX'] + default_product_name = RemovePrefix(default_product_name, + shared_library_prefix) + default_product_prefix = shared_library_prefix + default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX'] + + elif target_type != 'executable': + print ('ERROR: What output file should be generated?', + 'type', target_type, 'target', target_name) + + product_prefix = spec.get('product_prefix', default_product_prefix) + product_name = spec.get('product_name', default_product_name) + product_ext = spec.get('product_extension') + if product_ext: + product_ext = '.' + product_ext + else: + product_ext = default_product_ext + + SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix) + SetTargetProperty(output, cmake_target_name, + cmake_target_type.property_modifier + '_OUTPUT_NAME', + product_name) + SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext) + + # Make the output of this target referenceable as a source. 
+ cmake_target_output_basename = product_prefix + product_name + product_ext + cmake_target_output = os.path.join(cmake_target_output_directory, + cmake_target_output_basename) + SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '') + + # Includes + includes = config.get('include_dirs') + if includes: + # This (target include directories) is what requires CMake 2.8.8 + includes_name = cmake_target_name + '__include_dirs' + SetVariableList(output, includes_name, + [NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include) + for include in includes]) + output.write('set_property(TARGET ') + output.write(cmake_target_name) + output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ') + WriteVariable(output, includes_name, '') + output.write(')\n') + + # Defines + defines = config.get('defines') + if defines is not None: + SetTargetProperty(output, + cmake_target_name, + 'COMPILE_DEFINITIONS', + defines, + ';') + + # Compile Flags - http://www.cmake.org/Bug/view.php?id=6493 + # CMake currently does not have target C and CXX flags. + # So, instead of doing... + + # cflags_c = config.get('cflags_c') + # if cflags_c is not None: + # SetTargetProperty(output, cmake_target_name, + # 'C_COMPILE_FLAGS', cflags_c, ' ') + + # cflags_cc = config.get('cflags_cc') + # if cflags_cc is not None: + # SetTargetProperty(output, cmake_target_name, + # 'CXX_COMPILE_FLAGS', cflags_cc, ' ') + + # Instead we must... + cflags = config.get('cflags', []) + cflags_c = config.get('cflags_c', []) + cflags_cxx = config.get('cflags_cc', []) + if xcode_settings: + cflags = xcode_settings.GetCflags(config_to_use) + cflags_c = xcode_settings.GetCflagsC(config_to_use) + cflags_cxx = xcode_settings.GetCflagsCC(config_to_use) + #cflags_objc = xcode_settings.GetCflagsObjC(config_to_use) + #cflags_objcc = xcode_settings.GetCflagsObjCC(config_to_use) + + if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources): + SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ') + + elif c_sources and not (s_sources or cxx_sources): + flags = [] + flags.extend(cflags) + flags.extend(cflags_c) + SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ') + + elif cxx_sources and not (s_sources or c_sources): + flags = [] + flags.extend(cflags) + flags.extend(cflags_cxx) + SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ') + + else: + # TODO: This is broken, one cannot generally set properties on files, + # as other targets may require different properties on the same files. 
+ if s_sources and cflags: + SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ') + + if c_sources and (cflags or cflags_c): + flags = [] + flags.extend(cflags) + flags.extend(cflags_c) + SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ') + + if cxx_sources and (cflags or cflags_cxx): + flags = [] + flags.extend(cflags) + flags.extend(cflags_cxx) + SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ') + + # Linker flags + ldflags = config.get('ldflags') + if ldflags is not None: + SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ') + + # XCode settings + xcode_settings = config.get('xcode_settings', {}) + for xcode_setting, xcode_value in xcode_settings.viewitems(): + SetTargetProperty(output, cmake_target_name, + "XCODE_ATTRIBUTE_%s" % xcode_setting, xcode_value, + '' if isinstance(xcode_value, str) else ' ') + + # Note on Dependencies and Libraries: + # CMake wants to handle link order, resolving the link line up front. + # Gyp does not retain or enforce specifying enough information to do so. + # So do as other gyp generators and use --start-group and --end-group. + # Give CMake as little information as possible so that it doesn't mess it up. + + # Dependencies + rawDeps = spec.get('dependencies', []) + + static_deps = [] + shared_deps = [] + other_deps = [] + for rawDep in rawDeps: + dep_cmake_name = namer.CreateCMakeTargetName(rawDep) + dep_spec = target_dicts.get(rawDep, {}) + dep_target_type = dep_spec.get('type', None) + + if dep_target_type == 'static_library': + static_deps.append(dep_cmake_name) + elif dep_target_type == 'shared_library': + shared_deps.append(dep_cmake_name) + else: + other_deps.append(dep_cmake_name) + + # ensure all external dependencies are complete before internal dependencies + # extra_deps currently only depend on their own deps, so otherwise run early + if static_deps or shared_deps or other_deps: + for extra_dep in extra_deps: + output.write('add_dependencies(') + output.write(extra_dep) + output.write('\n') + for deps in (static_deps, shared_deps, other_deps): + for dep in gyp.common.uniquer(deps): + output.write(' ') + output.write(dep) + output.write('\n') + output.write(')\n') + + linkable = target_type in ('executable', 'loadable_module', 'shared_library') + other_deps.extend(extra_deps) + if other_deps or (not linkable and (static_deps or shared_deps)): + output.write('add_dependencies(') + output.write(cmake_target_name) + output.write('\n') + for dep in gyp.common.uniquer(other_deps): + output.write(' ') + output.write(dep) + output.write('\n') + if not linkable: + for deps in (static_deps, shared_deps): + for lib_dep in gyp.common.uniquer(deps): + output.write(' ') + output.write(lib_dep) + output.write('\n') + output.write(')\n') + + # Libraries + if linkable: + external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0] + if external_libs or static_deps or shared_deps: + output.write('target_link_libraries(') + output.write(cmake_target_name) + output.write('\n') + if static_deps: + write_group = circular_libs and len(static_deps) > 1 and flavor != 'mac' + if write_group: + output.write('-Wl,--start-group\n') + for dep in gyp.common.uniquer(static_deps): + output.write(' ') + output.write(dep) + output.write('\n') + if write_group: + output.write('-Wl,--end-group\n') + if shared_deps: + for dep in gyp.common.uniquer(shared_deps): + output.write(' ') + output.write(dep) + output.write('\n') + if external_libs: + for lib in 
gyp.common.uniquer(external_libs): + output.write(' "') + output.write(RemovePrefix(lib, "$(SDKROOT)")) + output.write('"\n') + + output.write(')\n') + + UnsetVariable(output, 'TOOLSET') + UnsetVariable(output, 'TARGET') + + +def GenerateOutputForConfig(target_list, target_dicts, data, + params, config_to_use): + options = params['options'] + generator_flags = params['generator_flags'] + flavor = gyp.common.GetFlavor(params) + + # generator_dir: relative path from pwd to where make puts build files. + # Makes migrating from make to cmake easier, cmake doesn't put anything here. + # Each Gyp configuration creates a different CMakeLists.txt file + # to avoid incompatibilities between Gyp and CMake configurations. + generator_dir = os.path.relpath(options.generator_output or '.') + + # output_dir: relative path from generator_dir to the build directory. + output_dir = generator_flags.get('output_dir', 'out') + + # build_dir: relative path from source root to our output files. + # e.g. "out/Debug" + build_dir = os.path.normpath(os.path.join(generator_dir, + output_dir, + config_to_use)) + + toplevel_build = os.path.join(options.toplevel_dir, build_dir) + + output_file = os.path.join(toplevel_build, 'CMakeLists.txt') + gyp.common.EnsureDirExists(output_file) + + output = open(output_file, 'w') + output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n') + output.write('cmake_policy(VERSION 2.8.8)\n') + + gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1]) + output.write('project(') + output.write(project_target) + output.write(')\n') + + SetVariable(output, 'configuration', config_to_use) + + ar = None + cc = None + cxx = None + + make_global_settings = data[gyp_file].get('make_global_settings', []) + build_to_top = gyp.common.InvertRelativePath(build_dir, + options.toplevel_dir) + for key, value in make_global_settings: + if key == 'AR': + ar = os.path.join(build_to_top, value) + if key == 'CC': + cc = os.path.join(build_to_top, value) + if key == 'CXX': + cxx = os.path.join(build_to_top, value) + + ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar) + cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc) + cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx) + + if ar: + SetVariable(output, 'CMAKE_AR', ar) + if cc: + SetVariable(output, 'CMAKE_C_COMPILER', cc) + if cxx: + SetVariable(output, 'CMAKE_CXX_COMPILER', cxx) + + # The following appears to be as-yet undocumented. + # http://public.kitware.com/Bug/view.php?id=8392 + output.write('enable_language(ASM)\n') + # ASM-ATT does not support .S files. + # output.write('enable_language(ASM-ATT)\n') + + if cc: + SetVariable(output, 'CMAKE_ASM_COMPILER', cc) + + SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}') + SetVariable(output, 'obj', '${builddir}/obj') + output.write('\n') + + # TODO: Undocumented/unsupported (the CMake Java generator depends on it). + # CMake by default names the object resulting from foo.c to be foo.c.o. + # Gyp traditionally names the object resulting from foo.c foo.o. + # This should be irrelevant, but some targets extract .o files from .a + # and depend on the name of the extracted .o files. + output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n') + output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n') + output.write('\n') + + # Force ninja to use rsp files. Otherwise link and ar lines can get too long, + # resulting in 'Argument list too long' errors. + # However, rsp files don't work correctly on Mac. 
+ if flavor != 'mac': + output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n') + output.write('\n') + + namer = CMakeNamer(target_list) + + # The list of targets upon which the 'all' target should depend. + # CMake has it's own implicit 'all' target, one is not created explicitly. + all_qualified_targets = set() + for build_file in params['build_files']: + for qualified_target in gyp.common.AllTargets(target_list, + target_dicts, + os.path.normpath(build_file)): + all_qualified_targets.add(qualified_target) + + for qualified_target in target_list: + if flavor == 'mac': + gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target) + spec = target_dicts[qualified_target] + gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[gyp_file], spec) + + WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, + options, generator_flags, all_qualified_targets, flavor, output) + + output.close() + + +def PerformBuild(data, configurations, params): + options = params['options'] + generator_flags = params['generator_flags'] + + # generator_dir: relative path from pwd to where make puts build files. + # Makes migrating from make to cmake easier, cmake doesn't put anything here. + generator_dir = os.path.relpath(options.generator_output or '.') + + # output_dir: relative path from generator_dir to the build directory. + output_dir = generator_flags.get('output_dir', 'out') + + for config_name in configurations: + # build_dir: relative path from source root to our output files. + # e.g. "out/Debug" + build_dir = os.path.normpath(os.path.join(generator_dir, + output_dir, + config_name)) + arguments = ['cmake', '-G', 'Ninja'] + print 'Generating [%s]: %s' % (config_name, arguments) + subprocess.check_call(arguments, cwd=build_dir) + + arguments = ['ninja', '-C', build_dir] + print 'Building [%s]: %s' % (config_name, arguments) + subprocess.check_call(arguments) + + +def CallGenerateOutputForConfig(arglist): + # Ignore the interrupt signal so that the parent process catches it and + # kills all multiprocessing children. + signal.signal(signal.SIGINT, signal.SIG_IGN) + + target_list, target_dicts, data, params, config_name = arglist + GenerateOutputForConfig(target_list, target_dicts, data, params, config_name) + + +def GenerateOutput(target_list, target_dicts, data, params): + user_config = params.get('generator_flags', {}).get('config', None) + if user_config: + GenerateOutputForConfig(target_list, target_dicts, data, + params, user_config) + else: + config_names = target_dicts[target_list[0]]['configurations'].keys() + if params['parallel']: + try: + pool = multiprocessing.Pool(len(config_names)) + arglists = [] + for config_name in config_names: + arglists.append((target_list, target_dicts, data, + params, config_name)) + pool.map(CallGenerateOutputForConfig, arglists) + except KeyboardInterrupt, e: + pool.terminate() + raise e + else: + for config_name in config_names: + GenerateOutputForConfig(target_list, target_dicts, data, + params, config_name) diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/dump_dependency_json.py b/deps/libuv/build/gyp/pylib/gyp/generator/dump_dependency_json.py new file mode 100644 index 00000000..160eafe2 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/dump_dependency_json.py @@ -0,0 +1,99 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
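+#
+# This generator just walks the dependency graph and dumps it to a JSON file.
+# As a rough sketch (the target names here are purely illustrative), a tree in
+# which app.gyp:app depends on lib.gyp:lib comes out in dump.json roughly as:
+#
+#   {
+#     "app.gyp:app#target": ["lib.gyp:lib#target"],
+#     "lib.gyp:lib#target": []
+#   }
+#
+# i.e. a flat map from each qualified target to its direct dependencies.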
+ +import collections +import os +import gyp +import gyp.common +import gyp.msvs_emulation +import json +import sys + +generator_supports_multiple_toolsets = True + +generator_wants_static_library_dependencies_adjusted = False + +generator_filelist_paths = { +} + +generator_default_variables = { +} +for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', + 'LIB_DIR', 'SHARED_LIB_DIR']: + # Some gyp steps fail if these are empty(!). + generator_default_variables[dirname] = 'dir' +for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', + 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', + 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', + 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', + 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', + 'CONFIGURATION_NAME']: + generator_default_variables[unused] = '' + + +def CalculateVariables(default_variables, params): + generator_flags = params.get('generator_flags', {}) + for key, val in generator_flags.items(): + default_variables.setdefault(key, val) + default_variables.setdefault('OS', gyp.common.GetFlavor(params)) + + flavor = gyp.common.GetFlavor(params) + if flavor =='win': + # Copy additional generator configuration data from VS, which is shared + # by the Windows Ninja generator. + import gyp.generator.msvs as msvs_generator + generator_additional_non_configuration_keys = getattr(msvs_generator, + 'generator_additional_non_configuration_keys', []) + generator_additional_path_sections = getattr(msvs_generator, + 'generator_additional_path_sections', []) + + gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) + + +def CalculateGeneratorInputInfo(params): + """Calculate the generator specific info that gets fed to input (called by + gyp).""" + generator_flags = params.get('generator_flags', {}) + if generator_flags.get('adjust_static_libraries', False): + global generator_wants_static_library_dependencies_adjusted + generator_wants_static_library_dependencies_adjusted = True + + toplevel = params['options'].toplevel_dir + generator_dir = os.path.relpath(params['options'].generator_output or '.') + # output_dir: relative path from generator_dir to the build directory. + output_dir = generator_flags.get('output_dir', 'out') + qualified_out_dir = os.path.normpath(os.path.join( + toplevel, generator_dir, output_dir, 'gypfiles')) + global generator_filelist_paths + generator_filelist_paths = { + 'toplevel': toplevel, + 'qualified_out_dir': qualified_out_dir, + } + +def GenerateOutput(target_list, target_dicts, data, params): + # Map of target -> list of targets it depends on. + edges = {} + + # Queue of targets to visit. + targets_to_visit = target_list[:] + + while len(targets_to_visit) > 0: + target = targets_to_visit.pop() + if target in edges: + continue + edges[target] = [] + + for dep in target_dicts[target].get('dependencies', []): + edges[target].append(dep) + targets_to_visit.append(dep) + + try: + filepath = params['generator_flags']['output_dir'] + except KeyError: + filepath = '.' + filename = os.path.join(filepath, 'dump.json') + f = open(filename, 'w') + json.dump(edges, f) + f.close() + print 'Wrote json to %s.' % filename diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/eclipse.py b/deps/libuv/build/gyp/pylib/gyp/generator/eclipse.py new file mode 100644 index 00000000..3544347b --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/eclipse.py @@ -0,0 +1,425 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. 
+# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""GYP backend that generates Eclipse CDT settings files. + +This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML +files that can be imported into an Eclipse CDT project. The XML file contains a +list of include paths and symbols (i.e. defines). + +Because a full .cproject definition is not created by this generator, it's not +possible to properly define the include dirs and symbols for each file +individually. Instead, one set of includes/symbols is generated for the entire +project. This works fairly well (and is a vast improvement in general), but may +still result in a few indexer issues here and there. + +This generator has no automated tests, so expect it to be broken. +""" + +from xml.sax.saxutils import escape +import os.path +import subprocess +import gyp +import gyp.common +import gyp.msvs_emulation +import shlex +import xml.etree.cElementTree as ET + +generator_wants_static_library_dependencies_adjusted = False + +generator_default_variables = { +} + +for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: + # Some gyp steps fail if these are empty(!), so we convert them to variables + generator_default_variables[dirname] = '$' + dirname + +for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', + 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', + 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', + 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', + 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', + 'CONFIGURATION_NAME']: + generator_default_variables[unused] = '' + +# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as +# part of the path when dealing with generated headers. This value will be +# replaced dynamically for each configuration. +generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \ + '$SHARED_INTERMEDIATE_DIR' + + +def CalculateVariables(default_variables, params): + generator_flags = params.get('generator_flags', {}) + for key, val in generator_flags.items(): + default_variables.setdefault(key, val) + flavor = gyp.common.GetFlavor(params) + default_variables.setdefault('OS', flavor) + if flavor == 'win': + # Copy additional generator configuration data from VS, which is shared + # by the Eclipse generator. + import gyp.generator.msvs as msvs_generator + generator_additional_non_configuration_keys = getattr(msvs_generator, + 'generator_additional_non_configuration_keys', []) + generator_additional_path_sections = getattr(msvs_generator, + 'generator_additional_path_sections', []) + + gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) + + +def CalculateGeneratorInputInfo(params): + """Calculate the generator specific info that gets fed to input (called by + gyp).""" + generator_flags = params.get('generator_flags', {}) + if generator_flags.get('adjust_static_libraries', False): + global generator_wants_static_library_dependencies_adjusted + generator_wants_static_library_dependencies_adjusted = True + + +def GetAllIncludeDirectories(target_list, target_dicts, + shared_intermediate_dirs, config_name, params, + compiler_path): + """Calculate the set of include directories to be used. + + Returns: + A list including all the include_dir's specified for every target followed + by any include directories that were added as cflag compiler options. + """ + + gyp_includes_set = set() + compiler_includes_list = [] + + # Find compiler's default include dirs. 
+ if compiler_path: + command = shlex.split(compiler_path) + command.extend(['-E', '-xc++', '-v', '-']) + proc = subprocess.Popen(args=command, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output = proc.communicate()[1] + # Extract the list of include dirs from the output, which has this format: + # ... + # #include "..." search starts here: + # #include <...> search starts here: + # /usr/include/c++/4.6 + # /usr/local/include + # End of search list. + # ... + in_include_list = False + for line in output.splitlines(): + if line.startswith('#include'): + in_include_list = True + continue + if line.startswith('End of search list.'): + break + if in_include_list: + include_dir = line.strip() + if include_dir not in compiler_includes_list: + compiler_includes_list.append(include_dir) + + flavor = gyp.common.GetFlavor(params) + if flavor == 'win': + generator_flags = params.get('generator_flags', {}) + for target_name in target_list: + target = target_dicts[target_name] + if config_name in target['configurations']: + config = target['configurations'][config_name] + + # Look for any include dirs that were explicitly added via cflags. This + # may be done in gyp files to force certain includes to come at the end. + # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and + # remove this. + if flavor == 'win': + msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) + cflags = msvs_settings.GetCflags(config_name) + else: + cflags = config['cflags'] + for cflag in cflags: + if cflag.startswith('-I'): + include_dir = cflag[2:] + if include_dir not in compiler_includes_list: + compiler_includes_list.append(include_dir) + + # Find standard gyp include dirs. + if config.has_key('include_dirs'): + include_dirs = config['include_dirs'] + for shared_intermediate_dir in shared_intermediate_dirs: + for include_dir in include_dirs: + include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR', + shared_intermediate_dir) + if not os.path.isabs(include_dir): + base_dir = os.path.dirname(target_name) + + include_dir = base_dir + '/' + include_dir + include_dir = os.path.abspath(include_dir) + + gyp_includes_set.add(include_dir) + + # Generate a list that has all the include dirs. + all_includes_list = list(gyp_includes_set) + all_includes_list.sort() + for compiler_include in compiler_includes_list: + if not compiler_include in gyp_includes_set: + all_includes_list.append(compiler_include) + + # All done. + return all_includes_list + + +def GetCompilerPath(target_list, data, options): + """Determine a command that can be used to invoke the compiler. + + Returns: + If this is a gyp project that has explicit make settings, try to determine + the compiler from that. Otherwise, see if a compiler was specified via the + CC_target environment variable. + """ + # First, see if the compiler is configured in make's settings. + build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) + make_global_settings_dict = data[build_file].get('make_global_settings', {}) + for key, value in make_global_settings_dict: + if key in ['CC', 'CXX']: + return os.path.join(options.toplevel_dir, value) + + # Check to see if the compiler was specified as an environment variable. + for key in ['CC_target', 'CC', 'CXX']: + compiler = os.environ.get(key) + if compiler: + return compiler + + return 'gcc' + + +def GetAllDefines(target_list, target_dicts, data, config_name, params, + compiler_path): + """Calculate the defines for a project. 
+ + Returns: + A dict that includes explict defines declared in gyp files along with all of + the default defines that the compiler uses. + """ + + # Get defines declared in the gyp files. + all_defines = {} + flavor = gyp.common.GetFlavor(params) + if flavor == 'win': + generator_flags = params.get('generator_flags', {}) + for target_name in target_list: + target = target_dicts[target_name] + + if flavor == 'win': + msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags) + extra_defines = msvs_settings.GetComputedDefines(config_name) + else: + extra_defines = [] + if config_name in target['configurations']: + config = target['configurations'][config_name] + target_defines = config['defines'] + else: + target_defines = [] + for define in target_defines + extra_defines: + split_define = define.split('=', 1) + if len(split_define) == 1: + split_define.append('1') + if split_define[0].strip() in all_defines: + # Already defined + continue + all_defines[split_define[0].strip()] = split_define[1].strip() + # Get default compiler defines (if possible). + if flavor == 'win': + return all_defines # Default defines already processed in the loop above. + if compiler_path: + command = shlex.split(compiler_path) + command.extend(['-E', '-dM', '-']) + cpp_proc = subprocess.Popen(args=command, cwd='.', + stdin=subprocess.PIPE, stdout=subprocess.PIPE) + cpp_output = cpp_proc.communicate()[0] + cpp_lines = cpp_output.split('\n') + for cpp_line in cpp_lines: + if not cpp_line.strip(): + continue + cpp_line_parts = cpp_line.split(' ', 2) + key = cpp_line_parts[1] + if len(cpp_line_parts) >= 3: + val = cpp_line_parts[2] + else: + val = '1' + all_defines[key] = val + + return all_defines + + +def WriteIncludePaths(out, eclipse_langs, include_dirs): + """Write the includes section of a CDT settings export file.""" + + out.write('
  <section name="org.eclipse.cdt.internal.ui.wizards.settingswizards.IncludePaths">\n')
+  out.write('    <language name="holder for library settings"></language>\n')
+  for lang in eclipse_langs:
+    out.write('    <language name="%s">\n' % lang)
+    for include_dir in include_dirs:
+      out.write('      <includepath workspace_path="false">%s</includepath>\n' %
+                include_dir)
+    out.write('    </language>\n')
+  out.write('  </section>\n')
+
+
+def WriteMacros(out, eclipse_langs, defines):
+  """Write the macros section of a CDT settings export file."""
+
+  out.write('  <section name="org.eclipse.cdt.internal.ui.wizards.settingswizards.Macros">\n')
+  out.write('    <language name="holder for library settings"></language>\n')
+  for lang in eclipse_langs:
+    out.write('    <language name="%s">\n' % lang)
+    for key in sorted(defines.iterkeys()):
+      out.write('      <macro><name>%s</name><value>%s</value></macro>\n' %
+                (escape(key), escape(defines[key])))
+    out.write('    </language>\n')
+  out.write('  </section>
\n') + + +def GenerateOutputForConfig(target_list, target_dicts, data, params, + config_name): + options = params['options'] + generator_flags = params.get('generator_flags', {}) + + # build_dir: relative path from source root to our output files. + # e.g. "out/Debug" + build_dir = os.path.join(generator_flags.get('output_dir', 'out'), + config_name) + + toplevel_build = os.path.join(options.toplevel_dir, build_dir) + # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the + # SHARED_INTERMEDIATE_DIR. Include both possible locations. + shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'), + os.path.join(toplevel_build, 'gen')] + + GenerateCdtSettingsFile(target_list, + target_dicts, + data, + params, + config_name, + os.path.join(toplevel_build, + 'eclipse-cdt-settings.xml'), + options, + shared_intermediate_dirs) + GenerateClasspathFile(target_list, + target_dicts, + options.toplevel_dir, + toplevel_build, + os.path.join(toplevel_build, + 'eclipse-classpath.xml')) + + +def GenerateCdtSettingsFile(target_list, target_dicts, data, params, + config_name, out_name, options, + shared_intermediate_dirs): + gyp.common.EnsureDirExists(out_name) + with open(out_name, 'w') as out: + out.write('\n') + out.write('\n') + + eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File', + 'GNU C++', 'GNU C', 'Assembly'] + compiler_path = GetCompilerPath(target_list, data, options) + include_dirs = GetAllIncludeDirectories(target_list, target_dicts, + shared_intermediate_dirs, + config_name, params, compiler_path) + WriteIncludePaths(out, eclipse_langs, include_dirs) + defines = GetAllDefines(target_list, target_dicts, data, config_name, + params, compiler_path) + WriteMacros(out, eclipse_langs, defines) + + out.write('\n') + + +def GenerateClasspathFile(target_list, target_dicts, toplevel_dir, + toplevel_build, out_name): + '''Generates a classpath file suitable for symbol navigation and code + completion of Java code (such as in Android projects) by finding all + .java and .jar files used as action inputs.''' + gyp.common.EnsureDirExists(out_name) + result = ET.Element('classpath') + + def AddElements(kind, paths): + # First, we need to normalize the paths so they are all relative to the + # toplevel dir. + rel_paths = set() + for path in paths: + if os.path.isabs(path): + rel_paths.add(os.path.relpath(path, toplevel_dir)) + else: + rel_paths.add(path) + + for path in sorted(rel_paths): + entry_element = ET.SubElement(result, 'classpathentry') + entry_element.set('kind', kind) + entry_element.set('path', path) + + AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir)) + AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir)) + # Include the standard JRE container and a dummy out folder + AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER']) + # Include a dummy out folder so that Eclipse doesn't use the default /bin + # folder in the root of the project. 
+ AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')]) + + ET.ElementTree(result).write(out_name) + + +def GetJavaJars(target_list, target_dicts, toplevel_dir): + '''Generates a sequence of all .jars used as inputs.''' + for target_name in target_list: + target = target_dicts[target_name] + for action in target.get('actions', []): + for input_ in action['inputs']: + if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'): + if os.path.isabs(input_): + yield input_ + else: + yield os.path.join(os.path.dirname(target_name), input_) + + +def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir): + '''Generates a sequence of all likely java package root directories.''' + for target_name in target_list: + target = target_dicts[target_name] + for action in target.get('actions', []): + for input_ in action['inputs']: + if (os.path.splitext(input_)[1] == '.java' and + not input_.startswith('$')): + dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name), + input_)) + # If there is a parent 'src' or 'java' folder, navigate up to it - + # these are canonical package root names in Chromium. This will + # break if 'src' or 'java' exists in the package structure. This + # could be further improved by inspecting the java file for the + # package name if this proves to be too fragile in practice. + parent_search = dir_ + while os.path.basename(parent_search) not in ['src', 'java']: + parent_search, _ = os.path.split(parent_search) + if not parent_search or parent_search == toplevel_dir: + # Didn't find a known root, just return the original path + yield dir_ + break + else: + yield parent_search + + +def GenerateOutput(target_list, target_dicts, data, params): + """Generate an XML settings file that can be imported into a CDT project.""" + + if params['options'].generator_output: + raise NotImplementedError("--generator_output not implemented for eclipse") + + user_config = params.get('generator_flags', {}).get('config', None) + if user_config: + GenerateOutputForConfig(target_list, target_dicts, data, params, + user_config) + else: + config_names = target_dicts[target_list[0]]['configurations'].keys() + for config_name in config_names: + GenerateOutputForConfig(target_list, target_dicts, data, params, + config_name) + diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/gypd.py b/deps/libuv/build/gyp/pylib/gyp/generator/gypd.py new file mode 100644 index 00000000..3efdb996 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/gypd.py @@ -0,0 +1,94 @@ +# Copyright (c) 2011 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""gypd output module + +This module produces gyp input as its output. Output files are given the +.gypd extension to avoid overwriting the .gyp files that they are generated +from. Internal references to .gyp files (such as those found in +"dependencies" sections) are not adjusted to point to .gypd files instead; +unlike other paths, which are relative to the .gyp or .gypd file, such paths +are relative to the directory from which gyp was run to create the .gypd file. + +This generator module is intended to be a sample and a debugging aid, hence +the "d" for "debug" in .gypd. It is useful to inspect the results of the +various merges, expansions, and conditional evaluations performed by gyp +and to see a representation of what would be fed to a generator module. 
+ +It's not advisable to rename .gypd files produced by this module to .gyp, +because they will have all merges, expansions, and evaluations already +performed and the relevant constructs not present in the output; paths to +dependencies may be wrong; and various sections that do not belong in .gyp +files such as such as "included_files" and "*_excluded" will be present. +Output will also be stripped of comments. This is not intended to be a +general-purpose gyp pretty-printer; for that, you probably just want to +run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip +comments but won't do all of the other things done to this module's output. + +The specific formatting of the output generated by this module is subject +to change. +""" + + +import gyp.common +import errno +import os +import pprint + + +# These variables should just be spit back out as variable references. +_generator_identity_variables = [ + 'CONFIGURATION_NAME', + 'EXECUTABLE_PREFIX', + 'EXECUTABLE_SUFFIX', + 'INTERMEDIATE_DIR', + 'LIB_DIR', + 'PRODUCT_DIR', + 'RULE_INPUT_ROOT', + 'RULE_INPUT_DIRNAME', + 'RULE_INPUT_EXT', + 'RULE_INPUT_NAME', + 'RULE_INPUT_PATH', + 'SHARED_INTERMEDIATE_DIR', + 'SHARED_LIB_DIR', + 'SHARED_LIB_PREFIX', + 'SHARED_LIB_SUFFIX', + 'STATIC_LIB_PREFIX', + 'STATIC_LIB_SUFFIX', +] + +# gypd doesn't define a default value for OS like many other generator +# modules. Specify "-D OS=whatever" on the command line to provide a value. +generator_default_variables = { +} + +# gypd supports multiple toolsets +generator_supports_multiple_toolsets = True + +# TODO(mark): This always uses <, which isn't right. The input module should +# notify the generator to tell it which phase it is operating in, and this +# module should use < for the early phase and then switch to > for the late +# phase. Bonus points for carrying @ back into the output too. +for v in _generator_identity_variables: + generator_default_variables[v] = '<(%s)' % v + + +def GenerateOutput(target_list, target_dicts, data, params): + output_files = {} + for qualified_target in target_list: + [input_file, target] = \ + gyp.common.ParseQualifiedTarget(qualified_target)[0:2] + + if input_file[-4:] != '.gyp': + continue + input_file_stem = input_file[:-4] + output_file = input_file_stem + params['options'].suffix + '.gypd' + + if not output_file in output_files: + output_files[output_file] = input_file + + for output_file, input_file in output_files.iteritems(): + output = open(output_file, 'w') + pprint.pprint(data[input_file], output) + output.close() diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/gypsh.py b/deps/libuv/build/gyp/pylib/gyp/generator/gypsh.py new file mode 100644 index 00000000..bd405f43 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/gypsh.py @@ -0,0 +1,56 @@ +# Copyright (c) 2011 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""gypsh output module + +gypsh is a GYP shell. It's not really a generator per se. All it does is +fire up an interactive Python session with a few local variables set to the +variables passed to the generator. Like gypd, it's intended as a debugging +aid, to facilitate the exploration of .gyp structures after being processed +by the input module. + +The expected usage is "gyp -f gypsh -D OS=desired_os". +""" + + +import code +import sys + + +# All of this stuff about generator variables was lovingly ripped from gypd.py. 
+# That module has a much better description of what's going on and why. +_generator_identity_variables = [ + 'EXECUTABLE_PREFIX', + 'EXECUTABLE_SUFFIX', + 'INTERMEDIATE_DIR', + 'PRODUCT_DIR', + 'RULE_INPUT_ROOT', + 'RULE_INPUT_DIRNAME', + 'RULE_INPUT_EXT', + 'RULE_INPUT_NAME', + 'RULE_INPUT_PATH', + 'SHARED_INTERMEDIATE_DIR', +] + +generator_default_variables = { +} + +for v in _generator_identity_variables: + generator_default_variables[v] = '<(%s)' % v + + +def GenerateOutput(target_list, target_dicts, data, params): + locals = { + 'target_list': target_list, + 'target_dicts': target_dicts, + 'data': data, + } + + # Use a banner that looks like the stock Python one and like what + # code.interact uses by default, but tack on something to indicate what + # locals are available, and identify gypsh. + banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \ + (sys.version, sys.platform, repr(sorted(locals.keys()))) + + code.interact(banner, local=locals) diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/make.py b/deps/libuv/build/gyp/pylib/gyp/generator/make.py new file mode 100644 index 00000000..e80ebaed --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/make.py @@ -0,0 +1,2229 @@ +# Copyright (c) 2013 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Notes: +# +# This is all roughly based on the Makefile system used by the Linux +# kernel, but is a non-recursive make -- we put the entire dependency +# graph in front of make and let it figure it out. +# +# The code below generates a separate .mk file for each target, but +# all are sourced by the top-level Makefile. This means that all +# variables in .mk-files clobber one another. Be careful to use := +# where appropriate for immediate evaluation, and similarly to watch +# that you're not relying on a variable value to last beween different +# .mk files. +# +# TODOs: +# +# Global settings and utility functions are currently stuffed in the +# toplevel Makefile. It may make sense to generate some .mk files on +# the side to keep the the files readable. + +import os +import re +import sys +import subprocess +import gyp +import gyp.common +import gyp.xcode_emulation +from gyp.common import GetEnvironFallback +from gyp.common import GypError + +import hashlib + +generator_default_variables = { + 'EXECUTABLE_PREFIX': '', + 'EXECUTABLE_SUFFIX': '', + 'STATIC_LIB_PREFIX': 'lib', + 'SHARED_LIB_PREFIX': 'lib', + 'STATIC_LIB_SUFFIX': '.a', + 'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni', + 'SHARED_INTERMEDIATE_DIR': '$(obj)/gen', + 'PRODUCT_DIR': '$(builddir)', + 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python. + 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python. + 'RULE_INPUT_PATH': '$(abspath $<)', + 'RULE_INPUT_EXT': '$(suffix $<)', + 'RULE_INPUT_NAME': '$(notdir $<)', + 'CONFIGURATION_NAME': '$(BUILDTYPE)', +} + +# Make supports multiple toolsets +generator_supports_multiple_toolsets = True + +# Request sorted dependencies in the order from dependents to dependencies. +generator_wants_sorted_dependencies = False + +# Placates pylint. 
+generator_additional_non_configuration_keys = [] +generator_additional_path_sections = [] +generator_extra_sources_for_rules = [] +generator_filelist_paths = None + + +def CalculateVariables(default_variables, params): + """Calculate additional variables for use in the build (called by gyp).""" + flavor = gyp.common.GetFlavor(params) + if flavor == 'mac': + default_variables.setdefault('OS', 'mac') + default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib') + default_variables.setdefault('SHARED_LIB_DIR', + generator_default_variables['PRODUCT_DIR']) + default_variables.setdefault('LIB_DIR', + generator_default_variables['PRODUCT_DIR']) + + # Copy additional generator configuration data from Xcode, which is shared + # by the Mac Make generator. + import gyp.generator.xcode as xcode_generator + global generator_additional_non_configuration_keys + generator_additional_non_configuration_keys = getattr(xcode_generator, + 'generator_additional_non_configuration_keys', []) + global generator_additional_path_sections + generator_additional_path_sections = getattr(xcode_generator, + 'generator_additional_path_sections', []) + global generator_extra_sources_for_rules + generator_extra_sources_for_rules = getattr(xcode_generator, + 'generator_extra_sources_for_rules', []) + COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'}) + else: + operating_system = flavor + if flavor == 'android': + operating_system = 'linux' # Keep this legacy behavior for now. + default_variables.setdefault('OS', operating_system) + if flavor == 'aix': + default_variables.setdefault('SHARED_LIB_SUFFIX', '.a') + else: + default_variables.setdefault('SHARED_LIB_SUFFIX', '.so') + default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)') + default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)') + + +def CalculateGeneratorInputInfo(params): + """Calculate the generator specific info that gets fed to input (called by + gyp).""" + generator_flags = params.get('generator_flags', {}) + android_ndk_version = generator_flags.get('android_ndk_version', None) + # Android NDK requires a strict link order. + if android_ndk_version: + global generator_wants_sorted_dependencies + generator_wants_sorted_dependencies = True + + output_dir = params['options'].generator_output or \ + params['options'].toplevel_dir + builddir_name = generator_flags.get('output_dir', 'out') + qualified_out_dir = os.path.normpath(os.path.join( + output_dir, builddir_name, 'gypfiles')) + + global generator_filelist_paths + generator_filelist_paths = { + 'toplevel': params['options'].toplevel_dir, + 'qualified_out_dir': qualified_out_dir, + } + + +# The .d checking code below uses these functions: +# wildcard, sort, foreach, shell, wordlist +# wildcard can handle spaces, the rest can't. +# Since I could find no way to make foreach work with spaces in filenames +# correctly, the .d files have spaces replaced with another character. The .d +# file for +# Chromium\ Framework.framework/foo +# is for example +# out/Release/.deps/out/Release/Chromium?Framework.framework/foo +# This is the replacement character. +SPACE_REPLACEMENT = '?' 
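+
+# A small sketch of the same round trip on the Python side (the make helpers
+# replace_spaces / unreplace_spaces defined in SHARED_HEADER below do the
+# equivalent with $(subst ...)):
+#
+#   'Chromium Framework.framework/foo'.replace(' ', SPACE_REPLACEMENT)
+#     == 'Chromium?Framework.framework/foo'
+#   'Chromium?Framework.framework/foo'.replace(SPACE_REPLACEMENT, ' ')
+#     == 'Chromium Framework.framework/foo'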
+ + +LINK_COMMANDS_LINUX = """\ +quiet_cmd_alink = AR($(TOOLSET)) $@ +cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) + +quiet_cmd_alink_thin = AR($(TOOLSET)) $@ +cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^) + +# Due to circular dependencies between libraries :(, we wrap the +# special "figure out circular dependencies" flags around the entire +# input list during linking. +quiet_cmd_link = LINK($(TOOLSET)) $@ +cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS) + +# We support two kinds of shared objects (.so): +# 1) shared_library, which is just bundling together many dependent libraries +# into a link line. +# 2) loadable_module, which is generating a module intended for dlopen(). +# +# They differ only slightly: +# In the former case, we want to package all dependent code into the .so. +# In the latter case, we want to package just the API exposed by the +# outermost module. +# This means shared_library uses --whole-archive, while loadable_module doesn't. +# (Note that --whole-archive is incompatible with the --start-group used in +# normal linking.) + +# Other shared-object link notes: +# - Set SONAME to the library filename so our binaries don't reference +# the local, absolute paths used on the link command-line. +quiet_cmd_solink = SOLINK($(TOOLSET)) $@ +cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) + +quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ +cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) +""" + +LINK_COMMANDS_MAC = """\ +quiet_cmd_alink = LIBTOOL-STATIC $@ +cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^) + +quiet_cmd_link = LINK($(TOOLSET)) $@ +cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS) + +quiet_cmd_solink = SOLINK($(TOOLSET)) $@ +cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS) + +quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ +cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) +""" + +LINK_COMMANDS_ANDROID = """\ +quiet_cmd_alink = AR($(TOOLSET)) $@ +cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^) + +quiet_cmd_alink_thin = AR($(TOOLSET)) $@ +cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^) + +# Due to circular dependencies between libraries :(, we wrap the +# special "figure out circular dependencies" flags around the entire +# input list during linking. +quiet_cmd_link = LINK($(TOOLSET)) $@ +quiet_cmd_link_host = LINK($(TOOLSET)) $@ +cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS) +cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) + +# Other shared-object link notes: +# - Set SONAME to the library filename so our binaries don't reference +# the local, absolute paths used on the link command-line. 
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@ +cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) + +quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ +cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS) +quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@ +cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) +""" + + +LINK_COMMANDS_AIX = """\ +quiet_cmd_alink = AR($(TOOLSET)) $@ +cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^) + +quiet_cmd_alink_thin = AR($(TOOLSET)) $@ +cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^) + +quiet_cmd_link = LINK($(TOOLSET)) $@ +cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) + +quiet_cmd_solink = SOLINK($(TOOLSET)) $@ +cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) + +quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ +cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) +""" + + +# Header of toplevel Makefile. +# This should go into the build tree, but it's easier to keep it here for now. +SHARED_HEADER = ("""\ +# We borrow heavily from the kernel build setup, though we are simpler since +# we don't have Kconfig tweaking settings on us. + +# The implicit make rules have it looking for RCS files, among other things. +# We instead explicitly write all the rules we care about. +# It's even quicker (saves ~200ms) to pass -r on the command line. +MAKEFLAGS=-r + +# The source directory tree. +srcdir := %(srcdir)s +abs_srcdir := $(abspath $(srcdir)) + +# The name of the builddir. +builddir_name ?= %(builddir)s + +# The V=1 flag on command line makes us verbosely print command lines. +ifdef V + quiet= +else + quiet=quiet_ +endif + +# Specify BUILDTYPE=Release on the command line for a release build. +BUILDTYPE ?= %(default_configuration)s + +# Directory all our build output goes into. +# Note that this must be two directories beneath src/ for unit tests to pass, +# as they reach into the src/ directory for data with relative paths. +builddir ?= $(builddir_name)/$(BUILDTYPE) +abs_builddir := $(abspath $(builddir)) +depsdir := $(builddir)/.deps + +# Object output directory. +obj := $(builddir)/obj +abs_obj := $(abspath $(obj)) + +# We build up a list of every single one of the targets so we can slurp in the +# generated dependency rule Makefiles in one pass. +all_deps := + +%(make_global_settings)s + +CC.target ?= %(CC.target)s +CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS) +CXX.target ?= %(CXX.target)s +CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS) +LINK.target ?= %(LINK.target)s +LDFLAGS.target ?= $(LDFLAGS) +AR.target ?= $(AR) + +# C++ apps need to be linked with g++. +LINK ?= $(CXX.target) + +# TODO(evan): move all cross-compilation logic to gyp-time so we don't need +# to replicate this environment fallback in make as well. +CC.host ?= %(CC.host)s +CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host) +CXX.host ?= %(CXX.host)s +CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host) +LINK.host ?= %(LINK.host)s +LDFLAGS.host ?= +AR.host ?= %(AR.host)s + +# Define a dir function that can handle spaces. 
+# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions +# "leading spaces cannot appear in the text of the first argument as written. +# These characters can be put into the argument value by variable substitution." +empty := +space := $(empty) $(empty) + +# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces +replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1) +unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1) +dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1))) + +# Flags to make gcc output dependency info. Note that you need to be +# careful here to use the flags that ccache and distcc can understand. +# We write to a dep file on the side first and then rename at the end +# so we can't end up with a broken dep file. +depfile = $(depsdir)/$(call replace_spaces,$@).d +DEPFLAGS = -MMD -MF $(depfile).raw + +# We have to fixup the deps output in a few ways. +# (1) the file output should mention the proper .o file. +# ccache or distcc lose the path to the target, so we convert a rule of +# the form: +# foobar.o: DEP1 DEP2 +# into +# path/to/foobar.o: DEP1 DEP2 +# (2) we want missing files not to cause us to fail to build. +# We want to rewrite +# foobar.o: DEP1 DEP2 \\ +# DEP3 +# to +# DEP1: +# DEP2: +# DEP3: +# so if the files are missing, they're just considered phony rules. +# We have to do some pretty insane escaping to get those backslashes +# and dollar signs past make, the shell, and sed at the same time. +# Doesn't work with spaces, but that's fine: .d files have spaces in +# their names replaced with other characters.""" +r""" +define fixup_dep +# The depfile may not exist if the input file didn't have any #includes. +touch $(depfile).raw +# Fixup path as in (1). +sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile) +# Add extra rules as in (2). +# We remove slashes and replace spaces with new lines; +# remove blank lines; +# delete the first line and append a colon to the remaining lines. +sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\ + grep -v '^$$' |\ + sed -e 1d -e 's|$$|:|' \ + >> $(depfile) +rm $(depfile).raw +endef +""" +""" +# Command definitions: +# - cmd_foo is the actual command to run; +# - quiet_cmd_foo is the brief-output summary of the command. + +quiet_cmd_cc = CC($(TOOLSET)) $@ +cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $< + +quiet_cmd_cxx = CXX($(TOOLSET)) $@ +cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< +%(extra_commands)s +quiet_cmd_touch = TOUCH $@ +cmd_touch = touch $@ + +quiet_cmd_copy = COPY $@ +# send stderr to /dev/null to ignore messages when linking directories. +cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@") + +%(link_commands)s +""" + +r""" +# Define an escape_quotes function to escape single quotes. +# This allows us to handle quotes properly as long as we always use +# use single quotes and escape_quotes. +escape_quotes = $(subst ','\'',$(1)) +# This comment is here just to include a ' to unconfuse syntax highlighting. +# Define an escape_vars function to escape '$' variable syntax. +# This allows us to read/write command lines with shell variables (e.g. +# $LD_LIBRARY_PATH), without triggering make substitution. +escape_vars = $(subst $$,$$$$,$(1)) +# Helper that expands to a shell command to echo a string exactly as it is in +# make. 
This uses printf instead of echo because printf's behaviour with respect +# to escape sequences is more portable than echo's across different shells +# (e.g., dash, bash). +exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))' +""" +""" +# Helper to compare the command we're about to run against the command +# we logged the last time we ran the command. Produces an empty +# string (false) when the commands match. +# Tricky point: Make has no string-equality test function. +# The kernel uses the following, but it seems like it would have false +# positives, where one string reordered its arguments. +# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\ +# $(filter-out $(cmd_$@), $(cmd_$(1)))) +# We instead substitute each for the empty string into the other, and +# say they're equal if both substitutions produce the empty string. +# .d files contain """ + SPACE_REPLACEMENT + \ + """ instead of spaces, take that into account. +command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\ + $(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1)))) + +# Helper that is non-empty when a prerequisite changes. +# Normally make does this implicitly, but we force rules to always run +# so we can check their command lines. +# $? -- new prerequisites +# $| -- order-only dependencies +prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?)) + +# Helper that executes all postbuilds until one fails. +define do_postbuilds + @E=0;\\ + for p in $(POSTBUILDS); do\\ + eval $$p;\\ + E=$$?;\\ + if [ $$E -ne 0 ]; then\\ + break;\\ + fi;\\ + done;\\ + if [ $$E -ne 0 ]; then\\ + rm -rf "$@";\\ + exit $$E;\\ + fi +endef + +# do_cmd: run a command via the above cmd_foo names, if necessary. +# Should always run for a given target to handle command-line changes. +# Second argument, if non-zero, makes it do asm/C/C++ dependency munging. +# Third argument, if non-zero, makes it do POSTBUILDS processing. +# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \ + SPACE_REPLACEMENT + """ for +# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \ + """ characters. +define do_cmd +$(if $(or $(command_changed),$(prereq_changed)), + @$(call exact_echo, $($(quiet)cmd_$(1))) + @mkdir -p "$(call dirx,$@)" "$(dir $(depfile))" + $(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))), + @$(cmd_$(1)) + @echo " $(quiet_cmd_$(1)): Finished", + @$(cmd_$(1)) + ) + @$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile) + @$(if $(2),$(fixup_dep)) + $(if $(and $(3), $(POSTBUILDS)), + $(call do_postbuilds) + ) +) +endef + +# Declare the "%(default_target)s" target first so it is the default, +# even though we don't have the deps yet. +.PHONY: %(default_target)s +%(default_target)s: + +# make looks for ways to re-generate included makefiles, but in our case, we +# don't have a direct way. Explicitly telling make that it has nothing to do +# for them makes it go faster. +%%.d: ; + +# Use FORCE_DO_CMD to force a target to run. Should be coupled with +# do_cmd. +.PHONY: FORCE_DO_CMD +FORCE_DO_CMD: + +""") + +SHARED_HEADER_MAC_COMMANDS = """ +quiet_cmd_objc = CXX($(TOOLSET)) $@ +cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $< + +quiet_cmd_objcxx = CXX($(TOOLSET)) $@ +cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $< + +# Commands for precompiled header files. 
+quiet_cmd_pch_c = CXX($(TOOLSET)) $@ +cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< +quiet_cmd_pch_cc = CXX($(TOOLSET)) $@ +cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $< +quiet_cmd_pch_m = CXX($(TOOLSET)) $@ +cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $< +quiet_cmd_pch_mm = CXX($(TOOLSET)) $@ +cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $< + +# gyp-mac-tool is written next to the root Makefile by gyp. +# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd +# already. +quiet_cmd_mac_tool = MACTOOL $(4) $< +cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@" + +quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@ +cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4) + +quiet_cmd_infoplist = INFOPLIST $@ +cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@" +""" + + +def WriteRootHeaderSuffixRules(writer): + extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower) + + writer.write('# Suffix rules, putting all outputs into $(obj).\n') + for ext in extensions: + writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext) + writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext]) + + writer.write('\n# Try building from generated source, too.\n') + for ext in extensions: + writer.write( + '$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext) + writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext]) + writer.write('\n') + for ext in extensions: + writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext) + writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext]) + writer.write('\n') + + +SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\ +# Suffix rules, putting all outputs into $(obj). +""") + + +SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\ +# Try building from generated source, too. +""") + + +SHARED_FOOTER = """\ +# "all" is a concatenation of the "all" targets from all the included +# sub-makefiles. This is just here to clarify. +all: + +# Add in dependency-tracking rules. $(all_deps) is the list of every single +# target in our tree. Only consider the ones with .d (dependency) info: +d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d)) +ifneq ($(d_files),) + include $(d_files) +endif +""" + +header = """\ +# This file is generated by gyp; do not edit. + +""" + +# Maps every compilable file extension to the do_cmd that compiles it. +COMPILABLE_EXTENSIONS = { + '.c': 'cc', + '.cc': 'cxx', + '.cpp': 'cxx', + '.cxx': 'cxx', + '.s': 'cc', + '.S': 'cc', +} + +def Compilable(filename): + """Return true if the file is compilable (should be in OBJS).""" + for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS): + if res: + return True + return False + + +def Linkable(filename): + """Return true if the file is linkable (should be on the link line).""" + return filename.endswith('.o') + + +def Target(filename): + """Translate a compilable filename to its .o target.""" + return os.path.splitext(filename)[0] + '.o' + + +def EscapeShellArgument(s): + """Quotes an argument so that it will be interpreted literally by a POSIX + shell. 
Taken from + http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python + """ + return "'" + s.replace("'", "'\\''") + "'" + + +def EscapeMakeVariableExpansion(s): + """Make has its own variable expansion syntax using $. We must escape it for + string to be interpreted literally.""" + return s.replace('$', '$$') + + +def EscapeCppDefine(s): + """Escapes a CPP define so that it will reach the compiler unaltered.""" + s = EscapeShellArgument(s) + s = EscapeMakeVariableExpansion(s) + # '#' characters must be escaped even embedded in a string, else Make will + # treat it as the start of a comment. + return s.replace('#', r'\#') + + +def QuoteIfNecessary(string): + """TODO: Should this ideally be replaced with one or more of the above + functions?""" + if '"' in string: + string = '"' + string.replace('"', '\\"') + '"' + return string + + +def StringToMakefileVariable(string): + """Convert a string to a value that is acceptable as a make variable name.""" + return re.sub('[^a-zA-Z0-9_]', '_', string) + + +srcdir_prefix = '' +def Sourceify(path): + """Convert a path to its source directory form.""" + if '$(' in path: + return path + if os.path.isabs(path): + return path + return srcdir_prefix + path + + +def QuoteSpaces(s, quote=r'\ '): + return s.replace(' ', quote) + + +# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py. +def _ValidateSourcesForOSX(spec, all_sources): + """Makes sure if duplicate basenames are not specified in the source list. + + Arguments: + spec: The target dictionary containing the properties of the target. + """ + if spec.get('type', None) != 'static_library': + return + + basenames = {} + for source in all_sources: + name, ext = os.path.splitext(source) + is_compiled_file = ext in [ + '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'] + if not is_compiled_file: + continue + basename = os.path.basename(name) # Don't include extension. + basenames.setdefault(basename, []).append(source) + + error = '' + for basename, files in basenames.iteritems(): + if len(files) > 1: + error += ' %s: %s\n' % (basename, ' '.join(files)) + + if error: + print('static library %s has several files with the same basename:\n' % + spec['target_name'] + error + 'libtool on OS X will generate' + + ' warnings for them.') + raise GypError('Duplicate basenames in sources section, see list above') + + +# Map from qualified target to path to output. +target_outputs = {} +# Map from qualified target to any linkable output. A subset +# of target_outputs. E.g. when mybinary depends on liba, we want to +# include liba in the linker line; when otherbinary depends on +# mybinary, we just want to build mybinary first. +target_link_deps = {} + + +class MakefileWriter(object): + """MakefileWriter packages up the writing of one target-specific foobar.mk. + + Its only real entry point is Write(), and is mostly used for namespacing. + """ + + def __init__(self, generator_flags, flavor): + self.generator_flags = generator_flags + self.flavor = flavor + + self.suffix_rules_srcdir = {} + self.suffix_rules_objdir1 = {} + self.suffix_rules_objdir2 = {} + + # Generate suffix rules for all compilable extensions. + for ext in COMPILABLE_EXTENSIONS.keys(): + # Suffix rules for source folder. + self.suffix_rules_srcdir.update({ext: ("""\ +$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD + @$(call do_cmd,%s,1) +""" % (ext, COMPILABLE_EXTENSIONS[ext]))}) + + # Suffix rules for generated source files. 
+ self.suffix_rules_objdir1.update({ext: ("""\ +$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD + @$(call do_cmd,%s,1) +""" % (ext, COMPILABLE_EXTENSIONS[ext]))}) + self.suffix_rules_objdir2.update({ext: ("""\ +$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD + @$(call do_cmd,%s,1) +""" % (ext, COMPILABLE_EXTENSIONS[ext]))}) + + + def Write(self, qualified_target, base_path, output_filename, spec, configs, + part_of_all): + """The main entry point: writes a .mk file for a single target. + + Arguments: + qualified_target: target we're generating + base_path: path relative to source root we're building in, used to resolve + target-relative paths + output_filename: output .mk file name to write + spec, configs: gyp info + part_of_all: flag indicating this target is part of 'all' + """ + gyp.common.EnsureDirExists(output_filename) + + self.fp = open(output_filename, 'w') + + self.fp.write(header) + + self.qualified_target = qualified_target + self.path = base_path + self.target = spec['target_name'] + self.type = spec['type'] + self.toolset = spec['toolset'] + + self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec) + if self.flavor == 'mac': + self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) + else: + self.xcode_settings = None + + deps, link_deps = self.ComputeDeps(spec) + + # Some of the generation below can add extra output, sources, or + # link dependencies. All of the out params of the functions that + # follow use names like extra_foo. + extra_outputs = [] + extra_sources = [] + extra_link_deps = [] + extra_mac_bundle_resources = [] + mac_bundle_deps = [] + + if self.is_mac_bundle: + self.output = self.ComputeMacBundleOutput(spec) + self.output_binary = self.ComputeMacBundleBinaryOutput(spec) + else: + self.output = self.output_binary = self.ComputeOutput(spec) + + self.is_standalone_static_library = bool( + spec.get('standalone_static_library', 0)) + self._INSTALLABLE_TARGETS = ('executable', 'loadable_module', + 'shared_library') + if (self.is_standalone_static_library or + self.type in self._INSTALLABLE_TARGETS): + self.alias = os.path.basename(self.output) + install_path = self._InstallableTargetInstallPath() + else: + self.alias = self.output + install_path = self.output + + self.WriteLn("TOOLSET := " + self.toolset) + self.WriteLn("TARGET := " + self.target) + + # Actions must come first, since they can generate more OBJs for use below. + if 'actions' in spec: + self.WriteActions(spec['actions'], extra_sources, extra_outputs, + extra_mac_bundle_resources, part_of_all) + + # Rules must be early like actions. + if 'rules' in spec: + self.WriteRules(spec['rules'], extra_sources, extra_outputs, + extra_mac_bundle_resources, part_of_all) + + if 'copies' in spec: + self.WriteCopies(spec['copies'], extra_outputs, part_of_all) + + # Bundle resources. + if self.is_mac_bundle: + all_mac_bundle_resources = ( + spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources) + self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps) + self.WriteMacInfoPlist(mac_bundle_deps) + + # Sources. + all_sources = spec.get('sources', []) + extra_sources + if all_sources: + if self.flavor == 'mac': + # libtool on OS X generates warnings for duplicate basenames in the same + # target. 
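+        # For instance (the paths here are hypothetical), a static_library
+        # listing both foo/util.c and bar/util.c trips the duplicate-basename
+        # check in _ValidateSourcesForOSX, since both would build util.o.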
+ _ValidateSourcesForOSX(spec, all_sources) + self.WriteSources( + configs, deps, all_sources, extra_outputs, + extra_link_deps, part_of_all, + gyp.xcode_emulation.MacPrefixHeader( + self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)), + self.Pchify)) + sources = filter(Compilable, all_sources) + if sources: + self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1) + extensions = set([os.path.splitext(s)[1] for s in sources]) + for ext in extensions: + if ext in self.suffix_rules_srcdir: + self.WriteLn(self.suffix_rules_srcdir[ext]) + self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2) + for ext in extensions: + if ext in self.suffix_rules_objdir1: + self.WriteLn(self.suffix_rules_objdir1[ext]) + for ext in extensions: + if ext in self.suffix_rules_objdir2: + self.WriteLn(self.suffix_rules_objdir2[ext]) + self.WriteLn('# End of this set of suffix rules') + + # Add dependency from bundle to bundle binary. + if self.is_mac_bundle: + mac_bundle_deps.append(self.output_binary) + + self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps, + mac_bundle_deps, extra_outputs, part_of_all) + + # Update global list of target outputs, used in dependency tracking. + target_outputs[qualified_target] = install_path + + # Update global list of link dependencies. + if self.type in ('static_library', 'shared_library'): + target_link_deps[qualified_target] = self.output_binary + + # Currently any versions have the same effect, but in future the behavior + # could be different. + if self.generator_flags.get('android_ndk_version', None): + self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps) + + self.fp.close() + + + def WriteSubMake(self, output_filename, makefile_path, targets, build_dir): + """Write a "sub-project" Makefile. + + This is a small, wrapper Makefile that calls the top-level Makefile to build + the targets from a single gyp file (i.e. a sub-project). + + Arguments: + output_filename: sub-project Makefile name to write + makefile_path: path to the top-level Makefile + targets: list of "all" targets for this sub-project + build_dir: build output directory, relative to the sub-project + """ + gyp.common.EnsureDirExists(output_filename) + self.fp = open(output_filename, 'w') + self.fp.write(header) + # For consistency with other builders, put sub-project build output in the + # sub-project dir (see test/subdirectory/gyptest-subdir-all.py). + self.WriteLn('export builddir_name ?= %s' % + os.path.join(os.path.dirname(output_filename), build_dir)) + self.WriteLn('.PHONY: all') + self.WriteLn('all:') + if makefile_path: + makefile_path = ' -C ' + makefile_path + self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets))) + self.fp.close() + + + def WriteActions(self, actions, extra_sources, extra_outputs, + extra_mac_bundle_resources, part_of_all): + """Write Makefile code for any 'actions' from the gyp input. + + extra_sources: a list that will be filled in with newly generated source + files, if any + extra_outputs: a list that will be filled in with any outputs of these + actions (used to make other pieces dependent on these + actions) + part_of_all: flag indicating this target is part of 'all' + """ + env = self.GetSortedXcodeEnv() + for action in actions: + name = StringToMakefileVariable('%s_%s' % (self.qualified_target, + action['action_name'])) + self.WriteLn('### Rules for action "%s":' % action['action_name']) + inputs = action['inputs'] + outputs = action['outputs'] + + # Build up a list of outputs. + # Collect the output dirs we'll need. 
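+      # e.g. an output of 'gen/parser.c' (hypothetical) contributes 'gen' to
+      # dirs, so the command below gets prefixed with 'mkdir -p gen; '.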
+ dirs = set() + for out in outputs: + dir = os.path.split(out)[0] + if dir: + dirs.add(dir) + if int(action.get('process_outputs_as_sources', False)): + extra_sources += outputs + if int(action.get('process_outputs_as_mac_bundle_resources', False)): + extra_mac_bundle_resources += outputs + + # Write the actual command. + action_commands = action['action'] + if self.flavor == 'mac': + action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env) + for command in action_commands] + command = gyp.common.EncodePOSIXShellList(action_commands) + if 'message' in action: + self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message'])) + else: + self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name)) + if len(dirs) > 0: + command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command + + cd_action = 'cd %s; ' % Sourceify(self.path or '.') + + # command and cd_action get written to a toplevel variable called + # cmd_foo. Toplevel variables can't handle things that change per + # makefile like $(TARGET), so hardcode the target. + command = command.replace('$(TARGET)', self.target) + cd_action = cd_action.replace('$(TARGET)', self.target) + + # Set LD_LIBRARY_PATH in case the action runs an executable from this + # build which links to shared libs from this build. + # actions run on the host, so they should in theory only use host + # libraries, but until everything is made cross-compile safe, also use + # target libraries. + # TODO(piman): when everything is cross-compile safe, remove lib.target + self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:' + '$(builddir)/lib.target:$$LD_LIBRARY_PATH; ' + 'export LD_LIBRARY_PATH; ' + '%s%s' + % (name, cd_action, command)) + self.WriteLn() + outputs = map(self.Absolutify, outputs) + # The makefile rules are all relative to the top dir, but the gyp actions + # are defined relative to their containing dir. This replaces the obj + # variable for the action rule with an absolute version so that the output + # goes in the right place. + # Only write the 'obj' and 'builddir' rules for the "primary" output (:1); + # it's superfluous for the "extra outputs", and this avoids accidentally + # writing duplicate dummy rules for those outputs. + # Same for environment. + self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0])) + self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0])) + self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv()) + + for input in inputs: + assert ' ' not in input, ( + "Spaces in action input filenames not supported (%s)" % input) + for output in outputs: + assert ' ' not in output, ( + "Spaces in action output filenames not supported (%s)" % output) + + # See the comment in WriteCopies about expanding env vars. + outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs] + inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs] + + self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)), + part_of_all=part_of_all, command=name) + + # Stuff the outputs in a variable so we can refer to them later. + outputs_variable = 'action_%s_outputs' % name + self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs))) + extra_outputs.append('$(%s)' % outputs_variable) + self.WriteLn() + + self.WriteLn() + + + def WriteRules(self, rules, extra_sources, extra_outputs, + extra_mac_bundle_resources, part_of_all): + """Write Makefile code for any 'rules' from the gyp input. 
+ + extra_sources: a list that will be filled in with newly generated source + files, if any + extra_outputs: a list that will be filled in with any outputs of these + rules (used to make other pieces dependent on these rules) + part_of_all: flag indicating this target is part of 'all' + """ + env = self.GetSortedXcodeEnv() + for rule in rules: + name = StringToMakefileVariable('%s_%s' % (self.qualified_target, + rule['rule_name'])) + count = 0 + self.WriteLn('### Generated for rule %s:' % name) + + all_outputs = [] + + for rule_source in rule.get('rule_sources', []): + dirs = set() + (rule_source_dirname, rule_source_basename) = os.path.split(rule_source) + (rule_source_root, rule_source_ext) = \ + os.path.splitext(rule_source_basename) + + outputs = [self.ExpandInputRoot(out, rule_source_root, + rule_source_dirname) + for out in rule['outputs']] + + for out in outputs: + dir = os.path.dirname(out) + if dir: + dirs.add(dir) + if int(rule.get('process_outputs_as_sources', False)): + extra_sources += outputs + if int(rule.get('process_outputs_as_mac_bundle_resources', False)): + extra_mac_bundle_resources += outputs + inputs = map(Sourceify, map(self.Absolutify, [rule_source] + + rule.get('inputs', []))) + actions = ['$(call do_cmd,%s_%d)' % (name, count)] + + if name == 'resources_grit': + # HACK: This is ugly. Grit intentionally doesn't touch the + # timestamp of its output file when the file doesn't change, + # which is fine in hash-based dependency systems like scons + # and forge, but not kosher in the make world. After some + # discussion, hacking around it here seems like the least + # amount of pain. + actions += ['@touch --no-create $@'] + + # See the comment in WriteCopies about expanding env vars. + outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs] + inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs] + + outputs = map(self.Absolutify, outputs) + all_outputs += outputs + # Only write the 'obj' and 'builddir' rules for the "primary" output + # (:1); it's superfluous for the "extra outputs", and this avoids + # accidentally writing duplicate dummy rules for those outputs. + self.WriteLn('%s: obj := $(abs_obj)' % outputs[0]) + self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0]) + self.WriteMakeRule(outputs, inputs, actions, + command="%s_%d" % (name, count)) + # Spaces in rule filenames are not supported, but rule variables have + # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)'). + # The spaces within the variables are valid, so remove the variables + # before checking. + variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)') + for output in outputs: + output = re.sub(variables_with_spaces, '', output) + assert ' ' not in output, ( + "Spaces in rule filenames not yet supported (%s)" % output) + self.WriteLn('all_deps += %s' % ' '.join(outputs)) + + action = [self.ExpandInputRoot(ac, rule_source_root, + rule_source_dirname) + for ac in rule['action']] + mkdirs = '' + if len(dirs) > 0: + mkdirs = 'mkdir -p %s; ' % ' '.join(dirs) + cd_action = 'cd %s; ' % Sourceify(self.path or '.') + + # action, cd_action, and mkdirs get written to a toplevel variable + # called cmd_foo. Toplevel variables can't handle things that change + # per makefile like $(TARGET), so hardcode the target. 
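+        # As a rough sketch (hypothetical rule name 'myrule'), the variable
+        # written below ends up looking like:
+        #   cmd_myrule_0 = LD_LIBRARY_PATH=...; export LD_LIBRARY_PATH; \
+        #       cd <gyp dir>; mkdir -p <dirs>; <expanded action>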
+ if self.flavor == 'mac': + action = [gyp.xcode_emulation.ExpandEnvVars(command, env) + for command in action] + action = gyp.common.EncodePOSIXShellList(action) + action = action.replace('$(TARGET)', self.target) + cd_action = cd_action.replace('$(TARGET)', self.target) + mkdirs = mkdirs.replace('$(TARGET)', self.target) + + # Set LD_LIBRARY_PATH in case the rule runs an executable from this + # build which links to shared libs from this build. + # rules run on the host, so they should in theory only use host + # libraries, but until everything is made cross-compile safe, also use + # target libraries. + # TODO(piman): when everything is cross-compile safe, remove lib.target + self.WriteLn( + "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH=" + "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; " + "export LD_LIBRARY_PATH; " + "%(cd_action)s%(mkdirs)s%(action)s" % { + 'action': action, + 'cd_action': cd_action, + 'count': count, + 'mkdirs': mkdirs, + 'name': name, + }) + self.WriteLn( + 'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % { + 'count': count, + 'name': name, + }) + self.WriteLn() + count += 1 + + outputs_variable = 'rule_%s_outputs' % name + self.WriteList(all_outputs, outputs_variable) + extra_outputs.append('$(%s)' % outputs_variable) + + self.WriteLn('### Finished generating for rule: %s' % name) + self.WriteLn() + self.WriteLn('### Finished generating for all rules') + self.WriteLn('') + + + def WriteCopies(self, copies, extra_outputs, part_of_all): + """Write Makefile code for any 'copies' from the gyp input. + + extra_outputs: a list that will be filled in with any outputs of this action + (used to make other pieces dependent on this action) + part_of_all: flag indicating this target is part of 'all' + """ + self.WriteLn('### Generated for copy rule.') + + variable = StringToMakefileVariable(self.qualified_target + '_copies') + outputs = [] + for copy in copies: + for path in copy['files']: + # Absolutify() may call normpath, and will strip trailing slashes. + path = Sourceify(self.Absolutify(path)) + filename = os.path.split(path)[1] + output = Sourceify(self.Absolutify(os.path.join(copy['destination'], + filename))) + + # If the output path has variables in it, which happens in practice for + # 'copies', writing the environment as target-local doesn't work, + # because the variables are already needed for the target name. + # Copying the environment variables into global make variables doesn't + # work either, because then the .d files will potentially contain spaces + # after variable expansion, and .d file handling cannot handle spaces. + # As a workaround, manually expand variables at gyp time. Since 'copies' + # can't run scripts, there's no need to write the env then. + # WriteDoCmd() will escape spaces for .d files. 
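+        # e.g. a destination under '${BUILT_PRODUCTS_DIR}' (hypothetical) is
+        # resolved to a literal path here, at gyp time, instead of being left
+        # for make to expand.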
+ env = self.GetSortedXcodeEnv() + output = gyp.xcode_emulation.ExpandEnvVars(output, env) + path = gyp.xcode_emulation.ExpandEnvVars(path, env) + self.WriteDoCmd([output], [path], 'copy', part_of_all) + outputs.append(output) + self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs)))) + extra_outputs.append('$(%s)' % variable) + self.WriteLn() + + + def WriteMacBundleResources(self, resources, bundle_deps): + """Writes Makefile code for 'mac_bundle_resources'.""" + self.WriteLn('### Generated for mac_bundle_resources') + + for output, res in gyp.xcode_emulation.GetMacBundleResources( + generator_default_variables['PRODUCT_DIR'], self.xcode_settings, + map(Sourceify, map(self.Absolutify, resources))): + _, ext = os.path.splitext(output) + if ext != '.xcassets': + # Make does not supports '.xcassets' emulation. + self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource', + part_of_all=True) + bundle_deps.append(output) + + + def WriteMacInfoPlist(self, bundle_deps): + """Write Makefile code for bundle Info.plist files.""" + info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist( + generator_default_variables['PRODUCT_DIR'], self.xcode_settings, + lambda p: Sourceify(self.Absolutify(p))) + if not info_plist: + return + if defines: + # Create an intermediate file to store preprocessed results. + intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' + + os.path.basename(info_plist)) + self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D', + quoter=EscapeCppDefine) + self.WriteMakeRule([intermediate_plist], [info_plist], + ['$(call do_cmd,infoplist)', + # "Convert" the plist so that any weird whitespace changes from the + # preprocessor do not affect the XML parser in mac_tool. + '@plutil -convert xml1 $@ $@']) + info_plist = intermediate_plist + # plists can contain envvars and substitute them into the file. + self.WriteSortedXcodeEnv( + out, self.GetSortedXcodeEnv(additional_settings=extra_env)) + self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist', + part_of_all=True) + bundle_deps.append(out) + + + def WriteSources(self, configs, deps, sources, + extra_outputs, extra_link_deps, + part_of_all, precompiled_header): + """Write Makefile code for any 'sources' from the gyp input. + These are source files necessary to build the current target. + + configs, deps, sources: input from gyp. + extra_outputs: a list of extra outputs this action should be dependent on; + used to serialize action/rules before compilation + extra_link_deps: a list that will be filled in with any outputs of + compilation (to be used in link lines) + part_of_all: flag indicating this target is part of 'all' + """ + + # Write configuration-specific variables for CFLAGS, etc. 
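+    # For a configuration named 'Debug' (example name), this emits DEFS_Debug,
+    # CFLAGS_Debug, CFLAGS_C_Debug, CFLAGS_CC_Debug and INCS_Debug; the
+    # target-local GYP_CFLAGS/GYP_CXXFLAGS assignments below pick among them
+    # via $(BUILDTYPE).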
+ for configname in sorted(configs.keys()): + config = configs[configname] + self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D', + quoter=EscapeCppDefine) + + if self.flavor == 'mac': + cflags = self.xcode_settings.GetCflags(configname) + cflags_c = self.xcode_settings.GetCflagsC(configname) + cflags_cc = self.xcode_settings.GetCflagsCC(configname) + cflags_objc = self.xcode_settings.GetCflagsObjC(configname) + cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname) + else: + cflags = config.get('cflags') + cflags_c = config.get('cflags_c') + cflags_cc = config.get('cflags_cc') + + self.WriteLn("# Flags passed to all source files."); + self.WriteList(cflags, 'CFLAGS_%s' % configname) + self.WriteLn("# Flags passed to only C files."); + self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname) + self.WriteLn("# Flags passed to only C++ files."); + self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname) + if self.flavor == 'mac': + self.WriteLn("# Flags passed to only ObjC files."); + self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname) + self.WriteLn("# Flags passed to only ObjC++ files."); + self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname) + includes = config.get('include_dirs') + if includes: + includes = map(Sourceify, map(self.Absolutify, includes)) + self.WriteList(includes, 'INCS_%s' % configname, prefix='-I') + + compilable = filter(Compilable, sources) + objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable))) + self.WriteList(objs, 'OBJS') + + for obj in objs: + assert ' ' not in obj, ( + "Spaces in object filenames not supported (%s)" % obj) + self.WriteLn('# Add to the list of files we specially track ' + 'dependencies for.') + self.WriteLn('all_deps += $(OBJS)') + self.WriteLn() + + # Make sure our dependencies are built first. + if deps: + self.WriteMakeRule(['$(OBJS)'], deps, + comment = 'Make sure our dependencies are built ' + 'before any of us.', + order_only = True) + + # Make sure the actions and rules run first. + # If they generate any extra headers etc., the per-.o file dep tracking + # will catch the proper rebuilds, so order only is still ok here. + if extra_outputs: + self.WriteMakeRule(['$(OBJS)'], extra_outputs, + comment = 'Make sure our actions/rules run ' + 'before any of us.', + order_only = True) + + pchdeps = precompiled_header.GetObjDependencies(compilable, objs ) + if pchdeps: + self.WriteLn('# Dependencies from obj files to their precompiled headers') + for source, obj, gch in pchdeps: + self.WriteLn('%s: %s' % (obj, gch)) + self.WriteLn('# End precompiled header dependencies') + + if objs: + extra_link_deps.append('$(OBJS)') + self.WriteLn("""\ +# CFLAGS et al overrides must be target-local. 
+# See "Target-specific Variable Values" in the GNU Make manual.""") + self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)") + self.WriteLn("$(OBJS): GYP_CFLAGS := " + "$(DEFS_$(BUILDTYPE)) " + "$(INCS_$(BUILDTYPE)) " + "%s " % precompiled_header.GetInclude('c') + + "$(CFLAGS_$(BUILDTYPE)) " + "$(CFLAGS_C_$(BUILDTYPE))") + self.WriteLn("$(OBJS): GYP_CXXFLAGS := " + "$(DEFS_$(BUILDTYPE)) " + "$(INCS_$(BUILDTYPE)) " + "%s " % precompiled_header.GetInclude('cc') + + "$(CFLAGS_$(BUILDTYPE)) " + "$(CFLAGS_CC_$(BUILDTYPE))") + if self.flavor == 'mac': + self.WriteLn("$(OBJS): GYP_OBJCFLAGS := " + "$(DEFS_$(BUILDTYPE)) " + "$(INCS_$(BUILDTYPE)) " + "%s " % precompiled_header.GetInclude('m') + + "$(CFLAGS_$(BUILDTYPE)) " + "$(CFLAGS_C_$(BUILDTYPE)) " + "$(CFLAGS_OBJC_$(BUILDTYPE))") + self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := " + "$(DEFS_$(BUILDTYPE)) " + "$(INCS_$(BUILDTYPE)) " + "%s " % precompiled_header.GetInclude('mm') + + "$(CFLAGS_$(BUILDTYPE)) " + "$(CFLAGS_CC_$(BUILDTYPE)) " + "$(CFLAGS_OBJCC_$(BUILDTYPE))") + + self.WritePchTargets(precompiled_header.GetPchBuildCommands()) + + # If there are any object files in our input file list, link them into our + # output. + extra_link_deps += filter(Linkable, sources) + + self.WriteLn() + + def WritePchTargets(self, pch_commands): + """Writes make rules to compile prefix headers.""" + if not pch_commands: + return + + for gch, lang_flag, lang, input in pch_commands: + extra_flags = { + 'c': '$(CFLAGS_C_$(BUILDTYPE))', + 'cc': '$(CFLAGS_CC_$(BUILDTYPE))', + 'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))', + 'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))', + }[lang] + var_name = { + 'c': 'GYP_PCH_CFLAGS', + 'cc': 'GYP_PCH_CXXFLAGS', + 'm': 'GYP_PCH_OBJCFLAGS', + 'mm': 'GYP_PCH_OBJCXXFLAGS', + }[lang] + self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) + + "$(DEFS_$(BUILDTYPE)) " + "$(INCS_$(BUILDTYPE)) " + "$(CFLAGS_$(BUILDTYPE)) " + + extra_flags) + + self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input)) + self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang) + self.WriteLn('') + assert ' ' not in gch, ( + "Spaces in gch filenames not supported (%s)" % gch) + self.WriteLn('all_deps += %s' % gch) + self.WriteLn('') + + + def ComputeOutputBasename(self, spec): + """Return the 'output basename' of a gyp spec. + + E.g., the loadable module 'foobar' in directory 'baz' will produce + 'libfoobar.so' + """ + assert not self.is_mac_bundle + + if self.flavor == 'mac' and self.type in ( + 'static_library', 'executable', 'shared_library', 'loadable_module'): + return self.xcode_settings.GetExecutablePath() + + target = spec['target_name'] + target_prefix = '' + target_ext = '' + if self.type == 'static_library': + if target[:3] == 'lib': + target = target[3:] + target_prefix = 'lib' + target_ext = '.a' + elif self.type in ('loadable_module', 'shared_library'): + if target[:3] == 'lib': + target = target[3:] + target_prefix = 'lib' + if self.flavor == 'aix': + target_ext = '.a' + else: + target_ext = '.so' + elif self.type == 'none': + target = '%s.stamp' % target + elif self.type != 'executable': + print ("ERROR: What output file should be generated?", + "type", self.type, "target", target) + + target_prefix = spec.get('product_prefix', target_prefix) + target = spec.get('product_name', target) + product_ext = spec.get('product_extension') + if product_ext: + target_ext = '.' 
+ product_ext + + return target_prefix + target + target_ext + + + def _InstallImmediately(self): + return self.toolset == 'target' and self.flavor == 'mac' and self.type in ( + 'static_library', 'executable', 'shared_library', 'loadable_module') + + + def ComputeOutput(self, spec): + """Return the 'output' (full output path) of a gyp spec. + + E.g., the loadable module 'foobar' in directory 'baz' will produce + '$(obj)/baz/libfoobar.so' + """ + assert not self.is_mac_bundle + + path = os.path.join('$(obj).' + self.toolset, self.path) + if self.type == 'executable' or self._InstallImmediately(): + path = '$(builddir)' + path = spec.get('product_dir', path) + return os.path.join(path, self.ComputeOutputBasename(spec)) + + + def ComputeMacBundleOutput(self, spec): + """Return the 'output' (full output path) to a bundle output directory.""" + assert self.is_mac_bundle + path = generator_default_variables['PRODUCT_DIR'] + return os.path.join(path, self.xcode_settings.GetWrapperName()) + + + def ComputeMacBundleBinaryOutput(self, spec): + """Return the 'output' (full output path) to the binary in a bundle.""" + path = generator_default_variables['PRODUCT_DIR'] + return os.path.join(path, self.xcode_settings.GetExecutablePath()) + + + def ComputeDeps(self, spec): + """Compute the dependencies of a gyp spec. + + Returns a tuple (deps, link_deps), where each is a list of + filenames that will need to be put in front of make for either + building (deps) or linking (link_deps). + """ + deps = [] + link_deps = [] + if 'dependencies' in spec: + deps.extend([target_outputs[dep] for dep in spec['dependencies'] + if target_outputs[dep]]) + for dep in spec['dependencies']: + if dep in target_link_deps: + link_deps.append(target_link_deps[dep]) + deps.extend(link_deps) + # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)? + # This hack makes it work: + # link_deps.extend(spec.get('libraries', [])) + return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)) + + + def WriteDependencyOnExtraOutputs(self, target, extra_outputs): + self.WriteMakeRule([self.output_binary], extra_outputs, + comment = 'Build our special outputs first.', + order_only = True) + + + def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps, + extra_outputs, part_of_all): + """Write Makefile code to produce the final target of the gyp spec. + + spec, configs: input from gyp. + deps, link_deps: dependency lists; see ComputeDeps() + extra_outputs: any extra outputs that our target should depend on + part_of_all: flag indicating this target is part of 'all' + """ + + self.WriteLn('### Rules for final target.') + + if extra_outputs: + self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs) + self.WriteMakeRule(extra_outputs, deps, + comment=('Preserve order dependency of ' + 'special output on deps.'), + order_only = True) + + target_postbuilds = {} + if self.type != 'none': + for configname in sorted(configs.keys()): + config = configs[configname] + if self.flavor == 'mac': + ldflags = self.xcode_settings.GetLdflags(configname, + generator_default_variables['PRODUCT_DIR'], + lambda p: Sourceify(self.Absolutify(p))) + + # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on. 
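+          # The implicit postbuilds collected here come from the Xcode
+          # settings (stripping and similar steps); the exact set depends on
+          # xcode_emulation, so this note is only approximate.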
+ gyp_to_build = gyp.common.InvertRelativePath(self.path) + target_postbuild = self.xcode_settings.AddImplicitPostbuilds( + configname, + QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, + self.output))), + QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, + self.output_binary)))) + if target_postbuild: + target_postbuilds[configname] = target_postbuild + else: + ldflags = config.get('ldflags', []) + # Compute an rpath for this output if needed. + if any(dep.endswith('.so') or '.so.' in dep for dep in deps): + # We want to get the literal string "$ORIGIN" into the link command, + # so we need lots of escaping. + ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset) + ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' % + self.toolset) + library_dirs = config.get('library_dirs', []) + ldflags += [('-L%s' % library_dir) for library_dir in library_dirs] + self.WriteList(ldflags, 'LDFLAGS_%s' % configname) + if self.flavor == 'mac': + self.WriteList(self.xcode_settings.GetLibtoolflags(configname), + 'LIBTOOLFLAGS_%s' % configname) + libraries = spec.get('libraries') + if libraries: + # Remove duplicate entries + libraries = gyp.common.uniquer(libraries) + if self.flavor == 'mac': + libraries = self.xcode_settings.AdjustLibraries(libraries) + self.WriteList(libraries, 'LIBS') + self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' % + QuoteSpaces(self.output_binary)) + self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary)) + + if self.flavor == 'mac': + self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' % + QuoteSpaces(self.output_binary)) + + # Postbuild actions. Like actions, but implicitly depend on the target's + # output. + postbuilds = [] + if self.flavor == 'mac': + if target_postbuilds: + postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))') + postbuilds.extend( + gyp.xcode_emulation.GetSpecPostbuildCommands(spec)) + + if postbuilds: + # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE), + # so we must output its definition first, since we declare variables + # using ":=". + self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv()) + + for configname in target_postbuilds: + self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' % + (QuoteSpaces(self.output), + configname, + gyp.common.EncodePOSIXShellList(target_postbuilds[configname]))) + + # Postbuilds expect to be run in the gyp file's directory, so insert an + # implicit postbuild to cd to there. + postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path])) + for i in xrange(len(postbuilds)): + if not postbuilds[i].startswith('$'): + postbuilds[i] = EscapeShellArgument(postbuilds[i]) + self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output)) + self.WriteLn('%s: POSTBUILDS := %s' % ( + QuoteSpaces(self.output), ' '.join(postbuilds))) + + # A bundle directory depends on its dependencies such as bundle resources + # and bundle binary. When all dependencies have been built, the bundle + # needs to be packaged. + if self.is_mac_bundle: + # If the framework doesn't contain a binary, then nothing depends + # on the actions -- make the framework depend on them directly too. + self.WriteDependencyOnExtraOutputs(self.output, extra_outputs) + + # Bundle dependencies. Note that the code below adds actions to this + # target, so if you move these two lines, move the lines below as well. 
+ self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS') + self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output)) + + # After the framework is built, package it. Needs to happen before + # postbuilds, since postbuilds depend on this. + if self.type in ('shared_library', 'loadable_module'): + self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' % + self.xcode_settings.GetFrameworkVersion()) + + # Bundle postbuilds can depend on the whole bundle, so run them after + # the bundle is packaged, not already after the bundle binary is done. + if postbuilds: + self.WriteLn('\t@$(call do_postbuilds)') + postbuilds = [] # Don't write postbuilds for target's output. + + # Needed by test/mac/gyptest-rebuild.py. + self.WriteLn('\t@true # No-op, used by tests') + + # Since this target depends on binary and resources which are in + # nested subfolders, the framework directory will be older than + # its dependencies usually. To prevent this rule from executing + # on every build (expensive, especially with postbuilds), expliclity + # update the time on the framework directory. + self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output)) + + if postbuilds: + assert not self.is_mac_bundle, ('Postbuilds for bundles should be done ' + 'on the bundle, not the binary (target \'%s\')' % self.target) + assert 'product_dir' not in spec, ('Postbuilds do not work with ' + 'custom product_dir') + + if self.type == 'executable': + self.WriteLn('%s: LD_INPUTS := %s' % ( + QuoteSpaces(self.output_binary), + ' '.join(map(QuoteSpaces, link_deps)))) + if self.toolset == 'host' and self.flavor == 'android': + self.WriteDoCmd([self.output_binary], link_deps, 'link_host', + part_of_all, postbuilds=postbuilds) + else: + self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all, + postbuilds=postbuilds) + + elif self.type == 'static_library': + for link_dep in link_deps: + assert ' ' not in link_dep, ( + "Spaces in alink input filenames not supported (%s)" % link_dep) + if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not + self.is_standalone_static_library): + self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin', + part_of_all, postbuilds=postbuilds) + else: + self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all, + postbuilds=postbuilds) + elif self.type == 'shared_library': + self.WriteLn('%s: LD_INPUTS := %s' % ( + QuoteSpaces(self.output_binary), + ' '.join(map(QuoteSpaces, link_deps)))) + self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all, + postbuilds=postbuilds) + elif self.type == 'loadable_module': + for link_dep in link_deps: + assert ' ' not in link_dep, ( + "Spaces in module input filenames not supported (%s)" % link_dep) + if self.toolset == 'host' and self.flavor == 'android': + self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host', + part_of_all, postbuilds=postbuilds) + else: + self.WriteDoCmd( + [self.output_binary], link_deps, 'solink_module', part_of_all, + postbuilds=postbuilds) + elif self.type == 'none': + # Write a stamp line. + self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all, + postbuilds=postbuilds) + else: + print "WARNING: no output for", self.type, target + + # Add an alias for each target (if there are any outputs). + # Installable target aliases are created below. 
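+    # e.g. a non-installable target 'generate_bindings' (hypothetical) gets a
+    # phony alias so 'make generate_bindings' builds its output directly.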
+ if ((self.output and self.output != self.target) and + (self.type not in self._INSTALLABLE_TARGETS)): + self.WriteMakeRule([self.target], [self.output], + comment='Add target alias', phony = True) + if part_of_all: + self.WriteMakeRule(['all'], [self.target], + comment = 'Add target alias to "all" target.', + phony = True) + + # Add special-case rules for our installable targets. + # 1) They need to install to the build dir or "product" dir. + # 2) They get shortcuts for building (e.g. "make chrome"). + # 3) They are part of "make all". + if (self.type in self._INSTALLABLE_TARGETS or + self.is_standalone_static_library): + if self.type == 'shared_library': + file_desc = 'shared library' + elif self.type == 'static_library': + file_desc = 'static library' + else: + file_desc = 'executable' + install_path = self._InstallableTargetInstallPath() + installable_deps = [self.output] + if (self.flavor == 'mac' and not 'product_dir' in spec and + self.toolset == 'target'): + # On mac, products are created in install_path immediately. + assert install_path == self.output, '%s != %s' % ( + install_path, self.output) + + # Point the target alias to the final binary output. + self.WriteMakeRule([self.target], [install_path], + comment='Add target alias', phony = True) + if install_path != self.output: + assert not self.is_mac_bundle # See comment a few lines above. + self.WriteDoCmd([install_path], [self.output], 'copy', + comment = 'Copy this to the %s output path.' % + file_desc, part_of_all=part_of_all) + installable_deps.append(install_path) + if self.output != self.alias and self.alias != self.target: + self.WriteMakeRule([self.alias], installable_deps, + comment = 'Short alias for building this %s.' % + file_desc, phony = True) + if part_of_all: + self.WriteMakeRule(['all'], [install_path], + comment = 'Add %s to "all" target.' % file_desc, + phony = True) + + + def WriteList(self, value_list, variable=None, prefix='', + quoter=QuoteIfNecessary): + """Write a variable definition that is a list of values. + + E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out + foo = blaha blahb + but in a pretty-printed style. + """ + values = '' + if value_list: + value_list = [quoter(prefix + l) for l in value_list] + values = ' \\\n\t' + ' \\\n\t'.join(value_list) + self.fp.write('%s :=%s\n\n' % (variable, values)) + + + def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None, + postbuilds=False): + """Write a Makefile rule that uses do_cmd. + + This makes the outputs dependent on the command line that was run, + as well as support the V= make command line flag. + """ + suffix = '' + if postbuilds: + assert ',' not in command + suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS + self.WriteMakeRule(outputs, inputs, + actions = ['$(call do_cmd,%s%s)' % (command, suffix)], + comment = comment, + command = command, + force = True) + # Add our outputs to the list of targets we read depfiles from. + # all_deps is only used for deps file reading, and for deps files we replace + # spaces with ? because escaping doesn't work with make's $(sort) and + # other functions. + outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs] + self.WriteLn('all_deps += %s' % ' '.join(outputs)) + + + def WriteMakeRule(self, outputs, inputs, actions=None, comment=None, + order_only=False, force=False, phony=False, command=None): + """Write a Makefile rule, with some extra tricks. 
+ + outputs: a list of outputs for the rule (note: this is not directly + supported by make; see comments below) + inputs: a list of inputs for the rule + actions: a list of shell commands to run for the rule + comment: a comment to put in the Makefile above the rule (also useful + for making this Python script's code self-documenting) + order_only: if true, makes the dependency order-only + force: if true, include FORCE_DO_CMD as an order-only dep + phony: if true, the rule does not actually generate the named output, the + output is just a name to run the rule + command: (optional) command name to generate unambiguous labels + """ + outputs = map(QuoteSpaces, outputs) + inputs = map(QuoteSpaces, inputs) + + if comment: + self.WriteLn('# ' + comment) + if phony: + self.WriteLn('.PHONY: ' + ' '.join(outputs)) + if actions: + self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0]) + force_append = ' FORCE_DO_CMD' if force else '' + + if order_only: + # Order only rule: Just write a simple rule. + # TODO(evanm): just make order_only a list of deps instead of this hack. + self.WriteLn('%s: | %s%s' % + (' '.join(outputs), ' '.join(inputs), force_append)) + elif len(outputs) == 1: + # Regular rule, one output: Just write a simple rule. + self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append)) + else: + # Regular rule, more than one output: Multiple outputs are tricky in + # make. We will write three rules: + # - All outputs depend on an intermediate file. + # - Make .INTERMEDIATE depend on the intermediate. + # - The intermediate file depends on the inputs and executes the + # actual command. + # - The intermediate recipe will 'touch' the intermediate file. + # - The multi-output rule will have an do-nothing recipe. + + # Hash the target name to avoid generating overlong filenames. + cmddigest = hashlib.sha1(command if command else self.target).hexdigest() + intermediate = "%s.intermediate" % (cmddigest) + self.WriteLn('%s: %s' % (' '.join(outputs), intermediate)) + self.WriteLn('\t%s' % '@:'); + self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate)) + self.WriteLn('%s: %s%s' % + (intermediate, ' '.join(inputs), force_append)) + actions.insert(0, '$(call do_cmd,touch)') + + if actions: + for action in actions: + self.WriteLn('\t%s' % action) + self.WriteLn() + + + def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps): + """Write a set of LOCAL_XXX definitions for Android NDK. + + These variable definitions will be used by Android NDK but do nothing for + non-Android applications. + + Arguments: + module_name: Android NDK module name, which must be unique among all + module names. + all_sources: A list of source files (will be filtered by Compilable). + link_deps: A list of link dependencies, which must be sorted in + the order from dependencies to dependents. + """ + if self.type not in ('executable', 'shared_library', 'static_library'): + return + + self.WriteLn('# Variable definitions for Android applications') + self.WriteLn('include $(CLEAR_VARS)') + self.WriteLn('LOCAL_MODULE := ' + module_name) + self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) ' + '$(DEFS_$(BUILDTYPE)) ' + # LOCAL_CFLAGS is applied to both of C and C++. There is + # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C + # sources. + '$(CFLAGS_C_$(BUILDTYPE)) ' + # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while + # LOCAL_C_INCLUDES does not expect it. So put it in + # LOCAL_CFLAGS. + '$(INCS_$(BUILDTYPE))') + # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred. 
+ self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))') + self.WriteLn('LOCAL_C_INCLUDES :=') + self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)') + + # Detect the C++ extension. + cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0} + default_cpp_ext = '.cpp' + for filename in all_sources: + ext = os.path.splitext(filename)[1] + if ext in cpp_ext: + cpp_ext[ext] += 1 + if cpp_ext[ext] > cpp_ext[default_cpp_ext]: + default_cpp_ext = ext + self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext) + + self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)), + 'LOCAL_SRC_FILES') + + # Filter out those which do not match prefix and suffix and produce + # the resulting list without prefix and suffix. + def DepsToModules(deps, prefix, suffix): + modules = [] + for filepath in deps: + filename = os.path.basename(filepath) + if filename.startswith(prefix) and filename.endswith(suffix): + modules.append(filename[len(prefix):-len(suffix)]) + return modules + + # Retrieve the default value of 'SHARED_LIB_SUFFIX' + params = {'flavor': 'linux'} + default_variables = {} + CalculateVariables(default_variables, params) + + self.WriteList( + DepsToModules(link_deps, + generator_default_variables['SHARED_LIB_PREFIX'], + default_variables['SHARED_LIB_SUFFIX']), + 'LOCAL_SHARED_LIBRARIES') + self.WriteList( + DepsToModules(link_deps, + generator_default_variables['STATIC_LIB_PREFIX'], + generator_default_variables['STATIC_LIB_SUFFIX']), + 'LOCAL_STATIC_LIBRARIES') + + if self.type == 'executable': + self.WriteLn('include $(BUILD_EXECUTABLE)') + elif self.type == 'shared_library': + self.WriteLn('include $(BUILD_SHARED_LIBRARY)') + elif self.type == 'static_library': + self.WriteLn('include $(BUILD_STATIC_LIBRARY)') + self.WriteLn() + + + def WriteLn(self, text=''): + self.fp.write(text + '\n') + + + def GetSortedXcodeEnv(self, additional_settings=None): + return gyp.xcode_emulation.GetSortedXcodeEnv( + self.xcode_settings, "$(abs_builddir)", + os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)", + additional_settings) + + + def GetSortedXcodePostbuildEnv(self): + # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack. + # TODO(thakis): It would be nice to have some general mechanism instead. + strip_save_file = self.xcode_settings.GetPerTargetSetting( + 'CHROMIUM_STRIP_SAVE_FILE', '') + # Even if strip_save_file is empty, explicitly write it. Else a postbuild + # might pick up an export from an earlier target. + return self.GetSortedXcodeEnv( + additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file}) + + + def WriteSortedXcodeEnv(self, target, env): + for k, v in env: + # For + # foo := a\ b + # the escaped space does the right thing. For + # export foo := a\ b + # it does not -- the backslash is written to the env as literal character. + # So don't escape spaces in |env[k]|. 
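+      # Illustrative output (hypothetical target and value):
+      #   out/Default/Foo.app: export PRODUCT_NAME := Foo App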
+ self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v)) + + + def Objectify(self, path): + """Convert a path to its output directory form.""" + if '$(' in path: + path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset) + if not '$(obj)' in path: + path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path) + return path + + + def Pchify(self, path, lang): + """Convert a prefix header path to its output directory form.""" + path = self.Absolutify(path) + if '$(' in path: + path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' % + (self.toolset, lang)) + return path + return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path) + + + def Absolutify(self, path): + """Convert a subdirectory-relative path into a base-relative path. + Skips over paths that contain variables.""" + if '$(' in path: + # Don't call normpath in this case, as it might collapse the + # path too aggressively if it features '..'. However it's still + # important to strip trailing slashes. + return path.rstrip('/') + return os.path.normpath(os.path.join(self.path, path)) + + + def ExpandInputRoot(self, template, expansion, dirname): + if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template: + return template + path = template % { + 'INPUT_ROOT': expansion, + 'INPUT_DIRNAME': dirname, + } + return path + + + def _InstallableTargetInstallPath(self): + """Returns the location of the final output for an installable target.""" + # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files + # rely on this. Emulate this behavior for mac. + if (self.type == 'shared_library' and + (self.flavor != 'mac' or self.toolset != 'target')): + # Install all shared libs into a common directory (per toolset) for + # convenient access with LD_LIBRARY_PATH. 
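+      # e.g. a target-toolset libfoo.so (hypothetical) lands in
+      # $(builddir)/lib.target/, matching the lib.target entry added to
+      # LD_LIBRARY_PATH for actions and rules above.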
+ return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias) + return '$(builddir)/' + self.alias + + +def WriteAutoRegenerationRule(params, root_makefile, makefile_name, + build_files): + """Write the target to regenerate the Makefile.""" + options = params['options'] + build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir) + for filename in params['build_files_arg']] + + gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'], + options.toplevel_dir) + if not gyp_binary.startswith(os.sep): + gyp_binary = os.path.join('.', gyp_binary) + + root_makefile.write( + "quiet_cmd_regen_makefile = ACTION Regenerating $@\n" + "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n" + "%(makefile_name)s: %(deps)s\n" + "\t$(call do_cmd,regen_makefile)\n\n" % { + 'makefile_name': makefile_name, + 'deps': ' '.join(map(Sourceify, build_files)), + 'cmd': gyp.common.EncodePOSIXShellList( + [gyp_binary, '-fmake'] + + gyp.RegenerateFlags(options) + + build_files_args)}) + + +def PerformBuild(data, configurations, params): + options = params['options'] + for config in configurations: + arguments = ['make'] + if options.toplevel_dir and options.toplevel_dir != '.': + arguments += '-C', options.toplevel_dir + arguments.append('BUILDTYPE=' + config) + print 'Building [%s]: %s' % (config, arguments) + subprocess.check_call(arguments) + + +def GenerateOutput(target_list, target_dicts, data, params): + options = params['options'] + flavor = gyp.common.GetFlavor(params) + generator_flags = params.get('generator_flags', {}) + builddir_name = generator_flags.get('output_dir', 'out') + android_ndk_version = generator_flags.get('android_ndk_version', None) + default_target = generator_flags.get('default_target', 'all') + + def CalculateMakefilePath(build_file, base_name): + """Determine where to write a Makefile for a given gyp file.""" + # Paths in gyp files are relative to the .gyp file, but we want + # paths relative to the source root for the master makefile. Grab + # the path of the .gyp file as the base to relativize against. + # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp". + base_path = gyp.common.RelativePath(os.path.dirname(build_file), + options.depth) + # We write the file in the base_path directory. + output_file = os.path.join(options.depth, base_path, base_name) + if options.generator_output: + output_file = os.path.join( + options.depth, options.generator_output, base_path, base_name) + base_path = gyp.common.RelativePath(os.path.dirname(build_file), + options.toplevel_dir) + return base_path, output_file + + # TODO: search for the first non-'Default' target. This can go + # away when we add verification that all targets have the + # necessary configurations. + default_configuration = None + toolsets = set([target_dicts[target]['toolset'] for target in target_list]) + for target in target_list: + spec = target_dicts[target] + if spec['default_configuration'] != 'Default': + default_configuration = spec['default_configuration'] + break + if not default_configuration: + default_configuration = 'Default' + + srcdir = '.' 
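+  # srcdir_prefix stays '' unless generator_output is set below, in which
+  # case Sourceify() prepends '$(srcdir)/' to relative source paths.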
+ makefile_name = 'Makefile' + options.suffix + makefile_path = os.path.join(options.toplevel_dir, makefile_name) + if options.generator_output: + global srcdir_prefix + makefile_path = os.path.join( + options.toplevel_dir, options.generator_output, makefile_name) + srcdir = gyp.common.RelativePath(srcdir, options.generator_output) + srcdir_prefix = '$(srcdir)/' + + flock_command= 'flock' + copy_archive_arguments = '-af' + header_params = { + 'default_target': default_target, + 'builddir': builddir_name, + 'default_configuration': default_configuration, + 'flock': flock_command, + 'flock_index': 1, + 'link_commands': LINK_COMMANDS_LINUX, + 'extra_commands': '', + 'srcdir': srcdir, + 'copy_archive_args': copy_archive_arguments, + } + if flavor == 'mac': + flock_command = './gyp-mac-tool flock' + header_params.update({ + 'flock': flock_command, + 'flock_index': 2, + 'link_commands': LINK_COMMANDS_MAC, + 'extra_commands': SHARED_HEADER_MAC_COMMANDS, + }) + elif flavor == 'android': + header_params.update({ + 'link_commands': LINK_COMMANDS_ANDROID, + }) + elif flavor == 'solaris': + header_params.update({ + 'flock': './gyp-flock-tool flock', + 'flock_index': 2, + }) + elif flavor == 'freebsd': + # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific. + header_params.update({ + 'flock': 'lockf', + }) + elif flavor == 'openbsd': + copy_archive_arguments = '-pPRf' + header_params.update({ + 'copy_archive_args': copy_archive_arguments, + }) + elif flavor == 'aix': + copy_archive_arguments = '-pPRf' + header_params.update({ + 'copy_archive_args': copy_archive_arguments, + 'link_commands': LINK_COMMANDS_AIX, + 'flock': './gyp-flock-tool flock', + 'flock_index': 2, + }) + + header_params.update({ + 'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'), + 'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'), + 'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'), + 'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'), + 'CC.host': GetEnvironFallback(('CC_host',), 'gcc'), + 'AR.host': GetEnvironFallback(('AR_host',), 'ar'), + 'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'), + 'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'), + }) + + build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) + make_global_settings_array = data[build_file].get('make_global_settings', []) + wrappers = {} + for key, value in make_global_settings_array: + if key.endswith('_wrapper'): + wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value + make_global_settings = '' + for key, value in make_global_settings_array: + if re.match('.*_wrapper', key): + continue + if value[0] != '$': + value = '$(abspath %s)' % value + wrapper = wrappers.get(key) + if wrapper: + value = '%s %s' % (wrapper, value) + del wrappers[key] + if key in ('CC', 'CC.host', 'CXX', 'CXX.host'): + make_global_settings += ( + 'ifneq (,$(filter $(origin %s), undefined default))\n' % key) + # Let gyp-time envvars win over global settings. + env_key = key.replace('.', '_') # CC.host -> CC_host + if env_key in os.environ: + value = os.environ[env_key] + make_global_settings += ' %s = %s\n' % (key, value) + make_global_settings += 'endif\n' + else: + make_global_settings += '%s ?= %s\n' % (key, value) + # TODO(ukai): define cmd when only wrapper is specified in + # make_global_settings. 
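+  # Sketch of the output for a hypothetical entry ['CC', '/usr/bin/clang']:
+  # an ifneq block assigns CC only while it is undefined or at make's
+  # default, using a gyp-time CC environment variable if one was set and
+  # falling back to $(abspath /usr/bin/clang) otherwise.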
+ + header_params['make_global_settings'] = make_global_settings + + gyp.common.EnsureDirExists(makefile_path) + root_makefile = open(makefile_path, 'w') + root_makefile.write(SHARED_HEADER % header_params) + # Currently any versions have the same effect, but in future the behavior + # could be different. + if android_ndk_version: + root_makefile.write( + '# Define LOCAL_PATH for build of Android applications.\n' + 'LOCAL_PATH := $(call my-dir)\n' + '\n') + for toolset in toolsets: + root_makefile.write('TOOLSET := %s\n' % toolset) + WriteRootHeaderSuffixRules(root_makefile) + + # Put build-time support tools next to the root Makefile. + dest_path = os.path.dirname(makefile_path) + gyp.common.CopyTool(flavor, dest_path) + + # Find the list of targets that derive from the gyp file(s) being built. + needed_targets = set() + for build_file in params['build_files']: + for target in gyp.common.AllTargets(target_list, target_dicts, build_file): + needed_targets.add(target) + + build_files = set() + include_list = set() + for qualified_target in target_list: + build_file, target, toolset = gyp.common.ParseQualifiedTarget( + qualified_target) + + this_make_global_settings = data[build_file].get('make_global_settings', []) + assert make_global_settings_array == this_make_global_settings, ( + "make_global_settings needs to be the same for all targets. %s vs. %s" % + (this_make_global_settings, make_global_settings)) + + build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir)) + included_files = data[build_file]['included_files'] + for included_file in included_files: + # The included_files entries are relative to the dir of the build file + # that included them, so we have to undo that and then make them relative + # to the root dir. + relative_include_file = gyp.common.RelativePath( + gyp.common.UnrelativePath(included_file, build_file), + options.toplevel_dir) + abs_include_file = os.path.abspath(relative_include_file) + # If the include file is from the ~/.gyp dir, we should use absolute path + # so that relocating the src dir doesn't break the path. + if (params['home_dot_gyp'] and + abs_include_file.startswith(params['home_dot_gyp'])): + build_files.add(abs_include_file) + else: + build_files.add(relative_include_file) + + base_path, output_file = CalculateMakefilePath(build_file, + target + '.' + toolset + options.suffix + '.mk') + + spec = target_dicts[qualified_target] + configs = spec['configurations'] + + if flavor == 'mac': + gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec) + + writer = MakefileWriter(generator_flags, flavor) + writer.Write(qualified_target, base_path, output_file, spec, configs, + part_of_all=qualified_target in needed_targets) + + # Our root_makefile lives at the source root. Compute the relative path + # from there to the output_file for including. + mkfile_rel_path = gyp.common.RelativePath(output_file, + os.path.dirname(makefile_path)) + include_list.add(mkfile_rel_path) + + # Write out per-gyp (sub-project) Makefiles. + depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd()) + for build_file in build_files: + # The paths in build_files were relativized above, so undo that before + # testing against the non-relativized items in target_list and before + # calculating the Makefile path. 
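+    # e.g. 'foo/foo.gyp' (hypothetical) is rejoined with depth_rel_path so it
+    # again matches the qualified target strings in target_list.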
+ build_file = os.path.join(depth_rel_path, build_file) + gyp_targets = [target_dicts[target]['target_name'] for target in target_list + if target.startswith(build_file) and + target in needed_targets] + # Only generate Makefiles for gyp files with targets. + if not gyp_targets: + continue + base_path, output_file = CalculateMakefilePath(build_file, + os.path.splitext(os.path.basename(build_file))[0] + '.Makefile') + makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path), + os.path.dirname(output_file)) + writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets, + builddir_name) + + + # Write out the sorted list of includes. + root_makefile.write('\n') + for include_file in sorted(include_list): + # We wrap each .mk include in an if statement so users can tell make to + # not load a file by setting NO_LOAD. The below make code says, only + # load the .mk file if the .mk filename doesn't start with a token in + # NO_LOAD. + root_makefile.write( + "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n" + " $(findstring $(join ^,$(prefix)),\\\n" + " $(join ^," + include_file + ")))),)\n") + root_makefile.write(" include " + include_file + "\n") + root_makefile.write("endif\n") + root_makefile.write('\n') + + if (not generator_flags.get('standalone') + and generator_flags.get('auto_regeneration', True)): + WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files) + + root_makefile.write(SHARED_FOOTER) + + root_makefile.close() diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/msvs.py b/deps/libuv/build/gyp/pylib/gyp/generator/msvs.py new file mode 100644 index 00000000..9daf4ec1 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/msvs.py @@ -0,0 +1,3518 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import copy +import ntpath +import os +import posixpath +import re +import subprocess +import sys + +import gyp.common +import gyp.easy_xml as easy_xml +import gyp.generator.ninja as ninja_generator +import gyp.MSVSNew as MSVSNew +import gyp.MSVSProject as MSVSProject +import gyp.MSVSSettings as MSVSSettings +import gyp.MSVSToolFile as MSVSToolFile +import gyp.MSVSUserFile as MSVSUserFile +import gyp.MSVSUtil as MSVSUtil +import gyp.MSVSVersion as MSVSVersion +from gyp.common import GypError +from gyp.common import OrderedSet + +# TODO: Remove once bots are on 2.7, http://crbug.com/241769 +def _import_OrderedDict(): + import collections + try: + return collections.OrderedDict + except AttributeError: + import gyp.ordered_dict + return gyp.ordered_dict.OrderedDict +OrderedDict = _import_OrderedDict() + + +# Regular expression for validating Visual Studio GUIDs. If the GUID +# contains lowercase hex letters, MSVS will be fine. However, +# IncrediBuild BuildConsole will parse the solution file, but then +# silently skip building the target causing hard to track down errors. +# Note that this only happens with the BuildConsole, and does not occur +# if IncrediBuild is executed from inside Visual Studio. This regex +# validates that the string looks like a GUID with all uppercase hex +# letters. 
+VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$') + + +generator_default_variables = { + 'DRIVER_PREFIX': '', + 'DRIVER_SUFFIX': '.sys', + 'EXECUTABLE_PREFIX': '', + 'EXECUTABLE_SUFFIX': '.exe', + 'STATIC_LIB_PREFIX': '', + 'SHARED_LIB_PREFIX': '', + 'STATIC_LIB_SUFFIX': '.lib', + 'SHARED_LIB_SUFFIX': '.dll', + 'INTERMEDIATE_DIR': '$(IntDir)', + 'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate', + 'OS': 'win', + 'PRODUCT_DIR': '$(OutDir)', + 'LIB_DIR': '$(OutDir)lib', + 'RULE_INPUT_ROOT': '$(InputName)', + 'RULE_INPUT_DIRNAME': '$(InputDir)', + 'RULE_INPUT_EXT': '$(InputExt)', + 'RULE_INPUT_NAME': '$(InputFileName)', + 'RULE_INPUT_PATH': '$(InputPath)', + 'CONFIGURATION_NAME': '$(ConfigurationName)', +} + + +# The msvs specific sections that hold paths +generator_additional_path_sections = [ + 'msvs_cygwin_dirs', + 'msvs_props', +] + + +generator_additional_non_configuration_keys = [ + 'msvs_cygwin_dirs', + 'msvs_cygwin_shell', + 'msvs_large_pdb', + 'msvs_shard', + 'msvs_external_builder', + 'msvs_external_builder_out_dir', + 'msvs_external_builder_build_cmd', + 'msvs_external_builder_clean_cmd', + 'msvs_external_builder_clcompile_cmd', + 'msvs_enable_winrt', + 'msvs_requires_importlibrary', + 'msvs_enable_winphone', + 'msvs_application_type_revision', + 'msvs_target_platform_version', + 'msvs_target_platform_minversion', +] + +generator_filelist_paths = None + +# List of precompiled header related keys. +precomp_keys = [ + 'msvs_precompiled_header', + 'msvs_precompiled_source', +] + + +cached_username = None + + +cached_domain = None + + +# TODO(gspencer): Switch the os.environ calls to be +# win32api.GetDomainName() and win32api.GetUserName() once the +# python version in depot_tools has been updated to work on Vista +# 64-bit. +def _GetDomainAndUserName(): + if sys.platform not in ('win32', 'cygwin'): + return ('DOMAIN', 'USERNAME') + global cached_username + global cached_domain + if not cached_domain or not cached_username: + domain = os.environ.get('USERDOMAIN') + username = os.environ.get('USERNAME') + if not domain or not username: + call = subprocess.Popen(['net', 'config', 'Workstation'], + stdout=subprocess.PIPE) + config = call.communicate()[0] + username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE) + username_match = username_re.search(config) + if username_match: + username = username_match.group(1) + domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE) + domain_match = domain_re.search(config) + if domain_match: + domain = domain_match.group(1) + cached_domain = domain + cached_username = username + return (cached_domain, cached_username) + +fixpath_prefix = None + + +def _NormalizedSource(source): + """Normalize the path. + + But not if that gets rid of a variable, as this may expand to something + larger than one directory. + + Arguments: + source: The path to be normalize.d + + Returns: + The normalized path. + """ + normalized = os.path.normpath(source) + if source.count('$') == normalized.count('$'): + source = normalized + return source + + +def _FixPath(path): + """Convert paths to a form that will make sense in a vcproj file. + + Arguments: + path: The path to convert, may contain / etc. + Returns: + The path with all slashes made into backslashes. 
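+    e.g. (illustrative) 'obj/foo.cc' becomes 'obj\\foo.cc', with
+    fixpath_prefix prepended first when it is set.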
+ """ + if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$': + path = os.path.join(fixpath_prefix, path) + path = path.replace('/', '\\') + path = _NormalizedSource(path) + if path and path[-1] == '\\': + path = path[:-1] + return path + + +def _FixPaths(paths): + """Fix each of the paths of the list.""" + return [_FixPath(i) for i in paths] + + +def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None, + list_excluded=True, msvs_version=None): + """Converts a list split source file paths into a vcproj folder hierarchy. + + Arguments: + sources: A list of source file paths split. + prefix: A list of source file path layers meant to apply to each of sources. + excluded: A set of excluded files. + msvs_version: A MSVSVersion object. + + Returns: + A hierarchy of filenames and MSVSProject.Filter objects that matches the + layout of the source tree. + For example: + _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']], + prefix=['joe']) + --> + [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']), + MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])] + """ + if not prefix: prefix = [] + result = [] + excluded_result = [] + folders = OrderedDict() + # Gather files into the final result, excluded, or folders. + for s in sources: + if len(s) == 1: + filename = _NormalizedSource('\\'.join(prefix + s)) + if filename in excluded: + excluded_result.append(filename) + else: + result.append(filename) + elif msvs_version and not msvs_version.UsesVcxproj(): + # For MSVS 2008 and earlier, we need to process all files before walking + # the sub folders. + if not folders.get(s[0]): + folders[s[0]] = [] + folders[s[0]].append(s[1:]) + else: + contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]], + excluded=excluded, + list_excluded=list_excluded, + msvs_version=msvs_version) + contents = MSVSProject.Filter(s[0], contents=contents) + result.append(contents) + # Add a folder for excluded files. + if excluded_result and list_excluded: + excluded_folder = MSVSProject.Filter('_excluded_files', + contents=excluded_result) + result.append(excluded_folder) + + if msvs_version and msvs_version.UsesVcxproj(): + return result + + # Populate all the folders. + for f in folders: + contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f], + excluded=excluded, + list_excluded=list_excluded, + msvs_version=msvs_version) + contents = MSVSProject.Filter(f, contents=contents) + result.append(contents) + return result + + +def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False): + if not value: return + _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset) + + +def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False): + # TODO(bradnelson): ugly hack, fix this more generally!!! 
+ if 'Directories' in setting or 'Dependencies' in setting: + if type(value) == str: + value = value.replace('/', '\\') + else: + value = [i.replace('/', '\\') for i in value] + if not tools.get(tool_name): + tools[tool_name] = dict() + tool = tools[tool_name] + if 'CompileAsWinRT' == setting: + return + if tool.get(setting): + if only_if_unset: return + if type(tool[setting]) == list and type(value) == list: + tool[setting] += value + else: + raise TypeError( + 'Appending "%s" to a non-list setting "%s" for tool "%s" is ' + 'not allowed, previous value: %s' % ( + value, setting, tool_name, str(tool[setting]))) + else: + tool[setting] = value + + +def _ConfigTargetVersion(config_data): + return config_data.get('msvs_target_version', 'Windows7') + + +def _ConfigPlatform(config_data): + return config_data.get('msvs_configuration_platform', 'Win32') + + +def _ConfigBaseName(config_name, platform_name): + if config_name.endswith('_' + platform_name): + return config_name[0:-len(platform_name) - 1] + else: + return config_name + + +def _ConfigFullName(config_name, config_data): + platform_name = _ConfigPlatform(config_data) + return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name) + + +def _ConfigWindowsTargetPlatformVersion(config_data): + ver = config_data.get('msvs_windows_sdk_version') + + for key in [r'HKLM\Software\Microsoft\Microsoft SDKs\Windows\%s', + r'HKLM\Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows\%s']: + sdk_dir = MSVSVersion._RegistryGetValue(key % ver, 'InstallationFolder') + if not sdk_dir: + continue + version = MSVSVersion._RegistryGetValue(key % ver, 'ProductVersion') or '' + # Find a matching entry in sdk_dir\include. + names = sorted([x for x in os.listdir(r'%s\include' % sdk_dir) + if x.startswith(version)], reverse=True) + return names[0] + + +def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path, + quote_cmd, do_setup_env): + + if [x for x in cmd if '$(InputDir)' in x]: + input_dir_preamble = ( + 'set INPUTDIR=$(InputDir)\n' + 'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n' + 'set INPUTDIR=%INPUTDIR:~0,-1%\n' + ) + else: + input_dir_preamble = '' + + if cygwin_shell: + # Find path to cygwin. + cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0]) + # Prepare command. 
+ direct_cmd = cmd + direct_cmd = [i.replace('$(IntDir)', + '`cygpath -m "${INTDIR}"`') for i in direct_cmd] + direct_cmd = [i.replace('$(OutDir)', + '`cygpath -m "${OUTDIR}"`') for i in direct_cmd] + direct_cmd = [i.replace('$(InputDir)', + '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd] + if has_input_path: + direct_cmd = [i.replace('$(InputPath)', + '`cygpath -m "${INPUTPATH}"`') + for i in direct_cmd] + direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd] + # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd) + direct_cmd = ' '.join(direct_cmd) + # TODO(quote): regularize quoting path names throughout the module + cmd = '' + if do_setup_env: + cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && ' + cmd += 'set CYGWIN=nontsec&& ' + if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0: + cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& ' + if direct_cmd.find('INTDIR') >= 0: + cmd += 'set INTDIR=$(IntDir)&& ' + if direct_cmd.find('OUTDIR') >= 0: + cmd += 'set OUTDIR=$(OutDir)&& ' + if has_input_path and direct_cmd.find('INPUTPATH') >= 0: + cmd += 'set INPUTPATH=$(InputPath) && ' + cmd += 'bash -c "%(cmd)s"' + cmd = cmd % {'cygwin_dir': cygwin_dir, + 'cmd': direct_cmd} + return input_dir_preamble + cmd + else: + # Convert cat --> type to mimic unix. + if cmd[0] == 'cat': + command = ['type'] + else: + command = [cmd[0].replace('/', '\\')] + # Add call before command to ensure that commands can be tied together one + # after the other without aborting in Incredibuild, since IB makes a bat + # file out of the raw command string, and some commands (like python) are + # actually batch files themselves. + command.insert(0, 'call') + # Fix the paths + # TODO(quote): This is a really ugly heuristic, and will miss path fixing + # for arguments like "--arg=path" or "/opt:path". + # If the argument starts with a slash or dash, it's probably a command line + # switch + arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]] + arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments] + arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments] + if quote_cmd: + # Support a mode for using cmd directly. + # Convert any paths to native form (first element is used directly). + # TODO(quote): regularize quoting path names throughout the module + arguments = ['"%s"' % i for i in arguments] + # Collapse into a single command. + return input_dir_preamble + ' '.join(command + arguments) + + +def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env): + # Currently this weird argument munging is used to duplicate the way a + # python script would need to be run as part of the chrome tree. + # Eventually we should add some sort of rule_default option to set this + # per project. For now the behavior chrome needs is the default. + mcs = rule.get('msvs_cygwin_shell') + if mcs is None: + mcs = int(spec.get('msvs_cygwin_shell', 1)) + elif isinstance(mcs, str): + mcs = int(mcs) + quote_cmd = int(rule.get('msvs_quote_cmd', 1)) + return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path, + quote_cmd, do_setup_env=do_setup_env) + + +def _AddActionStep(actions_dict, inputs, outputs, description, command): + """Merge action into an existing list of actions. + + Care must be taken so that actions which have overlapping inputs either don't + get assigned to the same input, or get collapsed into one. 
+ + Arguments: + actions_dict: dictionary keyed on input name, which maps to a list of + dicts describing the actions attached to that input file. + inputs: list of inputs + outputs: list of outputs + description: description of the action + command: command line to execute + """ + # Require there to be at least one input (call sites will ensure this). + assert inputs + + action = { + 'inputs': inputs, + 'outputs': outputs, + 'description': description, + 'command': command, + } + + # Pick where to stick this action. + # While less than optimal in terms of build time, attach them to the first + # input for now. + chosen_input = inputs[0] + + # Add it there. + if chosen_input not in actions_dict: + actions_dict[chosen_input] = [] + actions_dict[chosen_input].append(action) + + +def _AddCustomBuildToolForMSVS(p, spec, primary_input, + inputs, outputs, description, cmd): + """Add a custom build tool to execute something. + + Arguments: + p: the target project + spec: the target project dict + primary_input: input file to attach the build tool to + inputs: list of inputs + outputs: list of outputs + description: description of the action + cmd: command line to execute + """ + inputs = _FixPaths(inputs) + outputs = _FixPaths(outputs) + tool = MSVSProject.Tool( + 'VCCustomBuildTool', + {'Description': description, + 'AdditionalDependencies': ';'.join(inputs), + 'Outputs': ';'.join(outputs), + 'CommandLine': cmd, + }) + # Add to the properties of primary input for each config. + for config_name, c_data in spec['configurations'].iteritems(): + p.AddFileConfig(_FixPath(primary_input), + _ConfigFullName(config_name, c_data), tools=[tool]) + + +def _AddAccumulatedActionsToMSVS(p, spec, actions_dict): + """Add actions accumulated into an actions_dict, merging as needed. + + Arguments: + p: the target project + spec: the target project dict + actions_dict: dictionary keyed on input name, which maps to a list of + dicts describing the actions attached to that input file. + """ + for primary_input in actions_dict: + inputs = OrderedSet() + outputs = OrderedSet() + descriptions = [] + commands = [] + for action in actions_dict[primary_input]: + inputs.update(OrderedSet(action['inputs'])) + outputs.update(OrderedSet(action['outputs'])) + descriptions.append(action['description']) + commands.append(action['command']) + # Add the custom build step for one input file. + description = ', and also '.join(descriptions) + command = '\r\n'.join(commands) + _AddCustomBuildToolForMSVS(p, spec, + primary_input=primary_input, + inputs=inputs, + outputs=outputs, + description=description, + cmd=command) + + +def _RuleExpandPath(path, input_file): + """Given the input file to which a rule applied, string substitute a path. + + Arguments: + path: a path to string expand + input_file: the file to which the rule applied. + Returns: + The string substituted path. + """ + path = path.replace('$(InputName)', + os.path.splitext(os.path.split(input_file)[1])[0]) + path = path.replace('$(InputDir)', os.path.dirname(input_file)) + path = path.replace('$(InputExt)', + os.path.splitext(os.path.split(input_file)[1])[1]) + path = path.replace('$(InputFileName)', os.path.split(input_file)[1]) + path = path.replace('$(InputPath)', input_file) + return path + + +def _FindRuleTriggerFiles(rule, sources): + """Find the list of files which a particular rule applies to. + + Arguments: + rule: the rule in question + sources: the set of all known source files for this project + Returns: + The list of sources that trigger a particular rule. 
+ """ + return rule.get('rule_sources', []) + + +def _RuleInputsAndOutputs(rule, trigger_file): + """Find the inputs and outputs generated by a rule. + + Arguments: + rule: the rule in question. + trigger_file: the main trigger for this rule. + Returns: + The pair of (inputs, outputs) involved in this rule. + """ + raw_inputs = _FixPaths(rule.get('inputs', [])) + raw_outputs = _FixPaths(rule.get('outputs', [])) + inputs = OrderedSet() + outputs = OrderedSet() + inputs.add(trigger_file) + for i in raw_inputs: + inputs.add(_RuleExpandPath(i, trigger_file)) + for o in raw_outputs: + outputs.add(_RuleExpandPath(o, trigger_file)) + return (inputs, outputs) + + +def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options): + """Generate a native rules file. + + Arguments: + p: the target project + rules: the set of rules to include + output_dir: the directory in which the project/gyp resides + spec: the project dict + options: global generator options + """ + rules_filename = '%s%s.rules' % (spec['target_name'], + options.suffix) + rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename), + spec['target_name']) + # Add each rule. + for r in rules: + rule_name = r['rule_name'] + rule_ext = r['extension'] + inputs = _FixPaths(r.get('inputs', [])) + outputs = _FixPaths(r.get('outputs', [])) + # Skip a rule with no action and no inputs. + if 'action' not in r and not r.get('rule_sources', []): + continue + cmd = _BuildCommandLineForRule(spec, r, has_input_path=True, + do_setup_env=True) + rules_file.AddCustomBuildRule(name=rule_name, + description=r.get('message', rule_name), + extensions=[rule_ext], + additional_dependencies=inputs, + outputs=outputs, + cmd=cmd) + # Write out rules file. + rules_file.WriteIfChanged() + + # Add rules file to project. + p.AddToolFile(rules_filename) + + +def _Cygwinify(path): + path = path.replace('$(OutDir)', '$(OutDirCygwin)') + path = path.replace('$(IntDir)', '$(IntDirCygwin)') + return path + + +def _GenerateExternalRules(rules, output_dir, spec, + sources, options, actions_to_add): + """Generate an external makefile to do a set of rules. + + Arguments: + rules: the list of rules to include + output_dir: path containing project and gyp files + spec: project specification data + sources: set of sources known + options: global generator options + actions_to_add: The list of actions we will add to. + """ + filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix) + mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename)) + # Find cygwin style versions of some paths. + mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n') + mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n') + # Gather stuff needed to emit all: target. + all_inputs = OrderedSet() + all_outputs = OrderedSet() + all_output_dirs = OrderedSet() + first_outputs = [] + for rule in rules: + trigger_files = _FindRuleTriggerFiles(rule, sources) + for tf in trigger_files: + inputs, outputs = _RuleInputsAndOutputs(rule, tf) + all_inputs.update(OrderedSet(inputs)) + all_outputs.update(OrderedSet(outputs)) + # Only use one target from each rule as the dependency for + # 'all' so we don't try to build each rule multiple times. + first_outputs.append(list(outputs)[0]) + # Get the unique output directories for this rule. 
+ output_dirs = [os.path.split(i)[0] for i in outputs] + for od in output_dirs: + all_output_dirs.add(od) + first_outputs_cyg = [_Cygwinify(i) for i in first_outputs] + # Write out all: target, including mkdir for each output directory. + mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg)) + for od in all_output_dirs: + if od: + mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od) + mk_file.write('\n') + # Define how each output is generated. + for rule in rules: + trigger_files = _FindRuleTriggerFiles(rule, sources) + for tf in trigger_files: + # Get all the inputs and outputs for this rule for this trigger file. + inputs, outputs = _RuleInputsAndOutputs(rule, tf) + inputs = [_Cygwinify(i) for i in inputs] + outputs = [_Cygwinify(i) for i in outputs] + # Prepare the command line for this rule. + cmd = [_RuleExpandPath(c, tf) for c in rule['action']] + cmd = ['"%s"' % i for i in cmd] + cmd = ' '.join(cmd) + # Add it to the makefile. + mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs))) + mk_file.write('\t%s\n\n' % cmd) + # Close up the file. + mk_file.close() + + # Add makefile to list of sources. + sources.add(filename) + # Add a build action to call makefile. + cmd = ['make', + 'OutDir=$(OutDir)', + 'IntDir=$(IntDir)', + '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}', + '-f', filename] + cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True) + # Insert makefile as 0'th input, so it gets the action attached there, + # as this is easier to understand from in the IDE. + all_inputs = list(all_inputs) + all_inputs.insert(0, filename) + _AddActionStep(actions_to_add, + inputs=_FixPaths(all_inputs), + outputs=_FixPaths(all_outputs), + description='Running external rules for %s' % + spec['target_name'], + command=cmd) + + +def _EscapeEnvironmentVariableExpansion(s): + """Escapes % characters. + + Escapes any % characters so that Windows-style environment variable + expansions will leave them alone. + See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile + to understand why we have to do this. + + Args: + s: The string to be escaped. + + Returns: + The escaped string. + """ + s = s.replace('%', '%%') + return s + + +quote_replacer_regex = re.compile(r'(\\*)"') + + +def _EscapeCommandLineArgumentForMSVS(s): + """Escapes a Windows command-line argument. + + So that the Win32 CommandLineToArgv function will turn the escaped result back + into the original string. + See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx + ("Parsing C++ Command-Line Arguments") to understand why we have to do + this. + + Args: + s: the string to be escaped. + Returns: + the escaped string. + """ + + def _Replace(match): + # For a literal quote, CommandLineToArgv requires an odd number of + # backslashes preceding it, and it produces half as many literal backslashes + # (rounded down). So we need to produce 2n+1 backslashes. + return 2 * match.group(1) + '\\"' + + # Escape all quotes so that they are interpreted literally. + s = quote_replacer_regex.sub(_Replace, s) + # Now add unescaped quotes so that any whitespace is interpreted literally. + s = '"' + s + '"' + return s + + +delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)') + + +def _EscapeVCProjCommandLineArgListItem(s): + """Escapes command line arguments for MSVS. + + The VCProj format stores string lists in a single string using commas and + semi-colons as separators, which must be quoted if they are to be + interpreted literally. 
However, command-line arguments may already have + quotes, and the VCProj parser is ignorant of the backslash escaping + convention used by CommandLineToArgv, so the command-line quotes and the + VCProj quotes may not be the same quotes. So to store a general + command-line argument in a VCProj list, we need to parse the existing + quoting according to VCProj's convention and quote any delimiters that are + not already quoted by that convention. The quotes that we add will also be + seen by CommandLineToArgv, so if backslashes precede them then we also have + to escape those backslashes according to the CommandLineToArgv + convention. + + Args: + s: the string to be escaped. + Returns: + the escaped string. + """ + + def _Replace(match): + # For a non-literal quote, CommandLineToArgv requires an even number of + # backslashes preceding it, and it produces half as many literal + # backslashes. So we need to produce 2n backslashes. + return 2 * match.group(1) + '"' + match.group(2) + '"' + + segments = s.split('"') + # The unquoted segments are at the even-numbered indices. + for i in range(0, len(segments), 2): + segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i]) + # Concatenate back into a single string + s = '"'.join(segments) + if len(segments) % 2 == 0: + # String ends while still quoted according to VCProj's convention. This + # means the delimiter and the next list item that follow this one in the + # .vcproj file will be misinterpreted as part of this item. There is nothing + # we can do about this. Adding an extra quote would correct the problem in + # the VCProj but cause the same problem on the final command-line. Moving + # the item to the end of the list does works, but that's only possible if + # there's only one such item. Let's just warn the user. + print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' + + 'quotes in ' + s) + return s + + +def _EscapeCppDefineForMSVS(s): + """Escapes a CPP define so that it will reach the compiler unaltered.""" + s = _EscapeEnvironmentVariableExpansion(s) + s = _EscapeCommandLineArgumentForMSVS(s) + s = _EscapeVCProjCommandLineArgListItem(s) + # cl.exe replaces literal # characters with = in preprocesor definitions for + # some reason. Octal-encode to work around that. + s = s.replace('#', '\\%03o' % ord('#')) + return s + + +quote_replacer_regex2 = re.compile(r'(\\+)"') + + +def _EscapeCommandLineArgumentForMSBuild(s): + """Escapes a Windows command-line argument for use by MSBuild.""" + + def _Replace(match): + return (len(match.group(1)) / 2 * 4) * '\\' + '\\"' + + # Escape all quotes so that they are interpreted literally. + s = quote_replacer_regex2.sub(_Replace, s) + return s + + +def _EscapeMSBuildSpecialCharacters(s): + escape_dictionary = { + '%': '%25', + '$': '%24', + '@': '%40', + "'": '%27', + ';': '%3B', + '?': '%3F', + '*': '%2A' + } + result = ''.join([escape_dictionary.get(c, c) for c in s]) + return result + + +def _EscapeCppDefineForMSBuild(s): + """Escapes a CPP define so that it will reach the compiler unaltered.""" + s = _EscapeEnvironmentVariableExpansion(s) + s = _EscapeCommandLineArgumentForMSBuild(s) + s = _EscapeMSBuildSpecialCharacters(s) + # cl.exe replaces literal # characters with = in preprocesor definitions for + # some reason. Octal-encode to work around that. + s = s.replace('#', '\\%03o' % ord('#')) + return s + + +def _GenerateRulesForMSVS(p, output_dir, options, spec, + sources, excluded_sources, + actions_to_add): + """Generate all the rules for a particular project. 
+ + Arguments: + p: the project + output_dir: directory to emit rules to + options: global options passed to the generator + spec: the specification for this project + sources: the set of all known source files in this project + excluded_sources: the set of sources excluded from normal processing + actions_to_add: deferred list of actions to add in + """ + rules = spec.get('rules', []) + rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))] + rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))] + + # Handle rules that use a native rules file. + if rules_native: + _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options) + + # Handle external rules (non-native rules). + if rules_external: + _GenerateExternalRules(rules_external, output_dir, spec, + sources, options, actions_to_add) + _AdjustSourcesForRules(rules, sources, excluded_sources, False) + + +def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild): + # Add outputs generated by each rule (if applicable). + for rule in rules: + # Add in the outputs from this rule. + trigger_files = _FindRuleTriggerFiles(rule, sources) + for trigger_file in trigger_files: + # Remove trigger_file from excluded_sources to let the rule be triggered + # (e.g. rule trigger ax_enums.idl is added to excluded_sources + # because it's also in an action's inputs in the same project) + excluded_sources.discard(_FixPath(trigger_file)) + # Done if not processing outputs as sources. + if int(rule.get('process_outputs_as_sources', False)): + inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file) + inputs = OrderedSet(_FixPaths(inputs)) + outputs = OrderedSet(_FixPaths(outputs)) + inputs.remove(_FixPath(trigger_file)) + sources.update(inputs) + if not is_msbuild: + excluded_sources.update(inputs) + sources.update(outputs) + + +def _FilterActionsFromExcluded(excluded_sources, actions_to_add): + """Take inputs with actions attached out of the list of exclusions. + + Arguments: + excluded_sources: list of source files not to be built. + actions_to_add: dict of actions keyed on source file they're attached to. + Returns: + excluded_sources with files that have actions attached removed. + """ + must_keep = OrderedSet(_FixPaths(actions_to_add.keys())) + return [s for s in excluded_sources if s not in must_keep] + + +def _GetDefaultConfiguration(spec): + return spec['configurations'][spec['default_configuration']] + + +def _GetGuidOfProject(proj_path, spec): + """Get the guid for the project. + + Arguments: + proj_path: Path of the vcproj or vcxproj file to generate. + spec: The target dictionary containing the properties of the target. + Returns: + the guid. + Raises: + ValueError: if the specified GUID is invalid. + """ + # Pluck out the default configuration. + default_config = _GetDefaultConfiguration(spec) + # Decide the guid of the project. + guid = default_config.get('msvs_guid') + if guid: + if VALID_MSVS_GUID_CHARS.match(guid) is None: + raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' % + (guid, VALID_MSVS_GUID_CHARS.pattern)) + guid = '{%s}' % guid + guid = guid or MSVSNew.MakeGuid(proj_path) + return guid + + +def _GetMsbuildToolsetOfProject(proj_path, spec, version): + """Get the platform toolset for the project. + + Arguments: + proj_path: Path of the vcproj or vcxproj file to generate. + spec: The target dictionary containing the properties of the target. + version: The MSVSVersion object. + Returns: + the platform toolset string or None. 
+ """ + # Pluck out the default configuration. + default_config = _GetDefaultConfiguration(spec) + toolset = default_config.get('msbuild_toolset') + if not toolset and version.DefaultToolset(): + toolset = version.DefaultToolset() + if spec['type'] == 'windows_driver': + toolset = 'WindowsKernelModeDriver10.0' + return toolset + + +def _GenerateProject(project, options, version, generator_flags): + """Generates a vcproj file. + + Arguments: + project: the MSVSProject object. + options: global generator options. + version: the MSVSVersion object. + generator_flags: dict of generator-specific flags. + Returns: + A list of source files that cannot be found on disk. + """ + default_config = _GetDefaultConfiguration(project.spec) + + # Skip emitting anything if told to with msvs_existing_vcproj option. + if default_config.get('msvs_existing_vcproj'): + return [] + + if version.UsesVcxproj(): + return _GenerateMSBuildProject(project, options, version, generator_flags) + else: + return _GenerateMSVSProject(project, options, version, generator_flags) + + +# TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py. +def _ValidateSourcesForMSVSProject(spec, version): + """Makes sure if duplicate basenames are not specified in the source list. + + Arguments: + spec: The target dictionary containing the properties of the target. + version: The VisualStudioVersion object. + """ + # This validation should not be applied to MSVC2010 and later. + assert not version.UsesVcxproj() + + # TODO: Check if MSVC allows this for loadable_module targets. + if spec.get('type', None) not in ('static_library', 'shared_library'): + return + sources = spec.get('sources', []) + basenames = {} + for source in sources: + name, ext = os.path.splitext(source) + is_compiled_file = ext in [ + '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'] + if not is_compiled_file: + continue + basename = os.path.basename(name) # Don't include extension. + basenames.setdefault(basename, []).append(source) + + error = '' + for basename, files in basenames.iteritems(): + if len(files) > 1: + error += ' %s: %s\n' % (basename, ' '.join(files)) + + if error: + print('static library %s has several files with the same basename:\n' % + spec['target_name'] + error + 'MSVC08 cannot handle that.') + raise GypError('Duplicate basenames in sources section, see list above') + + +def _GenerateMSVSProject(project, options, version, generator_flags): + """Generates a .vcproj file. It may create .rules and .user files too. + + Arguments: + project: The project object we will generate the file for. + options: Global options passed to the generator. + version: The VisualStudioVersion object. + generator_flags: dict of generator-specific flags. + """ + spec = project.spec + gyp.common.EnsureDirExists(project.path) + + platforms = _GetUniquePlatforms(spec) + p = MSVSProject.Writer(project.path, version, spec['target_name'], + project.guid, platforms) + + # Get directory project file is in. + project_dir = os.path.split(project.path)[0] + gyp_path = _NormalizedSource(project.build_file) + relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir) + + config_type = _GetMSVSConfigurationType(spec, project.build_file) + for config_name, config in spec['configurations'].iteritems(): + _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config) + + # MSVC08 and prior version cannot handle duplicate basenames in the same + # target. + # TODO: Take excluded sources into consideration if possible. 
+ _ValidateSourcesForMSVSProject(spec, version) + + # Prepare list of sources and excluded sources. + gyp_file = os.path.split(project.build_file)[1] + sources, excluded_sources = _PrepareListOfSources(spec, generator_flags, + gyp_file) + + # Add rules. + actions_to_add = {} + _GenerateRulesForMSVS(p, project_dir, options, spec, + sources, excluded_sources, + actions_to_add) + list_excluded = generator_flags.get('msvs_list_excluded_files', True) + sources, excluded_sources, excluded_idl = ( + _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir, + sources, excluded_sources, + list_excluded, version)) + + # Add in files. + missing_sources = _VerifySourcesExist(sources, project_dir) + p.AddFiles(sources) + + _AddToolFilesToMSVS(p, spec) + _HandlePreCompiledHeaders(p, sources, spec) + _AddActions(actions_to_add, spec, relative_path_of_gyp_file) + _AddCopies(actions_to_add, spec) + _WriteMSVSUserFile(project.path, version, spec) + + # NOTE: this stanza must appear after all actions have been decided. + # Don't excluded sources with actions attached, or they won't run. + excluded_sources = _FilterActionsFromExcluded( + excluded_sources, actions_to_add) + _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl, + list_excluded) + _AddAccumulatedActionsToMSVS(p, spec, actions_to_add) + + # Write it out. + p.WriteIfChanged() + + return missing_sources + + +def _GetUniquePlatforms(spec): + """Returns the list of unique platforms for this spec, e.g ['win32', ...]. + + Arguments: + spec: The target dictionary containing the properties of the target. + Returns: + The MSVSUserFile object created. + """ + # Gather list of unique platforms. + platforms = OrderedSet() + for configuration in spec['configurations']: + platforms.add(_ConfigPlatform(spec['configurations'][configuration])) + platforms = list(platforms) + return platforms + + +def _CreateMSVSUserFile(proj_path, version, spec): + """Generates a .user file for the user running this Gyp program. + + Arguments: + proj_path: The path of the project file being created. The .user file + shares the same path (with an appropriate suffix). + version: The VisualStudioVersion object. + spec: The target dictionary containing the properties of the target. + Returns: + The MSVSUserFile object created. + """ + (domain, username) = _GetDomainAndUserName() + vcuser_filename = '.'.join([proj_path, domain, username, 'user']) + user_file = MSVSUserFile.Writer(vcuser_filename, version, + spec['target_name']) + return user_file + + +def _GetMSVSConfigurationType(spec, build_file): + """Returns the configuration type for this project. + + It's a number defined by Microsoft. May raise an exception. + + Args: + spec: The target dictionary containing the properties of the target. + build_file: The path of the gyp file. + Returns: + An integer, the configuration type. + """ + try: + config_type = { + 'executable': '1', # .exe + 'shared_library': '2', # .dll + 'loadable_module': '2', # .dll + 'static_library': '4', # .lib + 'windows_driver': '5', # .sys + 'none': '10', # Utility type + }[spec['type']] + except KeyError: + if spec.get('type'): + raise GypError('Target type %s is not a valid target type for ' + 'target %s in %s.' % + (spec['type'], spec['target_name'], build_file)) + else: + raise GypError('Missing type field for target %s in %s.' % + (spec['target_name'], build_file)) + return config_type + + +def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config): + """Adds a configuration to the MSVS project. 
+ + Many settings in a vcproj file are specific to a configuration. This + function the main part of the vcproj file that's configuration specific. + + Arguments: + p: The target project being generated. + spec: The target dictionary containing the properties of the target. + config_type: The configuration type, a number as defined by Microsoft. + config_name: The name of the configuration. + config: The dictionary that defines the special processing to be done + for this configuration. + """ + # Get the information for this configuration + include_dirs, midl_include_dirs, resource_include_dirs = \ + _GetIncludeDirs(config) + libraries = _GetLibraries(spec) + library_dirs = _GetLibraryDirs(config) + out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False) + defines = _GetDefines(config) + defines = [_EscapeCppDefineForMSVS(d) for d in defines] + disabled_warnings = _GetDisabledWarnings(config) + prebuild = config.get('msvs_prebuild') + postbuild = config.get('msvs_postbuild') + def_file = _GetModuleDefinition(spec) + precompiled_header = config.get('msvs_precompiled_header') + + # Prepare the list of tools as a dictionary. + tools = dict() + # Add in user specified msvs_settings. + msvs_settings = config.get('msvs_settings', {}) + MSVSSettings.ValidateMSVSSettings(msvs_settings) + + # Prevent default library inheritance from the environment. + _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)']) + + for tool in msvs_settings: + settings = config['msvs_settings'][tool] + for setting in settings: + _ToolAppend(tools, tool, setting, settings[setting]) + # Add the information to the appropriate tool + _ToolAppend(tools, 'VCCLCompilerTool', + 'AdditionalIncludeDirectories', include_dirs) + _ToolAppend(tools, 'VCMIDLTool', + 'AdditionalIncludeDirectories', midl_include_dirs) + _ToolAppend(tools, 'VCResourceCompilerTool', + 'AdditionalIncludeDirectories', resource_include_dirs) + # Add in libraries. + _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries) + _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories', + library_dirs) + if out_file: + _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True) + # Add defines. + _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines) + _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions', + defines) + # Change program database directory to prevent collisions. + _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName', + '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True) + # Add disabled warnings. + _ToolAppend(tools, 'VCCLCompilerTool', + 'DisableSpecificWarnings', disabled_warnings) + # Add Pre-build. + _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild) + # Add Post-build. + _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild) + # Turn on precompiled headers if appropriate. + if precompiled_header: + precompiled_header = os.path.split(precompiled_header)[1] + _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2') + _ToolAppend(tools, 'VCCLCompilerTool', + 'PrecompiledHeaderThrough', precompiled_header) + _ToolAppend(tools, 'VCCLCompilerTool', + 'ForcedIncludeFiles', precompiled_header) + # Loadable modules don't generate import libraries; + # tell dependent projects to not expect one. + if spec['type'] == 'loadable_module': + _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true') + # Set the module definition file if any. 
+ if def_file: + _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file) + + _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name) + + +def _GetIncludeDirs(config): + """Returns the list of directories to be used for #include directives. + + Arguments: + config: The dictionary that defines the special processing to be done + for this configuration. + Returns: + The list of directory paths. + """ + # TODO(bradnelson): include_dirs should really be flexible enough not to + # require this sort of thing. + include_dirs = ( + config.get('include_dirs', []) + + config.get('msvs_system_include_dirs', [])) + midl_include_dirs = ( + config.get('midl_include_dirs', []) + + config.get('msvs_system_include_dirs', [])) + resource_include_dirs = config.get('resource_include_dirs', include_dirs) + include_dirs = _FixPaths(include_dirs) + midl_include_dirs = _FixPaths(midl_include_dirs) + resource_include_dirs = _FixPaths(resource_include_dirs) + return include_dirs, midl_include_dirs, resource_include_dirs + + +def _GetLibraryDirs(config): + """Returns the list of directories to be used for library search paths. + + Arguments: + config: The dictionary that defines the special processing to be done + for this configuration. + Returns: + The list of directory paths. + """ + + library_dirs = config.get('library_dirs', []) + library_dirs = _FixPaths(library_dirs) + return library_dirs + + +def _GetLibraries(spec): + """Returns the list of libraries for this configuration. + + Arguments: + spec: The target dictionary containing the properties of the target. + Returns: + The list of directory paths. + """ + libraries = spec.get('libraries', []) + # Strip out -l, as it is not used on windows (but is needed so we can pass + # in libraries that are assumed to be in the default library path). + # Also remove duplicate entries, leaving only the last duplicate, while + # preserving order. + found = OrderedSet() + unique_libraries_list = [] + for entry in reversed(libraries): + library = re.sub(r'^\-l', '', entry) + if not os.path.splitext(library)[1]: + library += '.lib' + if library not in found: + found.add(library) + unique_libraries_list.append(library) + unique_libraries_list.reverse() + return unique_libraries_list + + +def _GetOutputFilePathAndTool(spec, msbuild): + """Returns the path and tool to use for this target. + + Figures out the path of the file this spec will create and the name of + the VC tool that will create it. + + Arguments: + spec: The target dictionary containing the properties of the target. + Returns: + A triple of (file path, name of the vc tool, name of the msbuild tool) + """ + # Select a name for the output file. + out_file = '' + vc_tool = '' + msbuild_tool = '' + output_file_map = { + 'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'), + 'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'), + 'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'), + 'windows_driver': ('VCLinkerTool', 'Link', '$(OutDir)', '.sys'), + 'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'), + } + output_file_props = output_file_map.get(spec['type']) + if output_file_props and int(spec.get('msvs_auto_output_file', 1)): + vc_tool, msbuild_tool, out_dir, suffix = output_file_props + if spec.get('standalone_static_library', 0): + out_dir = '$(OutDir)' + out_dir = spec.get('product_dir', out_dir) + product_extension = spec.get('product_extension') + if product_extension: + suffix = '.' 
+ product_extension + elif msbuild: + suffix = '$(TargetExt)' + prefix = spec.get('product_prefix', '') + product_name = spec.get('product_name', '$(ProjectName)') + out_file = ntpath.join(out_dir, prefix + product_name + suffix) + return out_file, vc_tool, msbuild_tool + + +def _GetOutputTargetExt(spec): + """Returns the extension for this target, including the dot + + If product_extension is specified, set target_extension to this to avoid + MSB8012, returns None otherwise. Ignores any target_extension settings in + the input files. + + Arguments: + spec: The target dictionary containing the properties of the target. + Returns: + A string with the extension, or None + """ + target_extension = spec.get('product_extension') + if target_extension: + return '.' + target_extension + return None + + +def _GetDefines(config): + """Returns the list of preprocessor definitions for this configuation. + + Arguments: + config: The dictionary that defines the special processing to be done + for this configuration. + Returns: + The list of preprocessor definitions. + """ + defines = [] + for d in config.get('defines', []): + if type(d) == list: + fd = '='.join([str(dpart) for dpart in d]) + else: + fd = str(d) + defines.append(fd) + return defines + + +def _GetDisabledWarnings(config): + return [str(i) for i in config.get('msvs_disabled_warnings', [])] + + +def _GetModuleDefinition(spec): + def_file = '' + if spec['type'] in ['shared_library', 'loadable_module', 'executable', + 'windows_driver']: + def_files = [s for s in spec.get('sources', []) if s.endswith('.def')] + if len(def_files) == 1: + def_file = _FixPath(def_files[0]) + elif def_files: + raise ValueError( + 'Multiple module definition files in one target, target %s lists ' + 'multiple .def files: %s' % ( + spec['target_name'], ' '.join(def_files))) + return def_file + + +def _ConvertToolsToExpectedForm(tools): + """Convert tools to a form expected by Visual Studio. + + Arguments: + tools: A dictionary of settings; the tool name is the key. + Returns: + A list of Tool objects. + """ + tool_list = [] + for tool, settings in tools.iteritems(): + # Collapse settings with lists. + settings_fixed = {} + for setting, value in settings.iteritems(): + if type(value) == list: + if ((tool == 'VCLinkerTool' and + setting == 'AdditionalDependencies') or + setting == 'AdditionalOptions'): + settings_fixed[setting] = ' '.join(value) + else: + settings_fixed[setting] = ';'.join(value) + else: + settings_fixed[setting] = value + # Add in this tool. + tool_list.append(MSVSProject.Tool(tool, settings_fixed)) + return tool_list + + +def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name): + """Add to the project file the configuration specified by config. + + Arguments: + p: The target project being generated. + spec: the target project dict. + tools: A dictionary of settings; the tool name is the key. + config: The dictionary that defines the special processing to be done + for this configuration. + config_type: The configuration type, a number as defined by Microsoft. + config_name: The name of the configuration. + """ + attributes = _GetMSVSAttributes(spec, config, config_type) + # Add in this configuration. + tool_list = _ConvertToolsToExpectedForm(tools) + p.AddConfig(_ConfigFullName(config_name, config), + attrs=attributes, tools=tool_list) + + +def _GetMSVSAttributes(spec, config, config_type): + # Prepare configuration attributes. 
+ prepared_attrs = {} + source_attrs = config.get('msvs_configuration_attributes', {}) + for a in source_attrs: + prepared_attrs[a] = source_attrs[a] + # Add props files. + vsprops_dirs = config.get('msvs_props', []) + vsprops_dirs = _FixPaths(vsprops_dirs) + if vsprops_dirs: + prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs) + # Set configuration type. + prepared_attrs['ConfigurationType'] = config_type + output_dir = prepared_attrs.get('OutputDirectory', + '$(SolutionDir)$(ConfigurationName)') + prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\' + if 'IntermediateDirectory' not in prepared_attrs: + intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)' + prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\' + else: + intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\' + intermediate = MSVSSettings.FixVCMacroSlashes(intermediate) + prepared_attrs['IntermediateDirectory'] = intermediate + return prepared_attrs + + +def _AddNormalizedSources(sources_set, sources_array): + sources_set.update(_NormalizedSource(s) for s in sources_array) + + +def _PrepareListOfSources(spec, generator_flags, gyp_file): + """Prepare list of sources and excluded sources. + + Besides the sources specified directly in the spec, adds the gyp file so + that a change to it will cause a re-compile. Also adds appropriate sources + for actions and copies. Assumes later stage will un-exclude files which + have custom build steps attached. + + Arguments: + spec: The target dictionary containing the properties of the target. + gyp_file: The name of the gyp file. + Returns: + A pair of (list of sources, list of excluded sources). + The sources will be relative to the gyp file. + """ + sources = OrderedSet() + _AddNormalizedSources(sources, spec.get('sources', [])) + excluded_sources = OrderedSet() + # Add in the gyp file. + if not generator_flags.get('standalone'): + sources.add(gyp_file) + + # Add in 'action' inputs and outputs. + for a in spec.get('actions', []): + inputs = a['inputs'] + inputs = [_NormalizedSource(i) for i in inputs] + # Add all inputs to sources and excluded sources. + inputs = OrderedSet(inputs) + sources.update(inputs) + if not spec.get('msvs_external_builder'): + excluded_sources.update(inputs) + if int(a.get('process_outputs_as_sources', False)): + _AddNormalizedSources(sources, a.get('outputs', [])) + # Add in 'copies' inputs and outputs. + for cpy in spec.get('copies', []): + _AddNormalizedSources(sources, cpy.get('files', [])) + return (sources, excluded_sources) + + +def _AdjustSourcesAndConvertToFilterHierarchy( + spec, options, gyp_dir, sources, excluded_sources, list_excluded, version): + """Adjusts the list of sources and excluded sources. + + Also converts the sets to lists. + + Arguments: + spec: The target dictionary containing the properties of the target. + options: Global generator options. + gyp_dir: The path to the gyp file being processed. + sources: A set of sources to be included for this project. + excluded_sources: A set of sources to be excluded for this project. + version: A MSVSVersion object. + Returns: + A trio of (list of sources, list of excluded sources, + path of excluded IDL file) + """ + # Exclude excluded sources coming into the generator. + excluded_sources.update(OrderedSet(spec.get('sources_excluded', []))) + # Add excluded sources into sources for good measure. + sources.update(excluded_sources) + # Convert to proper windows form. + # NOTE: sources goes from being a set to a list here. 
+ # NOTE: excluded_sources goes from being a set to a list here. + sources = _FixPaths(sources) + # Convert to proper windows form. + excluded_sources = _FixPaths(excluded_sources) + + excluded_idl = _IdlFilesHandledNonNatively(spec, sources) + + precompiled_related = _GetPrecompileRelatedFiles(spec) + # Find the excluded ones, minus the precompiled header related ones. + fully_excluded = [i for i in excluded_sources if i not in precompiled_related] + + # Convert to folders and the right slashes. + sources = [i.split('\\') for i in sources] + sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded, + list_excluded=list_excluded, + msvs_version=version) + + # Prune filters with a single child to flatten ugly directory structures + # such as ../../src/modules/module1 etc. + if version.UsesVcxproj(): + while all([isinstance(s, MSVSProject.Filter) for s in sources]) \ + and len(set([s.name for s in sources])) == 1: + assert all([len(s.contents) == 1 for s in sources]) + sources = [s.contents[0] for s in sources] + else: + while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter): + sources = sources[0].contents + + return sources, excluded_sources, excluded_idl + + +def _IdlFilesHandledNonNatively(spec, sources): + # If any non-native rules use 'idl' as an extension exclude idl files. + # Gather a list here to use later. + using_idl = False + for rule in spec.get('rules', []): + if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)): + using_idl = True + break + if using_idl: + excluded_idl = [i for i in sources if i.endswith('.idl')] + else: + excluded_idl = [] + return excluded_idl + + +def _GetPrecompileRelatedFiles(spec): + # Gather a list of precompiled header related sources. + precompiled_related = [] + for _, config in spec['configurations'].iteritems(): + for k in precomp_keys: + f = config.get(k) + if f: + precompiled_related.append(_FixPath(f)) + return precompiled_related + + +def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl, + list_excluded): + exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl) + for file_name, excluded_configs in exclusions.iteritems(): + if (not list_excluded and + len(excluded_configs) == len(spec['configurations'])): + # If we're not listing excluded files, then they won't appear in the + # project, so don't try to configure them to be excluded. + pass + else: + for config_name, config in excluded_configs: + p.AddFileConfig(file_name, _ConfigFullName(config_name, config), + {'ExcludedFromBuild': 'true'}) + + +def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl): + exclusions = {} + # Exclude excluded sources from being built. + for f in excluded_sources: + excluded_configs = [] + for config_name, config in spec['configurations'].iteritems(): + precomped = [_FixPath(config.get(i, '')) for i in precomp_keys] + # Don't do this for ones that are precompiled header related. + if f not in precomped: + excluded_configs.append((config_name, config)) + exclusions[f] = excluded_configs + # If any non-native rules use 'idl' as an extension exclude idl files. + # Exclude them now. + for f in excluded_idl: + excluded_configs = [] + for config_name, config in spec['configurations'].iteritems(): + excluded_configs.append((config_name, config)) + exclusions[f] = excluded_configs + return exclusions + + +def _AddToolFilesToMSVS(p, spec): + # Add in tool files (rules). 
+ tool_files = OrderedSet() + for _, config in spec['configurations'].iteritems(): + for f in config.get('msvs_tool_files', []): + tool_files.add(f) + for f in tool_files: + p.AddToolFile(f) + + +def _HandlePreCompiledHeaders(p, sources, spec): + # Pre-compiled header source stubs need a different compiler flag + # (generate precompiled header) and any source file not of the same + # kind (i.e. C vs. C++) as the precompiled header source stub needs + # to have use of precompiled headers disabled. + extensions_excluded_from_precompile = [] + for config_name, config in spec['configurations'].iteritems(): + source = config.get('msvs_precompiled_source') + if source: + source = _FixPath(source) + # UsePrecompiledHeader=1 for if using precompiled headers. + tool = MSVSProject.Tool('VCCLCompilerTool', + {'UsePrecompiledHeader': '1'}) + p.AddFileConfig(source, _ConfigFullName(config_name, config), + {}, tools=[tool]) + basename, extension = os.path.splitext(source) + if extension == '.c': + extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx'] + else: + extensions_excluded_from_precompile = ['.c'] + def DisableForSourceTree(source_tree): + for source in source_tree: + if isinstance(source, MSVSProject.Filter): + DisableForSourceTree(source.contents) + else: + basename, extension = os.path.splitext(source) + if extension in extensions_excluded_from_precompile: + for config_name, config in spec['configurations'].iteritems(): + tool = MSVSProject.Tool('VCCLCompilerTool', + {'UsePrecompiledHeader': '0', + 'ForcedIncludeFiles': '$(NOINHERIT)'}) + p.AddFileConfig(_FixPath(source), + _ConfigFullName(config_name, config), + {}, tools=[tool]) + # Do nothing if there was no precompiled source. + if extensions_excluded_from_precompile: + DisableForSourceTree(sources) + + +def _AddActions(actions_to_add, spec, relative_path_of_gyp_file): + # Add actions. + actions = spec.get('actions', []) + # Don't setup_env every time. When all the actions are run together in one + # batch file in VS, the PATH will grow too long. + # Membership in this set means that the cygwin environment has been set up, + # and does not need to be set up again. + have_setup_env = set() + for a in actions: + # Attach actions to the gyp file if nothing else is there. + inputs = a.get('inputs') or [relative_path_of_gyp_file] + attached_to = inputs[0] + need_setup_env = attached_to not in have_setup_env + cmd = _BuildCommandLineForRule(spec, a, has_input_path=False, + do_setup_env=need_setup_env) + have_setup_env.add(attached_to) + # Add the action. + _AddActionStep(actions_to_add, + inputs=inputs, + outputs=a.get('outputs', []), + description=a.get('message', a['action_name']), + command=cmd) + + +def _WriteMSVSUserFile(project_path, version, spec): + # Add run_as and test targets. + if 'run_as' in spec: + run_as = spec['run_as'] + action = run_as.get('action', []) + environment = run_as.get('environment', []) + working_directory = run_as.get('working_directory', '.') + elif int(spec.get('test', 0)): + action = ['$(TargetPath)', '--gtest_print_time'] + environment = [] + working_directory = '.' + else: + return # Nothing to add + # Write out the user file. 
+ user_file = _CreateMSVSUserFile(project_path, version, spec) + for config_name, c_data in spec['configurations'].iteritems(): + user_file.AddDebugSettings(_ConfigFullName(config_name, c_data), + action, environment, working_directory) + user_file.WriteIfChanged() + + +def _AddCopies(actions_to_add, spec): + copies = _GetCopies(spec) + for inputs, outputs, cmd, description in copies: + _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs, + description=description, command=cmd) + + +def _GetCopies(spec): + copies = [] + # Add copies. + for cpy in spec.get('copies', []): + for src in cpy.get('files', []): + dst = os.path.join(cpy['destination'], os.path.basename(src)) + # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and + # outputs, so do the same for our generated command line. + if src.endswith('/'): + src_bare = src[:-1] + base_dir = posixpath.split(src_bare)[0] + outer_dir = posixpath.split(src_bare)[1] + cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % ( + _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir) + copies.append(([src], ['dummy_copies', dst], cmd, + 'Copying %s to %s' % (src, dst))) + else: + cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % ( + _FixPath(cpy['destination']), _FixPath(src), _FixPath(dst)) + copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst))) + return copies + + +def _GetPathDict(root, path): + # |path| will eventually be empty (in the recursive calls) if it was initially + # relative; otherwise it will eventually end up as '\', 'D:\', etc. + if not path or path.endswith(os.sep): + return root + parent, folder = os.path.split(path) + parent_dict = _GetPathDict(root, parent) + if folder not in parent_dict: + parent_dict[folder] = dict() + return parent_dict[folder] + + +def _DictsToFolders(base_path, bucket, flat): + # Convert to folders recursively. + children = [] + for folder, contents in bucket.iteritems(): + if type(contents) == dict: + folder_children = _DictsToFolders(os.path.join(base_path, folder), + contents, flat) + if flat: + children += folder_children + else: + folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder), + name='(' + folder + ')', + entries=folder_children) + children.append(folder_children) + else: + children.append(contents) + return children + + +def _CollapseSingles(parent, node): + # Recursively explorer the tree of dicts looking for projects which are + # the sole item in a folder which has the same name as the project. Bring + # such projects up one level. + if (type(node) == dict and + len(node) == 1 and + node.keys()[0] == parent + '.vcproj'): + return node[node.keys()[0]] + if type(node) != dict: + return node + for child in node: + node[child] = _CollapseSingles(child, node[child]) + return node + + +def _GatherSolutionFolders(sln_projects, project_objects, flat): + root = {} + # Convert into a tree of dicts on path. + for p in sln_projects: + gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2] + gyp_dir = os.path.dirname(gyp_file) + path_dict = _GetPathDict(root, gyp_dir) + path_dict[target + '.vcproj'] = project_objects[p] + # Walk down from the top until we hit a folder that has more than one entry. + # In practice, this strips the top-level "src/" dir from the hierarchy in + # the solution. + while len(root) == 1 and type(root[root.keys()[0]]) == dict: + root = root[root.keys()[0]] + # Collapse singles. + root = _CollapseSingles('', root) + # Merge buckets until everything is a root entry. 
+ return _DictsToFolders('', root, flat) + + +def _GetPathOfProject(qualified_target, spec, options, msvs_version): + default_config = _GetDefaultConfiguration(spec) + proj_filename = default_config.get('msvs_existing_vcproj') + if not proj_filename: + proj_filename = (spec['target_name'] + options.suffix + + msvs_version.ProjectExtension()) + + build_file = gyp.common.BuildFile(qualified_target) + proj_path = os.path.join(os.path.dirname(build_file), proj_filename) + fix_prefix = None + if options.generator_output: + project_dir_path = os.path.dirname(os.path.abspath(proj_path)) + proj_path = os.path.join(options.generator_output, proj_path) + fix_prefix = gyp.common.RelativePath(project_dir_path, + os.path.dirname(proj_path)) + return proj_path, fix_prefix + + +def _GetPlatformOverridesOfProject(spec): + # Prepare a dict indicating which project configurations are used for which + # solution configurations for this target. + config_platform_overrides = {} + for config_name, c in spec['configurations'].iteritems(): + config_fullname = _ConfigFullName(config_name, c) + platform = c.get('msvs_target_platform', _ConfigPlatform(c)) + fixed_config_fullname = '%s|%s' % ( + _ConfigBaseName(config_name, _ConfigPlatform(c)), platform) + config_platform_overrides[config_fullname] = fixed_config_fullname + return config_platform_overrides + + +def _CreateProjectObjects(target_list, target_dicts, options, msvs_version): + """Create a MSVSProject object for the targets found in target list. + + Arguments: + target_list: the list of targets to generate project objects for. + target_dicts: the dictionary of specifications. + options: global generator options. + msvs_version: the MSVSVersion object. + Returns: + A set of created projects, keyed by target. + """ + global fixpath_prefix + # Generate each project. + projects = {} + for qualified_target in target_list: + spec = target_dicts[qualified_target] + if spec['toolset'] != 'target': + raise GypError( + 'Multiple toolsets not supported in msvs build (target %s)' % + qualified_target) + proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec, + options, msvs_version) + guid = _GetGuidOfProject(proj_path, spec) + overrides = _GetPlatformOverridesOfProject(spec) + build_file = gyp.common.BuildFile(qualified_target) + # Create object for this project. + obj = MSVSNew.MSVSProject( + proj_path, + name=spec['target_name'], + guid=guid, + spec=spec, + build_file=build_file, + config_platform_overrides=overrides, + fixpath_prefix=fixpath_prefix) + # Set project toolset if any (MS build only) + if msvs_version.UsesVcxproj(): + obj.set_msbuild_toolset( + _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version)) + projects[qualified_target] = obj + # Set all the dependencies, but not if we are using an external builder like + # ninja + for project in projects.values(): + if not project.spec.get('msvs_external_builder'): + deps = project.spec.get('dependencies', []) + deps = [projects[d] for d in deps] + project.set_dependencies(deps) + return projects + + +def _InitNinjaFlavor(params, target_list, target_dicts): + """Initialize targets for the ninja flavor. + + This sets up the necessary variables in the targets to generate msvs projects + that use ninja as an external builder. The variables in the spec are only set + if they have not been set. This allows individual specs to override the + default values initialized here. + Arguments: + params: Params provided to the generator. + target_list: List of target pairs: 'base/base.gyp:base'. 
+ target_dicts: Dict of target properties keyed on target pair. + """ + for qualified_target in target_list: + spec = target_dicts[qualified_target] + if spec.get('msvs_external_builder'): + # The spec explicitly defined an external builder, so don't change it. + continue + + path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe') + + spec['msvs_external_builder'] = 'ninja' + if not spec.get('msvs_external_builder_out_dir'): + gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target) + gyp_dir = os.path.dirname(gyp_file) + configuration = '$(Configuration)' + if params.get('target_arch') == 'x64': + configuration += '_x64' + spec['msvs_external_builder_out_dir'] = os.path.join( + gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir), + ninja_generator.ComputeOutputDir(params), + configuration) + if not spec.get('msvs_external_builder_build_cmd'): + spec['msvs_external_builder_build_cmd'] = [ + path_to_ninja, + '-C', + '$(OutDir)', + '$(ProjectName)', + ] + if not spec.get('msvs_external_builder_clean_cmd'): + spec['msvs_external_builder_clean_cmd'] = [ + path_to_ninja, + '-C', + '$(OutDir)', + '-tclean', + '$(ProjectName)', + ] + + +def CalculateVariables(default_variables, params): + """Generated variables that require params to be known.""" + + generator_flags = params.get('generator_flags', {}) + + # Select project file format version (if unset, default to auto detecting). + msvs_version = MSVSVersion.SelectVisualStudioVersion( + generator_flags.get('msvs_version', 'auto')) + # Stash msvs_version for later (so we don't have to probe the system twice). + params['msvs_version'] = msvs_version + + # Set a variable so conditions can be based on msvs_version. + default_variables['MSVS_VERSION'] = msvs_version.ShortName() + + # To determine processor word size on Windows, in addition to checking + # PROCESSOR_ARCHITECTURE (which reflects the word size of the current + # process), it is also necessary to check PROCESSOR_ARCITEW6432 (which + # contains the actual word size of the system when running thru WOW64). 
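# Illustrative example (not part of the vendored GYP source): a 32-bit
# Python running under WOW64 on 64-bit Windows typically sees
#   PROCESSOR_ARCHITECTURE == 'x86'
#   PROCESSOR_ARCHITEW6432 == 'AMD64'
# so the substring check below still reports MSVS_OS_BITS = 64.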
+ if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or + os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0): + default_variables['MSVS_OS_BITS'] = 64 + else: + default_variables['MSVS_OS_BITS'] = 32 + + if gyp.common.GetFlavor(params) == 'ninja': + default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen' + + +def PerformBuild(data, configurations, params): + options = params['options'] + msvs_version = params['msvs_version'] + devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com') + + for build_file, build_file_dict in data.iteritems(): + (build_file_root, build_file_ext) = os.path.splitext(build_file) + if build_file_ext != '.gyp': + continue + sln_path = build_file_root + options.suffix + '.sln' + if options.generator_output: + sln_path = os.path.join(options.generator_output, sln_path) + + for config in configurations: + arguments = [devenv, sln_path, '/Build', config] + print 'Building [%s]: %s' % (config, arguments) + rtn = subprocess.check_call(arguments) + + +def CalculateGeneratorInputInfo(params): + if params.get('flavor') == 'ninja': + toplevel = params['options'].toplevel_dir + qualified_out_dir = os.path.normpath(os.path.join( + toplevel, ninja_generator.ComputeOutputDir(params), + 'gypfiles-msvs-ninja')) + + global generator_filelist_paths + generator_filelist_paths = { + 'toplevel': toplevel, + 'qualified_out_dir': qualified_out_dir, + } + +def GenerateOutput(target_list, target_dicts, data, params): + """Generate .sln and .vcproj files. + + This is the entry point for this generator. + Arguments: + target_list: List of target pairs: 'base/base.gyp:base'. + target_dicts: Dict of target properties keyed on target pair. + data: Dictionary containing per .gyp data. + """ + global fixpath_prefix + + options = params['options'] + + # Get the project file format version back out of where we stashed it in + # GeneratorCalculatedVariables. + msvs_version = params['msvs_version'] + + generator_flags = params.get('generator_flags', {}) + + # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT. + (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts) + + # Optionally use the large PDB workaround for targets marked with + # 'msvs_large_pdb': 1. + (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims( + target_list, target_dicts, generator_default_variables) + + # Optionally configure each spec to use ninja as the external builder. + if params.get('flavor') == 'ninja': + _InitNinjaFlavor(params, target_list, target_dicts) + + # Prepare the set of configurations. + configs = set() + for qualified_target in target_list: + spec = target_dicts[qualified_target] + for config_name, config in spec['configurations'].iteritems(): + configs.add(_ConfigFullName(config_name, config)) + configs = list(configs) + + # Figure out all the projects that will be generated and their guids + project_objects = _CreateProjectObjects(target_list, target_dicts, options, + msvs_version) + + # Generate each project. 
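# Illustrative note (not part of the vendored GYP source): at this point
# `configs` holds every configuration/platform pair used by any target,
# presumably strings of the form 'Debug|Win32' produced by _ConfigFullName,
# and is handed to MSVSNew.MSVSSolution below as the solution's build
# variants, while each entry of `project_objects` is written out as its own
# .vcproj/.vcxproj file.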
+ missing_sources = [] + for project in project_objects.values(): + fixpath_prefix = project.fixpath_prefix + missing_sources.extend(_GenerateProject(project, options, msvs_version, + generator_flags)) + fixpath_prefix = None + + for build_file in data: + # Validate build_file extension + if not build_file.endswith('.gyp'): + continue + sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln' + if options.generator_output: + sln_path = os.path.join(options.generator_output, sln_path) + # Get projects in the solution, and their dependents. + sln_projects = gyp.common.BuildFileTargets(target_list, build_file) + sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects) + # Create folder hierarchy. + root_entries = _GatherSolutionFolders( + sln_projects, project_objects, flat=msvs_version.FlatSolution()) + # Create solution. + sln = MSVSNew.MSVSSolution(sln_path, + entries=root_entries, + variants=configs, + websiteProperties=False, + version=msvs_version) + sln.Write() + + if missing_sources: + error_message = "Missing input files:\n" + \ + '\n'.join(set(missing_sources)) + if generator_flags.get('msvs_error_on_missing_sources', False): + raise GypError(error_message) + else: + print >> sys.stdout, "Warning: " + error_message + + +def _GenerateMSBuildFiltersFile(filters_path, source_files, + rule_dependencies, extension_to_rule_name): + """Generate the filters file. + + This file is used by Visual Studio to organize the presentation of source + files into folders. + + Arguments: + filters_path: The path of the file to be created. + source_files: The hierarchical structure of all the sources. + extension_to_rule_name: A dictionary mapping file extensions to rules. + """ + filter_group = [] + source_group = [] + _AppendFiltersForMSBuild('', source_files, rule_dependencies, + extension_to_rule_name, filter_group, source_group) + if filter_group: + content = ['Project', + {'ToolsVersion': '4.0', + 'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003' + }, + ['ItemGroup'] + filter_group, + ['ItemGroup'] + source_group + ] + easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True) + elif os.path.exists(filters_path): + # We don't need this filter anymore. Delete the old filter file. + os.unlink(filters_path) + + +def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies, + extension_to_rule_name, + filter_group, source_group): + """Creates the list of filters and sources to be added in the filter file. + + Args: + parent_filter_name: The name of the filter under which the sources are + found. + sources: The hierarchy of filters and sources to process. + extension_to_rule_name: A dictionary mapping file extensions to rules. + filter_group: The list to which filter entries will be appended. + source_group: The list to which source entries will be appeneded. + """ + for source in sources: + if isinstance(source, MSVSProject.Filter): + # We have a sub-filter. Create the name of that sub-filter. + if not parent_filter_name: + filter_name = source.name + else: + filter_name = '%s\\%s' % (parent_filter_name, source.name) + # Add the filter to the group. + filter_group.append( + ['Filter', {'Include': filter_name}, + ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]]) + # Recurse and add its dependents. + _AppendFiltersForMSBuild(filter_name, source.contents, + rule_dependencies, extension_to_rule_name, + filter_group, source_group) + else: + # It's a source. Create a source entry. 
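# Illustrative sketch (not part of the vendored GYP source): for a
# hypothetical source 'foo\\bar.cc' sitting under the filter 'foo', the
# lists built here are serialized by easy_xml into roughly
#   <Filter Include="foo">
#     <UniqueIdentifier>{...GUID from MakeGuid('foo')...}</UniqueIdentifier>
#   </Filter>
#   <ClCompile Include="foo\bar.cc">
#     <Filter>foo</Filter>
#   </ClCompile>
# which is all Visual Studio needs to show the file inside that folder.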
+ _, element = _MapFileToMsBuildSourceType(source, rule_dependencies, + extension_to_rule_name) + source_entry = [element, {'Include': source}] + # Specify the filter it is part of, if any. + if parent_filter_name: + source_entry.append(['Filter', parent_filter_name]) + source_group.append(source_entry) + + +def _MapFileToMsBuildSourceType(source, rule_dependencies, + extension_to_rule_name): + """Returns the group and element type of the source file. + + Arguments: + source: The source file name. + extension_to_rule_name: A dictionary mapping file extensions to rules. + + Returns: + A pair of (group this file should be part of, the label of element) + """ + _, ext = os.path.splitext(source) + if ext in extension_to_rule_name: + group = 'rule' + element = extension_to_rule_name[ext] + elif ext in ['.cc', '.cpp', '.c', '.cxx']: + group = 'compile' + element = 'ClCompile' + elif ext in ['.h', '.hxx']: + group = 'include' + element = 'ClInclude' + elif ext == '.rc': + group = 'resource' + element = 'ResourceCompile' + elif ext == '.asm': + group = 'masm' + element = 'MASM' + elif ext == '.idl': + group = 'midl' + element = 'Midl' + elif source in rule_dependencies: + group = 'rule_dependency' + element = 'CustomBuild' + else: + group = 'none' + element = 'None' + return (group, element) + + +def _GenerateRulesForMSBuild(output_dir, options, spec, + sources, excluded_sources, + props_files_of_rules, targets_files_of_rules, + actions_to_add, rule_dependencies, + extension_to_rule_name): + # MSBuild rules are implemented using three files: an XML file, a .targets + # file and a .props file. + # See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx + # for more details. + rules = spec.get('rules', []) + rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))] + rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))] + + msbuild_rules = [] + for rule in rules_native: + # Skip a rule with no action and no inputs. + if 'action' not in rule and not rule.get('rule_sources', []): + continue + msbuild_rule = MSBuildRule(rule, spec) + msbuild_rules.append(msbuild_rule) + rule_dependencies.update(msbuild_rule.additional_dependencies.split(';')) + extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name + if msbuild_rules: + base = spec['target_name'] + options.suffix + props_name = base + '.props' + targets_name = base + '.targets' + xml_name = base + '.xml' + + props_files_of_rules.add(props_name) + targets_files_of_rules.add(targets_name) + + props_path = os.path.join(output_dir, props_name) + targets_path = os.path.join(output_dir, targets_name) + xml_path = os.path.join(output_dir, xml_name) + + _GenerateMSBuildRulePropsFile(props_path, msbuild_rules) + _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules) + _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules) + + if rules_external: + _GenerateExternalRules(rules_external, output_dir, spec, + sources, options, actions_to_add) + _AdjustSourcesForRules(rules, sources, excluded_sources, True) + + +class MSBuildRule(object): + """Used to store information used to generate an MSBuild rule. + + Attributes: + rule_name: The rule name, sanitized to use in XML. + target_name: The name of the target. + after_targets: The name of the AfterTargets element. + before_targets: The name of the BeforeTargets element. + depends_on: The name of the DependsOn element. + compute_output: The name of the ComputeOutput element. 
+ dirs_to_make: The name of the DirsToMake element. + inputs: The name of the _inputs element. + tlog: The name of the _tlog element. + extension: The extension this rule applies to. + description: The message displayed when this rule is invoked. + additional_dependencies: A string listing additional dependencies. + outputs: The outputs of this rule. + command: The command used to run the rule. + """ + + def __init__(self, rule, spec): + self.display_name = rule['rule_name'] + # Assure that the rule name is only characters and numbers + self.rule_name = re.sub(r'\W', '_', self.display_name) + # Create the various element names, following the example set by the + # Visual Studio 2008 to 2010 conversion. I don't know if VS2010 + # is sensitive to the exact names. + self.target_name = '_' + self.rule_name + self.after_targets = self.rule_name + 'AfterTargets' + self.before_targets = self.rule_name + 'BeforeTargets' + self.depends_on = self.rule_name + 'DependsOn' + self.compute_output = 'Compute%sOutput' % self.rule_name + self.dirs_to_make = self.rule_name + 'DirsToMake' + self.inputs = self.rule_name + '_inputs' + self.tlog = self.rule_name + '_tlog' + self.extension = rule['extension'] + if not self.extension.startswith('.'): + self.extension = '.' + self.extension + + self.description = MSVSSettings.ConvertVCMacrosToMSBuild( + rule.get('message', self.rule_name)) + old_additional_dependencies = _FixPaths(rule.get('inputs', [])) + self.additional_dependencies = ( + ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i) + for i in old_additional_dependencies])) + old_outputs = _FixPaths(rule.get('outputs', [])) + self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i) + for i in old_outputs]) + old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True, + do_setup_env=True) + self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command) + + +def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules): + """Generate the .props file.""" + content = ['Project', + {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}] + for rule in msbuild_rules: + content.extend([ + ['PropertyGroup', + {'Condition': "'$(%s)' == '' and '$(%s)' == '' and " + "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets, + rule.after_targets) + }, + [rule.before_targets, 'Midl'], + [rule.after_targets, 'CustomBuild'], + ], + ['PropertyGroup', + [rule.depends_on, + {'Condition': "'$(ConfigurationType)' != 'Makefile'"}, + '_SelectedFiles;$(%s)' % rule.depends_on + ], + ], + ['ItemDefinitionGroup', + [rule.rule_name, + ['CommandLineTemplate', rule.command], + ['Outputs', rule.outputs], + ['ExecutionDescription', rule.description], + ['AdditionalDependencies', rule.additional_dependencies], + ], + ] + ]) + easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True) + + +def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules): + """Generate the .targets file.""" + content = ['Project', + {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003' + } + ] + item_group = [ + 'ItemGroup', + ['PropertyPageSchema', + {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'} + ] + ] + for rule in msbuild_rules: + item_group.append( + ['AvailableItemName', + {'Include': rule.rule_name}, + ['Targets', rule.target_name], + ]) + content.append(item_group) + + for rule in msbuild_rules: + content.append( + ['UsingTask', + {'TaskName': rule.rule_name, + 'TaskFactory': 'XamlTaskFactory', + 'AssemblyName': 'Microsoft.Build.Tasks.v4.0' + }, + ['Task', 
'$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'], + ]) + for rule in msbuild_rules: + rule_name = rule.rule_name + target_outputs = '%%(%s.Outputs)' % rule_name + target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);' + '$(MSBuildProjectFile)') % (rule_name, rule_name) + rule_inputs = '%%(%s.Identity)' % rule_name + extension_condition = ("'%(Extension)'=='.obj' or " + "'%(Extension)'=='.res' or " + "'%(Extension)'=='.rsc' or " + "'%(Extension)'=='.lib'") + remove_section = [ + 'ItemGroup', + {'Condition': "'@(SelectedFiles)' != ''"}, + [rule_name, + {'Remove': '@(%s)' % rule_name, + 'Condition': "'%(Identity)' != '@(SelectedFiles)'" + } + ] + ] + inputs_section = [ + 'ItemGroup', + [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}] + ] + logging_section = [ + 'ItemGroup', + [rule.tlog, + {'Include': '%%(%s.Outputs)' % rule_name, + 'Condition': ("'%%(%s.Outputs)' != '' and " + "'%%(%s.ExcludedFromBuild)' != 'true'" % + (rule_name, rule_name)) + }, + ['Source', "@(%s, '|')" % rule_name], + ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs], + ], + ] + message_section = [ + 'Message', + {'Importance': 'High', + 'Text': '%%(%s.ExecutionDescription)' % rule_name + } + ] + write_tlog_section = [ + 'WriteLinesToFile', + {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != " + "'true'" % (rule.tlog, rule.tlog), + 'File': '$(IntDir)$(ProjectName).write.1.tlog', + 'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog, + rule.tlog) + } + ] + read_tlog_section = [ + 'WriteLinesToFile', + {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != " + "'true'" % (rule.tlog, rule.tlog), + 'File': '$(IntDir)$(ProjectName).read.1.tlog', + 'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog) + } + ] + command_and_input_section = [ + rule_name, + {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != " + "'true'" % (rule_name, rule_name), + 'EchoOff': 'true', + 'StandardOutputImportance': 'High', + 'StandardErrorImportance': 'High', + 'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name, + 'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name, + 'Inputs': rule_inputs + } + ] + content.extend([ + ['Target', + {'Name': rule.target_name, + 'BeforeTargets': '$(%s)' % rule.before_targets, + 'AfterTargets': '$(%s)' % rule.after_targets, + 'Condition': "'@(%s)' != ''" % rule_name, + 'DependsOnTargets': '$(%s);%s' % (rule.depends_on, + rule.compute_output), + 'Outputs': target_outputs, + 'Inputs': target_inputs + }, + remove_section, + inputs_section, + logging_section, + message_section, + write_tlog_section, + read_tlog_section, + command_and_input_section, + ], + ['PropertyGroup', + ['ComputeLinkInputsTargets', + '$(ComputeLinkInputsTargets);', + '%s;' % rule.compute_output + ], + ['ComputeLibInputsTargets', + '$(ComputeLibInputsTargets);', + '%s;' % rule.compute_output + ], + ], + ['Target', + {'Name': rule.compute_output, + 'Condition': "'@(%s)' != ''" % rule_name + }, + ['ItemGroup', + [rule.dirs_to_make, + {'Condition': "'@(%s)' != '' and " + "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name), + 'Include': '%%(%s.Outputs)' % rule_name + } + ], + ['Link', + {'Include': '%%(%s.Identity)' % rule.dirs_to_make, + 'Condition': extension_condition + } + ], + ['Lib', + {'Include': '%%(%s.Identity)' % rule.dirs_to_make, + 'Condition': extension_condition + } + ], + ['ImpLib', + {'Include': '%%(%s.Identity)' % rule.dirs_to_make, + 'Condition': extension_condition + } + ], + ], + ['MakeDir', + 
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" % + rule.dirs_to_make) + } + ] + ], + ]) + easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True) + + +def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules): + # Generate the .xml file + content = [ + 'ProjectSchemaDefinitions', + {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;' + 'assembly=Microsoft.Build.Framework'), + 'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml', + 'xmlns:sys': 'clr-namespace:System;assembly=mscorlib', + 'xmlns:transformCallback': + 'Microsoft.Cpp.Dev10.ConvertPropertyCallback' + } + ] + for rule in msbuild_rules: + content.extend([ + ['Rule', + {'Name': rule.rule_name, + 'PageTemplate': 'tool', + 'DisplayName': rule.display_name, + 'Order': '200' + }, + ['Rule.DataSource', + ['DataSource', + {'Persistence': 'ProjectFile', + 'ItemType': rule.rule_name + } + ] + ], + ['Rule.Categories', + ['Category', + {'Name': 'General'}, + ['Category.DisplayName', + ['sys:String', 'General'], + ], + ], + ['Category', + {'Name': 'Command Line', + 'Subtype': 'CommandLine' + }, + ['Category.DisplayName', + ['sys:String', 'Command Line'], + ], + ], + ], + ['StringListProperty', + {'Name': 'Inputs', + 'Category': 'Command Line', + 'IsRequired': 'true', + 'Switch': ' ' + }, + ['StringListProperty.DataSource', + ['DataSource', + {'Persistence': 'ProjectFile', + 'ItemType': rule.rule_name, + 'SourceType': 'Item' + } + ] + ], + ], + ['StringProperty', + {'Name': 'CommandLineTemplate', + 'DisplayName': 'Command Line', + 'Visible': 'False', + 'IncludeInCommandLine': 'False' + } + ], + ['DynamicEnumProperty', + {'Name': rule.before_targets, + 'Category': 'General', + 'EnumProvider': 'Targets', + 'IncludeInCommandLine': 'False' + }, + ['DynamicEnumProperty.DisplayName', + ['sys:String', 'Execute Before'], + ], + ['DynamicEnumProperty.Description', + ['sys:String', 'Specifies the targets for the build customization' + ' to run before.' 
+ ], + ], + ['DynamicEnumProperty.ProviderSettings', + ['NameValuePair', + {'Name': 'Exclude', + 'Value': '^%s|^Compute' % rule.before_targets + } + ] + ], + ['DynamicEnumProperty.DataSource', + ['DataSource', + {'Persistence': 'ProjectFile', + 'HasConfigurationCondition': 'true' + } + ] + ], + ], + ['DynamicEnumProperty', + {'Name': rule.after_targets, + 'Category': 'General', + 'EnumProvider': 'Targets', + 'IncludeInCommandLine': 'False' + }, + ['DynamicEnumProperty.DisplayName', + ['sys:String', 'Execute After'], + ], + ['DynamicEnumProperty.Description', + ['sys:String', ('Specifies the targets for the build customization' + ' to run after.') + ], + ], + ['DynamicEnumProperty.ProviderSettings', + ['NameValuePair', + {'Name': 'Exclude', + 'Value': '^%s|^Compute' % rule.after_targets + } + ] + ], + ['DynamicEnumProperty.DataSource', + ['DataSource', + {'Persistence': 'ProjectFile', + 'ItemType': '', + 'HasConfigurationCondition': 'true' + } + ] + ], + ], + ['StringListProperty', + {'Name': 'Outputs', + 'DisplayName': 'Outputs', + 'Visible': 'False', + 'IncludeInCommandLine': 'False' + } + ], + ['StringProperty', + {'Name': 'ExecutionDescription', + 'DisplayName': 'Execution Description', + 'Visible': 'False', + 'IncludeInCommandLine': 'False' + } + ], + ['StringListProperty', + {'Name': 'AdditionalDependencies', + 'DisplayName': 'Additional Dependencies', + 'IncludeInCommandLine': 'False', + 'Visible': 'false' + } + ], + ['StringProperty', + {'Subtype': 'AdditionalOptions', + 'Name': 'AdditionalOptions', + 'Category': 'Command Line' + }, + ['StringProperty.DisplayName', + ['sys:String', 'Additional Options'], + ], + ['StringProperty.Description', + ['sys:String', 'Additional Options'], + ], + ], + ], + ['ItemType', + {'Name': rule.rule_name, + 'DisplayName': rule.display_name + } + ], + ['FileExtension', + {'Name': '*' + rule.extension, + 'ContentType': rule.rule_name + } + ], + ['ContentType', + {'Name': rule.rule_name, + 'DisplayName': '', + 'ItemType': rule.rule_name + } + ] + ]) + easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True) + + +def _GetConfigurationAndPlatform(name, settings): + configuration = name.rsplit('_', 1)[0] + platform = settings.get('msvs_configuration_platform', 'Win32') + return (configuration, platform) + + +def _GetConfigurationCondition(name, settings): + return (r"'$(Configuration)|$(Platform)'=='%s|%s'" % + _GetConfigurationAndPlatform(name, settings)) + + +def _GetMSBuildProjectConfigurations(configurations): + group = ['ItemGroup', {'Label': 'ProjectConfigurations'}] + for (name, settings) in sorted(configurations.iteritems()): + configuration, platform = _GetConfigurationAndPlatform(name, settings) + designation = '%s|%s' % (configuration, platform) + group.append( + ['ProjectConfiguration', {'Include': designation}, + ['Configuration', configuration], + ['Platform', platform]]) + return [group] + + +def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name): + namespace = os.path.splitext(gyp_file_name)[0] + properties = [ + ['PropertyGroup', {'Label': 'Globals'}, + ['ProjectGuid', guid], + ['Keyword', 'Win32Proj'], + ['RootNamespace', namespace], + ['IgnoreWarnCompileDuplicatedFilename', 'true'], + ] + ] + + if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \ + os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64': + properties[0].append(['PreferredToolArchitecture', 'x64']) + + if spec.get('msvs_target_platform_version'): + target_platform_version = spec.get('msvs_target_platform_version') + 
properties[0].append(['WindowsTargetPlatformVersion', + target_platform_version]) + if spec.get('msvs_target_platform_minversion'): + target_platform_minversion = spec.get('msvs_target_platform_minversion') + properties[0].append(['WindowsTargetPlatformMinVersion', + target_platform_minversion]) + else: + properties[0].append(['WindowsTargetPlatformMinVersion', + target_platform_version]) + + if spec.get('msvs_enable_winrt'): + properties[0].append(['DefaultLanguage', 'en-US']) + properties[0].append(['AppContainerApplication', 'true']) + if spec.get('msvs_application_type_revision'): + app_type_revision = spec.get('msvs_application_type_revision') + properties[0].append(['ApplicationTypeRevision', app_type_revision]) + else: + properties[0].append(['ApplicationTypeRevision', '8.1']) + if spec.get('msvs_enable_winphone'): + properties[0].append(['ApplicationType', 'Windows Phone']) + else: + properties[0].append(['ApplicationType', 'Windows Store']) + + platform_name = None + msvs_windows_sdk_version = None + for configuration in spec['configurations'].itervalues(): + platform_name = platform_name or _ConfigPlatform(configuration) + msvs_windows_sdk_version = (msvs_windows_sdk_version or + _ConfigWindowsTargetPlatformVersion(configuration)) + if platform_name and msvs_windows_sdk_version: + break + + if platform_name == 'ARM': + properties[0].append(['WindowsSDKDesktopARMSupport', 'true']) + if msvs_windows_sdk_version: + properties[0].append(['WindowsTargetPlatformVersion', + str(msvs_windows_sdk_version)]) + + return properties + + +def _GetMSBuildConfigurationDetails(spec, build_file): + properties = {} + for name, settings in spec['configurations'].iteritems(): + msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file) + condition = _GetConfigurationCondition(name, settings) + character_set = msbuild_attributes.get('CharacterSet') + config_type = msbuild_attributes.get('ConfigurationType') + _AddConditionalProperty(properties, condition, 'ConfigurationType', + config_type) + if config_type == 'Driver': + _AddConditionalProperty(properties, condition, 'DriverType', 'WDM') + _AddConditionalProperty(properties, condition, 'TargetVersion', + _ConfigTargetVersion(settings)) + if character_set: + if 'msvs_enable_winrt' not in spec : + _AddConditionalProperty(properties, condition, 'CharacterSet', + character_set) + return _GetMSBuildPropertyGroup(spec, 'Configuration', properties) + + +def _GetMSBuildLocalProperties(msbuild_toolset): + # Currently the only local property we support is PlatformToolset + properties = {} + if msbuild_toolset: + properties = [ + ['PropertyGroup', {'Label': 'Locals'}, + ['PlatformToolset', msbuild_toolset], + ] + ] + return properties + + +def _GetMSBuildPropertySheets(configurations): + user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props' + additional_props = {} + props_specified = False + for name, settings in sorted(configurations.iteritems()): + configuration = _GetConfigurationCondition(name, settings) + if settings.has_key('msbuild_props'): + additional_props[configuration] = _FixPaths(settings['msbuild_props']) + props_specified = True + else: + additional_props[configuration] = '' + + if not props_specified: + return [ + ['ImportGroup', + {'Label': 'PropertySheets'}, + ['Import', + {'Project': user_props, + 'Condition': "exists('%s')" % user_props, + 'Label': 'LocalAppDataPlatform' + } + ] + ] + ] + else: + sheets = [] + for condition, props in additional_props.iteritems(): + import_group = [ + 'ImportGroup', + {'Label': 
'PropertySheets', + 'Condition': condition + }, + ['Import', + {'Project': user_props, + 'Condition': "exists('%s')" % user_props, + 'Label': 'LocalAppDataPlatform' + } + ] + ] + for props_file in props: + import_group.append(['Import', {'Project':props_file}]) + sheets.append(import_group) + return sheets + +def _ConvertMSVSBuildAttributes(spec, config, build_file): + config_type = _GetMSVSConfigurationType(spec, build_file) + msvs_attributes = _GetMSVSAttributes(spec, config, config_type) + msbuild_attributes = {} + for a in msvs_attributes: + if a in ['IntermediateDirectory', 'OutputDirectory']: + directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a]) + if not directory.endswith('\\'): + directory += '\\' + msbuild_attributes[a] = directory + elif a == 'CharacterSet': + msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a]) + elif a == 'ConfigurationType': + msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a]) + else: + print 'Warning: Do not know how to convert MSVS attribute ' + a + return msbuild_attributes + + +def _ConvertMSVSCharacterSet(char_set): + if char_set.isdigit(): + char_set = { + '0': 'MultiByte', + '1': 'Unicode', + '2': 'MultiByte', + }[char_set] + return char_set + + +def _ConvertMSVSConfigurationType(config_type): + if config_type.isdigit(): + config_type = { + '1': 'Application', + '2': 'DynamicLibrary', + '4': 'StaticLibrary', + '5': 'Driver', + '10': 'Utility' + }[config_type] + return config_type + + +def _GetMSBuildAttributes(spec, config, build_file): + if 'msbuild_configuration_attributes' not in config: + msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file) + + else: + config_type = _GetMSVSConfigurationType(spec, build_file) + config_type = _ConvertMSVSConfigurationType(config_type) + msbuild_attributes = config.get('msbuild_configuration_attributes', {}) + msbuild_attributes.setdefault('ConfigurationType', config_type) + output_dir = msbuild_attributes.get('OutputDirectory', + '$(SolutionDir)$(Configuration)') + msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\' + if 'IntermediateDirectory' not in msbuild_attributes: + intermediate = _FixPath('$(Configuration)') + '\\' + msbuild_attributes['IntermediateDirectory'] = intermediate + if 'CharacterSet' in msbuild_attributes: + msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet( + msbuild_attributes['CharacterSet']) + if 'TargetName' not in msbuild_attributes: + prefix = spec.get('product_prefix', '') + product_name = spec.get('product_name', '$(ProjectName)') + target_name = prefix + product_name + msbuild_attributes['TargetName'] = target_name + + if spec.get('msvs_external_builder'): + external_out_dir = spec.get('msvs_external_builder_out_dir', '.') + msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\' + + # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile' + # (depending on the tool used) to avoid MSB8012 warning. 
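# Illustrative note (not part of the vendored GYP source): MSB8012 is the
# MSBuild warning emitted when $(TargetPath)/$(TargetExt) disagree with the
# Link.OutputFile (or Lib.OutputFile) the project actually uses. The map
# below therefore mirrors the finalized OutputFile/TargetExt of the relevant
# tool back into the TargetPath/TargetExt project properties, e.g. a
# 'static_library' target copies them from its 'Lib' settings.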
+ msbuild_tool_map = { + 'executable': 'Link', + 'shared_library': 'Link', + 'loadable_module': 'Link', + 'windows_driver': 'Link', + 'static_library': 'Lib', + } + msbuild_tool = msbuild_tool_map.get(spec['type']) + if msbuild_tool: + msbuild_settings = config['finalized_msbuild_settings'] + out_file = msbuild_settings[msbuild_tool].get('OutputFile') + if out_file: + msbuild_attributes['TargetPath'] = _FixPath(out_file) + target_ext = msbuild_settings[msbuild_tool].get('TargetExt') + if target_ext: + msbuild_attributes['TargetExt'] = target_ext + + return msbuild_attributes + + +def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file): + # TODO(jeanluc) We could optimize out the following and do it only if + # there are actions. + # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'. + new_paths = [] + cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0] + if cygwin_dirs: + cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs) + new_paths.append(cyg_path) + # TODO(jeanluc) Change the convention to have both a cygwin_dir and a + # python_dir. + python_path = cyg_path.replace('cygwin\\bin', 'python_26') + new_paths.append(python_path) + if new_paths: + new_paths = '$(ExecutablePath);' + ';'.join(new_paths) + + properties = {} + for (name, configuration) in sorted(configurations.iteritems()): + condition = _GetConfigurationCondition(name, configuration) + attributes = _GetMSBuildAttributes(spec, configuration, build_file) + msbuild_settings = configuration['finalized_msbuild_settings'] + _AddConditionalProperty(properties, condition, 'IntDir', + attributes['IntermediateDirectory']) + _AddConditionalProperty(properties, condition, 'OutDir', + attributes['OutputDirectory']) + _AddConditionalProperty(properties, condition, 'TargetName', + attributes['TargetName']) + + if attributes.get('TargetPath'): + _AddConditionalProperty(properties, condition, 'TargetPath', + attributes['TargetPath']) + if attributes.get('TargetExt'): + _AddConditionalProperty(properties, condition, 'TargetExt', + attributes['TargetExt']) + + if new_paths: + _AddConditionalProperty(properties, condition, 'ExecutablePath', + new_paths) + tool_settings = msbuild_settings.get('', {}) + for name, value in sorted(tool_settings.iteritems()): + formatted_value = _GetValueFormattedForMSBuild('', name, value) + _AddConditionalProperty(properties, condition, name, formatted_value) + return _GetMSBuildPropertyGroup(spec, None, properties) + + +def _AddConditionalProperty(properties, condition, name, value): + """Adds a property / conditional value pair to a dictionary. + + Arguments: + properties: The dictionary to be modified. The key is the name of the + property. The value is itself a dictionary; its key is the value and + the value a list of condition for which this value is true. + condition: The condition under which the named property has the value. + name: The name of the property. + value: The value of the property. + """ + if name not in properties: + properties[name] = {} + values = properties[name] + if value not in values: + values[value] = [] + conditions = values[value] + conditions.append(condition) + + +# Regex for msvs variable references ( i.e. $(FOO) ). +MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)') + + +def _GetMSBuildPropertyGroup(spec, label, properties): + """Returns a PropertyGroup definition for the specified properties. + + Arguments: + spec: The target project dict. + label: An optional label for the PropertyGroup. 
+ properties: The dictionary to be converted. The key is the name of the + property. The value is itself a dictionary; its key is the value and + the value a list of condition for which this value is true. + """ + group = ['PropertyGroup'] + if label: + group.append({'Label': label}) + num_configurations = len(spec['configurations']) + def GetEdges(node): + # Use a definition of edges such that user_of_variable -> used_varible. + # This happens to be easier in this case, since a variable's + # definition contains all variables it references in a single string. + edges = set() + for value in sorted(properties[node].keys()): + # Add to edges all $(...) references to variables. + # + # Variable references that refer to names not in properties are excluded + # These can exist for instance to refer built in definitions like + # $(SolutionDir). + # + # Self references are ignored. Self reference is used in a few places to + # append to the default value. I.e. PATH=$(PATH);other_path + edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value) + if v in properties and v != node])) + return edges + properties_ordered = gyp.common.TopologicallySorted( + properties.keys(), GetEdges) + # Walk properties in the reverse of a topological sort on + # user_of_variable -> used_variable as this ensures variables are + # defined before they are used. + # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) + for name in reversed(properties_ordered): + values = properties[name] + for value, conditions in sorted(values.iteritems()): + if len(conditions) == num_configurations: + # If the value is the same all configurations, + # just add one unconditional entry. + group.append([name, value]) + else: + for condition in conditions: + group.append([name, {'Condition': condition}, value]) + return [group] + + +def _GetMSBuildToolSettingsSections(spec, configurations): + groups = [] + for (name, configuration) in sorted(configurations.iteritems()): + msbuild_settings = configuration['finalized_msbuild_settings'] + group = ['ItemDefinitionGroup', + {'Condition': _GetConfigurationCondition(name, configuration)} + ] + for tool_name, tool_settings in sorted(msbuild_settings.iteritems()): + # Skip the tool named '' which is a holder of global settings handled + # by _GetMSBuildConfigurationGlobalProperties. 
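# Illustrative sketch (not part of the vendored GYP source): assuming a
# 'Debug' configuration whose finalized settings contain the hypothetical
# entry {'ClCompile': {'Optimization': 'Disabled'}}, the group built here is
# serialized by easy_xml into roughly
#   <ItemDefinitionGroup
#       Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
#     <ClCompile><Optimization>Disabled</Optimization></ClCompile>
#   </ItemDefinitionGroup>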
+ if tool_name: + if tool_settings: + tool = [tool_name] + for name, value in sorted(tool_settings.iteritems()): + formatted_value = _GetValueFormattedForMSBuild(tool_name, name, + value) + tool.append([name, formatted_value]) + group.append(tool) + groups.append(group) + return groups + + +def _FinalizeMSBuildSettings(spec, configuration): + if 'msbuild_settings' in configuration: + converted = False + msbuild_settings = configuration['msbuild_settings'] + MSVSSettings.ValidateMSBuildSettings(msbuild_settings) + else: + converted = True + msvs_settings = configuration.get('msvs_settings', {}) + msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings) + include_dirs, midl_include_dirs, resource_include_dirs = \ + _GetIncludeDirs(configuration) + libraries = _GetLibraries(spec) + library_dirs = _GetLibraryDirs(configuration) + out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True) + target_ext = _GetOutputTargetExt(spec) + defines = _GetDefines(configuration) + if converted: + # Visual Studio 2010 has TR1 + defines = [d for d in defines if d != '_HAS_TR1=0'] + # Warn of ignored settings + ignored_settings = ['msvs_tool_files'] + for ignored_setting in ignored_settings: + value = configuration.get(ignored_setting) + if value: + print ('Warning: The automatic conversion to MSBuild does not handle ' + '%s. Ignoring setting of %s' % (ignored_setting, str(value))) + + defines = [_EscapeCppDefineForMSBuild(d) for d in defines] + disabled_warnings = _GetDisabledWarnings(configuration) + prebuild = configuration.get('msvs_prebuild') + postbuild = configuration.get('msvs_postbuild') + def_file = _GetModuleDefinition(spec) + precompiled_header = configuration.get('msvs_precompiled_header') + + # Add the information to the appropriate tool + # TODO(jeanluc) We could optimize and generate these settings only if + # the corresponding files are found, e.g. don't generate ResourceCompile + # if you don't have any resources. + _ToolAppend(msbuild_settings, 'ClCompile', + 'AdditionalIncludeDirectories', include_dirs) + _ToolAppend(msbuild_settings, 'Midl', + 'AdditionalIncludeDirectories', midl_include_dirs) + _ToolAppend(msbuild_settings, 'ResourceCompile', + 'AdditionalIncludeDirectories', resource_include_dirs) + # Add in libraries, note that even for empty libraries, we want this + # set, to prevent inheriting default libraries from the enviroment. + _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies', + libraries) + _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories', + library_dirs) + if out_file: + _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file, + only_if_unset=True) + if target_ext: + _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext, + only_if_unset=True) + # Add defines. + _ToolAppend(msbuild_settings, 'ClCompile', + 'PreprocessorDefinitions', defines) + _ToolAppend(msbuild_settings, 'ResourceCompile', + 'PreprocessorDefinitions', defines) + # Add disabled warnings. + _ToolAppend(msbuild_settings, 'ClCompile', + 'DisableSpecificWarnings', disabled_warnings) + # Turn on precompiled headers if appropriate. 
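# Illustrative note (not part of the vendored GYP source): a configuration
# with the hypothetical setting 'msvs_precompiled_header': 'src/pch.h' ends
# up with
#   PrecompiledHeader     = 'Use'
#   PrecompiledHeaderFile = 'pch.h'   (basename only, via os.path.split below)
#   ForcedIncludeFiles    = ['pch.h']
# while configurations without the setting are explicitly marked 'NotUsing'.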
+ if precompiled_header: + precompiled_header = os.path.split(precompiled_header)[1] + _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use') + _ToolAppend(msbuild_settings, 'ClCompile', + 'PrecompiledHeaderFile', precompiled_header) + _ToolAppend(msbuild_settings, 'ClCompile', + 'ForcedIncludeFiles', [precompiled_header]) + else: + _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing') + # Turn off WinRT compilation + _ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false') + # Turn on import libraries if appropriate + if spec.get('msvs_requires_importlibrary'): + _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false') + # Loadable modules don't generate import libraries; + # tell dependent projects to not expect one. + if spec['type'] == 'loadable_module': + _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true') + # Set the module definition file if any. + if def_file: + _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file) + configuration['finalized_msbuild_settings'] = msbuild_settings + if prebuild: + _ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild) + if postbuild: + _ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild) + + +def _GetValueFormattedForMSBuild(tool_name, name, value): + if type(value) == list: + # For some settings, VS2010 does not automatically extends the settings + # TODO(jeanluc) Is this what we want? + if name in ['AdditionalIncludeDirectories', + 'AdditionalLibraryDirectories', + 'AdditionalOptions', + 'DelayLoadDLLs', + 'DisableSpecificWarnings', + 'PreprocessorDefinitions']: + value.append('%%(%s)' % name) + # For most tools, entries in a list should be separated with ';' but some + # settings use a space. Check for those first. + exceptions = { + 'ClCompile': ['AdditionalOptions'], + 'Link': ['AdditionalOptions'], + 'Lib': ['AdditionalOptions']} + if tool_name in exceptions and name in exceptions[tool_name]: + char = ' ' + else: + char = ';' + formatted_value = char.join( + [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value]) + else: + formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value) + return formatted_value + + +def _VerifySourcesExist(sources, root_dir): + """Verifies that all source files exist on disk. + + Checks that all regular source files, i.e. not created at run time, + exist on disk. Missing files cause needless recompilation but no otherwise + visible errors. + + Arguments: + sources: A recursive list of Filter/file names. + root_dir: The root directory for the relative path names. + Returns: + A list of source files that cannot be found on disk. 
+ """ + missing_sources = [] + for source in sources: + if isinstance(source, MSVSProject.Filter): + missing_sources.extend(_VerifySourcesExist(source.contents, root_dir)) + else: + if '$' not in source: + full_path = os.path.join(root_dir, source) + if not os.path.exists(full_path): + missing_sources.append(full_path) + return missing_sources + + +def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies, + extension_to_rule_name, actions_spec, + sources_handled_by_action, list_excluded): + groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule', + 'rule_dependency'] + grouped_sources = {} + for g in groups: + grouped_sources[g] = [] + + _AddSources2(spec, sources, exclusions, grouped_sources, + rule_dependencies, extension_to_rule_name, + sources_handled_by_action, list_excluded) + sources = [] + for g in groups: + if grouped_sources[g]: + sources.append(['ItemGroup'] + grouped_sources[g]) + if actions_spec: + sources.append(['ItemGroup'] + actions_spec) + return sources + + +def _AddSources2(spec, sources, exclusions, grouped_sources, + rule_dependencies, extension_to_rule_name, + sources_handled_by_action, + list_excluded): + extensions_excluded_from_precompile = [] + for source in sources: + if isinstance(source, MSVSProject.Filter): + _AddSources2(spec, source.contents, exclusions, grouped_sources, + rule_dependencies, extension_to_rule_name, + sources_handled_by_action, + list_excluded) + else: + if not source in sources_handled_by_action: + detail = [] + excluded_configurations = exclusions.get(source, []) + if len(excluded_configurations) == len(spec['configurations']): + detail.append(['ExcludedFromBuild', 'true']) + else: + for config_name, configuration in sorted(excluded_configurations): + condition = _GetConfigurationCondition(config_name, configuration) + detail.append(['ExcludedFromBuild', + {'Condition': condition}, + 'true']) + # Add precompile if needed + for config_name, configuration in spec['configurations'].iteritems(): + precompiled_source = configuration.get('msvs_precompiled_source', '') + if precompiled_source != '': + precompiled_source = _FixPath(precompiled_source) + if not extensions_excluded_from_precompile: + # If the precompiled header is generated by a C source, we must + # not try to use it for C++ sources, and vice versa. + basename, extension = os.path.splitext(precompiled_source) + if extension == '.c': + extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx'] + else: + extensions_excluded_from_precompile = ['.c'] + + if precompiled_source == source: + condition = _GetConfigurationCondition(config_name, configuration) + detail.append(['PrecompiledHeader', + {'Condition': condition}, + 'Create' + ]) + else: + # Turn off precompiled header usage for source files of a + # different type than the file that generated the + # precompiled header. 
+ for extension in extensions_excluded_from_precompile: + if source.endswith(extension): + detail.append(['PrecompiledHeader', '']) + detail.append(['ForcedIncludeFiles', '']) + + group, element = _MapFileToMsBuildSourceType(source, rule_dependencies, + extension_to_rule_name) + grouped_sources[group].append([element, {'Include': source}] + detail) + + +def _GetMSBuildProjectReferences(project): + references = [] + if project.dependencies: + group = ['ItemGroup'] + for dependency in project.dependencies: + guid = dependency.guid + project_dir = os.path.split(project.path)[0] + relative_path = gyp.common.RelativePath(dependency.path, project_dir) + project_ref = ['ProjectReference', + {'Include': relative_path}, + ['Project', guid], + ['ReferenceOutputAssembly', 'false'] + ] + for config in dependency.spec.get('configurations', {}).itervalues(): + # If it's disabled in any config, turn it off in the reference. + if config.get('msvs_2010_disable_uldi_when_referenced', 0): + project_ref.append(['UseLibraryDependencyInputs', 'false']) + break + group.append(project_ref) + references.append(group) + return references + + +def _GenerateMSBuildProject(project, options, version, generator_flags): + spec = project.spec + configurations = spec['configurations'] + project_dir, project_file_name = os.path.split(project.path) + gyp.common.EnsureDirExists(project.path) + # Prepare list of sources and excluded sources. + gyp_path = _NormalizedSource(project.build_file) + relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir) + + gyp_file = os.path.split(project.build_file)[1] + sources, excluded_sources = _PrepareListOfSources(spec, generator_flags, + gyp_file) + # Add rules. + actions_to_add = {} + props_files_of_rules = set() + targets_files_of_rules = set() + rule_dependencies = set() + extension_to_rule_name = {} + list_excluded = generator_flags.get('msvs_list_excluded_files', True) + + # Don't generate rules if we are using an external builder like ninja. + if not spec.get('msvs_external_builder'): + _GenerateRulesForMSBuild(project_dir, options, spec, + sources, excluded_sources, + props_files_of_rules, targets_files_of_rules, + actions_to_add, rule_dependencies, + extension_to_rule_name) + else: + rules = spec.get('rules', []) + _AdjustSourcesForRules(rules, sources, excluded_sources, True) + + sources, excluded_sources, excluded_idl = ( + _AdjustSourcesAndConvertToFilterHierarchy(spec, options, + project_dir, sources, + excluded_sources, + list_excluded, version)) + + # Don't add actions if we are using an external builder like ninja. + if not spec.get('msvs_external_builder'): + _AddActions(actions_to_add, spec, project.build_file) + _AddCopies(actions_to_add, spec) + + # NOTE: this stanza must appear after all actions have been decided. + # Don't excluded sources with actions attached, or they won't run. 
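# Illustrative note (not part of the vendored GYP source): actions are
# emitted later as <CustomBuild Include="primary_input"> items (see
# _AddMSBuildAction), so if that primary input were also marked excluded the
# custom build step would never fire; _FilterActionsFromExcluded below
# presumably drops such inputs from the excluded set for that reason.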
+ excluded_sources = _FilterActionsFromExcluded( + excluded_sources, actions_to_add) + + exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl) + actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild( + spec, actions_to_add) + + _GenerateMSBuildFiltersFile(project.path + '.filters', sources, + rule_dependencies, + extension_to_rule_name) + missing_sources = _VerifySourcesExist(sources, project_dir) + + for configuration in configurations.itervalues(): + _FinalizeMSBuildSettings(spec, configuration) + + # Add attributes to root element + + import_default_section = [ + ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]] + import_cpp_props_section = [ + ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]] + import_cpp_targets_section = [ + ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]] + import_masm_props_section = [ + ['Import', + {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]] + import_masm_targets_section = [ + ['Import', + {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]] + macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]] + + content = [ + 'Project', + {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003', + 'ToolsVersion': version.ProjectVersion(), + 'DefaultTargets': 'Build' + }] + + content += _GetMSBuildProjectConfigurations(configurations) + content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name) + content += import_default_section + content += _GetMSBuildConfigurationDetails(spec, project.build_file) + if spec.get('msvs_enable_winphone'): + content += _GetMSBuildLocalProperties('v120_wp81') + else: + content += _GetMSBuildLocalProperties(project.msbuild_toolset) + content += import_cpp_props_section + content += import_masm_props_section + content += _GetMSBuildExtensions(props_files_of_rules) + content += _GetMSBuildPropertySheets(configurations) + content += macro_section + content += _GetMSBuildConfigurationGlobalProperties(spec, configurations, + project.build_file) + content += _GetMSBuildToolSettingsSections(spec, configurations) + content += _GetMSBuildSources( + spec, sources, exclusions, rule_dependencies, extension_to_rule_name, + actions_spec, sources_handled_by_action, list_excluded) + content += _GetMSBuildProjectReferences(project) + content += import_cpp_targets_section + content += import_masm_targets_section + content += _GetMSBuildExtensionTargets(targets_files_of_rules) + + if spec.get('msvs_external_builder'): + content += _GetMSBuildExternalBuilderTargets(spec) + + # TODO(jeanluc) File a bug to get rid of runas. We had in MSVS: + # has_run_as = _WriteMSVSUserFile(project.path, version, spec) + + easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True) + + return missing_sources + + +def _GetMSBuildExternalBuilderTargets(spec): + """Return a list of MSBuild targets for external builders. + + The "Build" and "Clean" targets are always generated. If the spec contains + 'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also + be generated, to support building selected C/C++ files. + + Arguments: + spec: The gyp target spec. + Returns: + List of MSBuild 'Target' specs. 
+ """ + build_cmd = _BuildCommandLineForRuleRaw( + spec, spec['msvs_external_builder_build_cmd'], + False, False, False, False) + build_target = ['Target', {'Name': 'Build'}] + build_target.append(['Exec', {'Command': build_cmd}]) + + clean_cmd = _BuildCommandLineForRuleRaw( + spec, spec['msvs_external_builder_clean_cmd'], + False, False, False, False) + clean_target = ['Target', {'Name': 'Clean'}] + clean_target.append(['Exec', {'Command': clean_cmd}]) + + targets = [build_target, clean_target] + + if spec.get('msvs_external_builder_clcompile_cmd'): + clcompile_cmd = _BuildCommandLineForRuleRaw( + spec, spec['msvs_external_builder_clcompile_cmd'], + False, False, False, False) + clcompile_target = ['Target', {'Name': 'ClCompile'}] + clcompile_target.append(['Exec', {'Command': clcompile_cmd}]) + targets.append(clcompile_target) + + return targets + + +def _GetMSBuildExtensions(props_files_of_rules): + extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}] + for props_file in props_files_of_rules: + extensions.append(['Import', {'Project': props_file}]) + return [extensions] + + +def _GetMSBuildExtensionTargets(targets_files_of_rules): + targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}] + for targets_file in sorted(targets_files_of_rules): + targets_node.append(['Import', {'Project': targets_file}]) + return [targets_node] + + +def _GenerateActionsForMSBuild(spec, actions_to_add): + """Add actions accumulated into an actions_to_add, merging as needed. + + Arguments: + spec: the target project dict + actions_to_add: dictionary keyed on input name, which maps to a list of + dicts describing the actions attached to that input file. + + Returns: + A pair of (action specification, the sources handled by this action). + """ + sources_handled_by_action = OrderedSet() + actions_spec = [] + for primary_input, actions in actions_to_add.iteritems(): + inputs = OrderedSet() + outputs = OrderedSet() + descriptions = [] + commands = [] + for action in actions: + inputs.update(OrderedSet(action['inputs'])) + outputs.update(OrderedSet(action['outputs'])) + descriptions.append(action['description']) + cmd = action['command'] + # For most actions, add 'call' so that actions that invoke batch files + # return and continue executing. msbuild_use_call provides a way to + # disable this but I have not seen any adverse effect from doing that + # for everything. + if action.get('msbuild_use_call', True): + cmd = 'call ' + cmd + commands.append(cmd) + # Add the custom build action for one input file. + description = ', and also '.join(descriptions) + + # We can't join the commands simply with && because the command line will + # get too long. See also _AddActions: cygwin's setup_env mustn't be called + # for every invocation or the command that sets the PATH will grow too + # long. 
+ command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%' + for c in commands]) + _AddMSBuildAction(spec, + primary_input, + inputs, + outputs, + command, + description, + sources_handled_by_action, + actions_spec) + return actions_spec, sources_handled_by_action + + +def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description, + sources_handled_by_action, actions_spec): + command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd) + primary_input = _FixPath(primary_input) + inputs_array = _FixPaths(inputs) + outputs_array = _FixPaths(outputs) + additional_inputs = ';'.join([i for i in inputs_array + if i != primary_input]) + outputs = ';'.join(outputs_array) + sources_handled_by_action.add(primary_input) + action_spec = ['CustomBuild', {'Include': primary_input}] + action_spec.extend( + # TODO(jeanluc) 'Document' for all or just if as_sources? + [['FileType', 'Document'], + ['Command', command], + ['Message', description], + ['Outputs', outputs] + ]) + if additional_inputs: + action_spec.append(['AdditionalInputs', additional_inputs]) + actions_spec.append(action_spec) diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/msvs_test.py b/deps/libuv/build/gyp/pylib/gyp/generator/msvs_test.py new file mode 100755 index 00000000..c0b021df --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/msvs_test.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" Unit tests for the msvs.py file. """ + +import gyp.generator.msvs as msvs +import unittest +import StringIO + + +class TestSequenceFunctions(unittest.TestCase): + + def setUp(self): + self.stderr = StringIO.StringIO() + + def test_GetLibraries(self): + self.assertEqual( + msvs._GetLibraries({}), + []) + self.assertEqual( + msvs._GetLibraries({'libraries': []}), + []) + self.assertEqual( + msvs._GetLibraries({'other':'foo', 'libraries': ['a.lib']}), + ['a.lib']) + self.assertEqual( + msvs._GetLibraries({'libraries': ['-la']}), + ['a.lib']) + self.assertEqual( + msvs._GetLibraries({'libraries': ['a.lib', 'b.lib', 'c.lib', '-lb.lib', + '-lb.lib', 'd.lib', 'a.lib']}), + ['c.lib', 'b.lib', 'd.lib', 'a.lib']) + +if __name__ == '__main__': + unittest.main() diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/ninja.py b/deps/libuv/build/gyp/pylib/gyp/generator/ninja.py new file mode 100644 index 00000000..0555a4a9 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/ninja.py @@ -0,0 +1,2483 @@ +# Copyright (c) 2013 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import collections +import copy +import hashlib +import json +import multiprocessing +import os.path +import re +import signal +import subprocess +import sys +import gyp +import gyp.common +from gyp.common import OrderedSet +import gyp.msvs_emulation +import gyp.MSVSUtil as MSVSUtil +import gyp.xcode_emulation +from cStringIO import StringIO + +from gyp.common import GetEnvironFallback +import gyp.ninja_syntax as ninja_syntax + +generator_default_variables = { + 'EXECUTABLE_PREFIX': '', + 'EXECUTABLE_SUFFIX': '', + 'STATIC_LIB_PREFIX': 'lib', + 'STATIC_LIB_SUFFIX': '.a', + 'SHARED_LIB_PREFIX': 'lib', + + # Gyp expects the following variables to be expandable by the build + # system to the appropriate locations. Ninja prefers paths to be + # known at gyp time. 
To resolve this, introduce special + # variables starting with $! and $| (which begin with a $ so gyp knows it + # should be treated specially, but is otherwise an invalid + # ninja/shell variable) that are passed to gyp here but expanded + # before writing out into the target .ninja files; see + # ExpandSpecial. + # $! is used for variables that represent a path and that can only appear at + # the start of a string, while $| is used for variables that can appear + # anywhere in a string. + 'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR', + 'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen', + 'PRODUCT_DIR': '$!PRODUCT_DIR', + 'CONFIGURATION_NAME': '$|CONFIGURATION_NAME', + + # Special variables that may be used by gyp 'rule' targets. + # We generate definitions for these variables on the fly when processing a + # rule. + 'RULE_INPUT_ROOT': '${root}', + 'RULE_INPUT_DIRNAME': '${dirname}', + 'RULE_INPUT_PATH': '${source}', + 'RULE_INPUT_EXT': '${ext}', + 'RULE_INPUT_NAME': '${name}', +} + +# Placates pylint. +generator_additional_non_configuration_keys = [] +generator_additional_path_sections = [] +generator_extra_sources_for_rules = [] +generator_filelist_paths = None + +generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested() + +def StripPrefix(arg, prefix): + if arg.startswith(prefix): + return arg[len(prefix):] + return arg + + +def QuoteShellArgument(arg, flavor): + """Quote a string such that it will be interpreted as a single argument + by the shell.""" + # Rather than attempting to enumerate the bad shell characters, just + # whitelist common OK ones and quote anything else. + if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg): + return arg # No quoting necessary. + if flavor == 'win': + return gyp.msvs_emulation.QuoteForRspFile(arg) + return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'" + + +def Define(d, flavor): + """Takes a preprocessor define and returns a -D parameter that's ninja- and + shell-escaped.""" + if flavor == 'win': + # cl.exe replaces literal # characters with = in preprocesor definitions for + # some reason. Octal-encode to work around that. + d = d.replace('#', '\\%03o' % ord('#')) + return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor) + + +def AddArch(output, arch): + """Adds an arch string to an output path.""" + output, extension = os.path.splitext(output) + return '%s.%s%s' % (output, arch, extension) + + +class Target(object): + """Target represents the paths used within a single gyp target. + + Conceptually, building a single target A is a series of steps: + + 1) actions/rules/copies generates source/resources/etc. + 2) compiles generates .o files + 3) link generates a binary (library/executable) + 4) bundle merges the above in a mac bundle + + (Any of these steps can be optional.) + + From a build ordering perspective, a dependent target B could just + depend on the last output of this series of steps. + + But some dependent commands sometimes need to reach inside the box. + For example, when linking B it needs to get the path to the static + library generated by A. + + This object stores those paths. To keep things simple, member + variables only store concrete paths to single files, while methods + compute derived values like "the last output of the target". + """ + def __init__(self, type): + # Gyp type ("static_library", etc.) of this target. + self.type = type + # File representing whether any input dependencies necessary for + # dependent actions have completed. 
+ self.preaction_stamp = None + # File representing whether any input dependencies necessary for + # dependent compiles have completed. + self.precompile_stamp = None + # File representing the completion of actions/rules/copies, if any. + self.actions_stamp = None + # Path to the output of the link step, if any. + self.binary = None + # Path to the file representing the completion of building the bundle, + # if any. + self.bundle = None + # On Windows, incremental linking requires linking against all the .objs + # that compose a .lib (rather than the .lib itself). That list is stored + # here. In this case, we also need to save the compile_deps for the target, + # so that the the target that directly depends on the .objs can also depend + # on those. + self.component_objs = None + self.compile_deps = None + # Windows only. The import .lib is the output of a build step, but + # because dependents only link against the lib (not both the lib and the + # dll) we keep track of the import library here. + self.import_lib = None + # Track if this target contains any C++ files, to decide if gcc or g++ + # should be used for linking. + self.uses_cpp = False + + def Linkable(self): + """Return true if this is a target that can be linked against.""" + return self.type in ('static_library', 'shared_library') + + def UsesToc(self, flavor): + """Return true if the target should produce a restat rule based on a TOC + file.""" + # For bundles, the .TOC should be produced for the binary, not for + # FinalOutput(). But the naive approach would put the TOC file into the + # bundle, so don't do this for bundles for now. + if flavor == 'win' or self.bundle: + return False + return self.type in ('shared_library', 'loadable_module') + + def PreActionInput(self, flavor): + """Return the path, if any, that should be used as a dependency of + any dependent action step.""" + if self.UsesToc(flavor): + return self.FinalOutput() + '.TOC' + return self.FinalOutput() or self.preaction_stamp + + def PreCompileInput(self): + """Return the path, if any, that should be used as a dependency of + any dependent compile step.""" + return self.actions_stamp or self.precompile_stamp + + def FinalOutput(self): + """Return the last output of the target, which depends on all prior + steps.""" + return self.bundle or self.binary or self.actions_stamp + + +# A small discourse on paths as used within the Ninja build: +# All files we produce (both at gyp and at build time) appear in the +# build directory (e.g. out/Debug). +# +# Paths within a given .gyp file are always relative to the directory +# containing the .gyp file. Call these "gyp paths". This includes +# sources as well as the starting directory a given gyp rule/action +# expects to be run from. We call the path from the source root to +# the gyp file the "base directory" within the per-.gyp-file +# NinjaWriter code. +# +# All paths as written into the .ninja files are relative to the build +# directory. Call these paths "ninja paths". +# +# We translate between these two notions of paths with two helper +# functions: +# +# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file) +# into the equivalent ninja path. +# +# - GypPathToUniqueOutput translates a gyp path into a ninja path to write +# an output file; the result can be namespaced such that it is unique +# to the input file name as well as the output target name. 
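To make the two translations concrete, here is a minimal sketch with invented values (a 'libuv' target whose gyp file lives in deps/libuv, built into out/Debug, separators shown POSIX-style); it only mirrors what GypPathToNinja and GypPathToUniqueOutput below compute:

import os

base_dir = 'deps/libuv'              # directory containing the .gyp file (invented)
build_to_base = '../../deps/libuv'   # from the build dir back to base_dir

# GypPathToNinja: a source listed as 'src/fs.c' in the gyp file becomes a path
# relative to the build directory.
print os.path.normpath(os.path.join(build_to_base, 'src/fs.c'))
# -> ../../deps/libuv/src/fs.c

# GypPathToUniqueOutput: its object file is namespaced under obj/ by the gyp
# file's directory and, when qualified, by the target name, so two targets can
# compile the same source without their outputs colliding.
print os.path.normpath(os.path.join('obj', base_dir, 'src', 'libuv' + '.' + 'fs.o'))
# -> obj/deps/libuv/src/libuv.fs.o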
+ +class NinjaWriter(object): + def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir, + output_file, toplevel_build, output_file_name, flavor, + toplevel_dir=None): + """ + base_dir: path from source root to directory containing this gyp file, + by gyp semantics, all input paths are relative to this + build_dir: path from source root to build output + toplevel_dir: path to the toplevel directory + """ + + self.hash_for_rules = hash_for_rules + self.target_outputs = target_outputs + self.base_dir = base_dir + self.build_dir = build_dir + self.ninja = ninja_syntax.Writer(output_file) + self.toplevel_build = toplevel_build + self.output_file_name = output_file_name + + self.flavor = flavor + self.abs_build_dir = None + if toplevel_dir is not None: + self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir, + build_dir)) + self.obj_ext = '.obj' if flavor == 'win' else '.o' + if flavor == 'win': + # See docstring of msvs_emulation.GenerateEnvironmentFiles(). + self.win_env = {} + for arch in ('x86', 'x64'): + self.win_env[arch] = 'environment.' + arch + + # Relative path from build output dir to base dir. + build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir) + self.build_to_base = os.path.join(build_to_top, base_dir) + # Relative path from base dir to build dir. + base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir) + self.base_to_build = os.path.join(base_to_top, build_dir) + + def ExpandSpecial(self, path, product_dir=None): + """Expand specials like $!PRODUCT_DIR in |path|. + + If |product_dir| is None, assumes the cwd is already the product + dir. Otherwise, |product_dir| is the relative path to the product + dir. + """ + + PRODUCT_DIR = '$!PRODUCT_DIR' + if PRODUCT_DIR in path: + if product_dir: + path = path.replace(PRODUCT_DIR, product_dir) + else: + path = path.replace(PRODUCT_DIR + '/', '') + path = path.replace(PRODUCT_DIR + '\\', '') + path = path.replace(PRODUCT_DIR, '.') + + INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR' + if INTERMEDIATE_DIR in path: + int_dir = self.GypPathToUniqueOutput('gen') + # GypPathToUniqueOutput generates a path relative to the product dir, + # so insert product_dir in front if it is provided. + path = path.replace(INTERMEDIATE_DIR, + os.path.join(product_dir or '', int_dir)) + + CONFIGURATION_NAME = '$|CONFIGURATION_NAME' + path = path.replace(CONFIGURATION_NAME, self.config_name) + + return path + + def ExpandRuleVariables(self, path, root, dirname, source, ext, name): + if self.flavor == 'win': + path = self.msvs_settings.ConvertVSMacros( + path, config=self.config_name) + path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root) + path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'], + dirname) + path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source) + path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext) + path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name) + return path + + def GypPathToNinja(self, path, env=None): + """Translate a gyp path to a ninja path, optionally expanding environment + variable references in |path| with |env|. 
+ + See the above discourse on path conversions.""" + if env: + if self.flavor == 'mac': + path = gyp.xcode_emulation.ExpandEnvVars(path, env) + elif self.flavor == 'win': + path = gyp.msvs_emulation.ExpandMacros(path, env) + if path.startswith('$!'): + expanded = self.ExpandSpecial(path) + if self.flavor == 'win': + expanded = os.path.normpath(expanded) + return expanded + if '$|' in path: + path = self.ExpandSpecial(path) + assert '$' not in path, path + return os.path.normpath(os.path.join(self.build_to_base, path)) + + def GypPathToUniqueOutput(self, path, qualified=True): + """Translate a gyp path to a ninja path for writing output. + + If qualified is True, qualify the resulting filename with the name + of the target. This is necessary when e.g. compiling the same + path twice for two separate output targets. + + See the above discourse on path conversions.""" + + path = self.ExpandSpecial(path) + assert not path.startswith('$'), path + + # Translate the path following this scheme: + # Input: foo/bar.gyp, target targ, references baz/out.o + # Output: obj/foo/baz/targ.out.o (if qualified) + # obj/foo/baz/out.o (otherwise) + # (and obj.host instead of obj for cross-compiles) + # + # Why this scheme and not some other one? + # 1) for a given input, you can compute all derived outputs by matching + # its path, even if the input is brought via a gyp file with '..'. + # 2) simple files like libraries and stamps have a simple filename. + + obj = 'obj' + if self.toolset != 'target': + obj += '.' + self.toolset + + path_dir, path_basename = os.path.split(path) + assert not os.path.isabs(path_dir), ( + "'%s' can not be absolute path (see crbug.com/462153)." % path_dir) + + if qualified: + path_basename = self.name + '.' + path_basename + return os.path.normpath(os.path.join(obj, self.base_dir, path_dir, + path_basename)) + + def WriteCollapsedDependencies(self, name, targets, order_only=None): + """Given a list of targets, return a path for a single file + representing the result of building all the targets or None. + + Uses a stamp file if necessary.""" + + assert targets == filter(None, targets), targets + if len(targets) == 0: + assert not order_only + return None + if len(targets) > 1 or order_only: + stamp = self.GypPathToUniqueOutput(name + '.stamp') + targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only) + self.ninja.newline() + return targets[0] + + def _SubninjaNameForArch(self, arch): + output_file_base = os.path.splitext(self.output_file_name)[0] + return '%s.%s.ninja' % (output_file_base, arch) + + def WriteSpec(self, spec, config_name, generator_flags): + """The main entry point for NinjaWriter: write the build rules for a spec. + + Returns a Target object, which represents the output paths for this spec. + Returns None if there are no outputs (e.g. 
a settings-only 'none' type + target).""" + + self.config_name = config_name + self.name = spec['target_name'] + self.toolset = spec['toolset'] + config = spec['configurations'][config_name] + self.target = Target(spec['type']) + self.is_standalone_static_library = bool( + spec.get('standalone_static_library', 0)) + + self.target_rpath = generator_flags.get('target_rpath', r'\$$ORIGIN/lib/') + + self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec) + self.xcode_settings = self.msvs_settings = None + if self.flavor == 'mac': + self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec) + mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None) + if mac_toolchain_dir: + self.xcode_settings.mac_toolchain_dir = mac_toolchain_dir + + if self.flavor == 'win': + self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec, + generator_flags) + arch = self.msvs_settings.GetArch(config_name) + self.ninja.variable('arch', self.win_env[arch]) + self.ninja.variable('cc', '$cl_' + arch) + self.ninja.variable('cxx', '$cl_' + arch) + self.ninja.variable('cc_host', '$cl_' + arch) + self.ninja.variable('cxx_host', '$cl_' + arch) + self.ninja.variable('asm', '$ml_' + arch) + + if self.flavor == 'mac': + self.archs = self.xcode_settings.GetActiveArchs(config_name) + if len(self.archs) > 1: + self.arch_subninjas = dict( + (arch, ninja_syntax.Writer( + OpenOutput(os.path.join(self.toplevel_build, + self._SubninjaNameForArch(arch)), + 'w'))) + for arch in self.archs) + + # Compute predepends for all rules. + # actions_depends is the dependencies this target depends on before running + # any of its action/rule/copy steps. + # compile_depends is the dependencies this target depends on before running + # any of its compile steps. + actions_depends = [] + compile_depends = [] + # TODO(evan): it is rather confusing which things are lists and which + # are strings. Fix these. + if 'dependencies' in spec: + for dep in spec['dependencies']: + if dep in self.target_outputs: + target = self.target_outputs[dep] + actions_depends.append(target.PreActionInput(self.flavor)) + compile_depends.append(target.PreCompileInput()) + if target.uses_cpp: + self.target.uses_cpp = True + actions_depends = filter(None, actions_depends) + compile_depends = filter(None, compile_depends) + actions_depends = self.WriteCollapsedDependencies('actions_depends', + actions_depends) + compile_depends = self.WriteCollapsedDependencies('compile_depends', + compile_depends) + self.target.preaction_stamp = actions_depends + self.target.precompile_stamp = compile_depends + + # Write out actions, rules, and copies. These must happen before we + # compile any sources, so compute a list of predependencies for sources + # while we do it. + extra_sources = [] + mac_bundle_depends = [] + self.target.actions_stamp = self.WriteActionsRulesCopies( + spec, extra_sources, actions_depends, mac_bundle_depends) + + # If we have actions/rules/copies, we depend directly on those, but + # otherwise we depend on dependent target's actions/rules/copies etc. + # We never need to explicitly depend on previous target's link steps, + # because no compile ever depends on them. + compile_depends_stamp = (self.target.actions_stamp or compile_depends) + + # Write out the compilation steps, if any. 
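Before the compile steps are written out, a minimal sketch (with invented file names, and assuming gyp's pylib directory is on sys.path) of the single 'stamp' edge that the WriteCollapsedDependencies calls above emit through ninja_syntax when there is more than one dependency:

from cStringIO import StringIO
import gyp.ninja_syntax as ninja_syntax

buf = StringIO()
writer = ninja_syntax.Writer(buf)
# Several per-dependency inputs are collapsed behind one stamp file, so later
# edges only need to name a single order-only path.
writer.build('obj/libuv.actions_depends.stamp', 'stamp',
             ['obj/a.stamp', 'obj/b.stamp'])
print buf.getvalue()
# build obj/libuv.actions_depends.stamp: stamp obj/a.stamp obj/b.stamp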
+ link_deps = [] + try: + sources = extra_sources + spec.get('sources', []) + except TypeError: + print 'extra_sources: ', str(extra_sources) + print 'spec.get("sources"): ', str(spec.get('sources')) + raise + if sources: + if self.flavor == 'mac' and len(self.archs) > 1: + # Write subninja file containing compile and link commands scoped to + # a single arch if a fat binary is being built. + for arch in self.archs: + self.ninja.subninja(self._SubninjaNameForArch(arch)) + + pch = None + if self.flavor == 'win': + gyp.msvs_emulation.VerifyMissingSources( + sources, self.abs_build_dir, generator_flags, self.GypPathToNinja) + pch = gyp.msvs_emulation.PrecompiledHeader( + self.msvs_settings, config_name, self.GypPathToNinja, + self.GypPathToUniqueOutput, self.obj_ext) + else: + pch = gyp.xcode_emulation.MacPrefixHeader( + self.xcode_settings, self.GypPathToNinja, + lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang)) + link_deps = self.WriteSources( + self.ninja, config_name, config, sources, compile_depends_stamp, pch, + spec) + # Some actions/rules output 'sources' that are already object files. + obj_outputs = [f for f in sources if f.endswith(self.obj_ext)] + if obj_outputs: + if self.flavor != 'mac' or len(self.archs) == 1: + link_deps += [self.GypPathToNinja(o) for o in obj_outputs] + else: + print "Warning: Actions/rules writing object files don't work with " \ + "multiarch targets, dropping. (target %s)" % spec['target_name'] + elif self.flavor == 'mac' and len(self.archs) > 1: + link_deps = collections.defaultdict(list) + + compile_deps = self.target.actions_stamp or actions_depends + if self.flavor == 'win' and self.target.type == 'static_library': + self.target.component_objs = link_deps + self.target.compile_deps = compile_deps + + # Write out a link step, if needed. + output = None + is_empty_bundle = not link_deps and not mac_bundle_depends + if link_deps or self.target.actions_stamp or actions_depends: + output = self.WriteTarget(spec, config_name, config, link_deps, + compile_deps) + if self.is_mac_bundle: + mac_bundle_depends.append(output) + + # Bundle all of the above together, if needed. + if self.is_mac_bundle: + output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle) + + if not output: + return None + + assert self.target.FinalOutput(), output + return self.target + + def _WinIdlRule(self, source, prebuild, outputs): + """Handle the implicit VS .idl rule for one source file. 
Fills |outputs| + with files that are generated.""" + outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData( + source, self.config_name) + outdir = self.GypPathToNinja(outdir) + def fix_path(path, rel=None): + path = os.path.join(outdir, path) + dirname, basename = os.path.split(source) + root, ext = os.path.splitext(basename) + path = self.ExpandRuleVariables( + path, root, dirname, source, ext, basename) + if rel: + path = os.path.relpath(path, rel) + return path + vars = [(name, fix_path(value, outdir)) for name, value in vars] + output = [fix_path(p) for p in output] + vars.append(('outdir', outdir)) + vars.append(('idlflags', flags)) + input = self.GypPathToNinja(source) + self.ninja.build(output, 'idl', input, + variables=vars, order_only=prebuild) + outputs.extend(output) + + def WriteWinIdlFiles(self, spec, prebuild): + """Writes rules to match MSVS's implicit idl handling.""" + assert self.flavor == 'win' + if self.msvs_settings.HasExplicitIdlRulesOrActions(spec): + return [] + outputs = [] + for source in filter(lambda x: x.endswith('.idl'), spec['sources']): + self._WinIdlRule(source, prebuild, outputs) + return outputs + + def WriteActionsRulesCopies(self, spec, extra_sources, prebuild, + mac_bundle_depends): + """Write out the Actions, Rules, and Copies steps. Return a path + representing the outputs of these steps.""" + outputs = [] + if self.is_mac_bundle: + mac_bundle_resources = spec.get('mac_bundle_resources', [])[:] + else: + mac_bundle_resources = [] + extra_mac_bundle_resources = [] + + if 'actions' in spec: + outputs += self.WriteActions(spec['actions'], extra_sources, prebuild, + extra_mac_bundle_resources) + if 'rules' in spec: + outputs += self.WriteRules(spec['rules'], extra_sources, prebuild, + mac_bundle_resources, + extra_mac_bundle_resources) + if 'copies' in spec: + outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends) + + if 'sources' in spec and self.flavor == 'win': + outputs += self.WriteWinIdlFiles(spec, prebuild) + + if self.xcode_settings and self.xcode_settings.IsIosFramework(): + self.WriteiOSFrameworkHeaders(spec, outputs, prebuild) + + stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs) + + if self.is_mac_bundle: + xcassets = self.WriteMacBundleResources( + extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends) + partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends) + self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends) + + return stamp + + def GenerateDescription(self, verb, message, fallback): + """Generate and return a description of a build step. + + |verb| is the short summary, e.g. ACTION or RULE. + |message| is a hand-written description, or None if not available. + |fallback| is the gyp-level name of the step, usable as a fallback. + """ + if self.toolset != 'target': + verb += '(%s)' % self.toolset + if message: + return '%s %s' % (verb, self.ExpandSpecial(message)) + else: + return '%s %s: %s' % (verb, self.name, fallback) + + def WriteActions(self, actions, extra_sources, prebuild, + extra_mac_bundle_resources): + # Actions cd into the base directory. + env = self.GetToolchainEnv() + all_outputs = [] + for action in actions: + # First write out a rule for the action. 
+ name = '%s_%s' % (action['action_name'], self.hash_for_rules) + description = self.GenerateDescription('ACTION', + action.get('message', None), + name) + is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action) + if self.flavor == 'win' else False) + args = action['action'] + depfile = action.get('depfile', None) + if depfile: + depfile = self.ExpandSpecial(depfile, self.base_to_build) + pool = 'console' if int(action.get('ninja_use_console', 0)) else None + rule_name, _ = self.WriteNewNinjaRule(name, args, description, + is_cygwin, env, pool, + depfile=depfile) + + inputs = [self.GypPathToNinja(i, env) for i in action['inputs']] + if int(action.get('process_outputs_as_sources', False)): + extra_sources += action['outputs'] + if int(action.get('process_outputs_as_mac_bundle_resources', False)): + extra_mac_bundle_resources += action['outputs'] + outputs = [self.GypPathToNinja(o, env) for o in action['outputs']] + + # Then write out an edge using the rule. + self.ninja.build(outputs, rule_name, inputs, + order_only=prebuild) + all_outputs += outputs + + self.ninja.newline() + + return all_outputs + + def WriteRules(self, rules, extra_sources, prebuild, + mac_bundle_resources, extra_mac_bundle_resources): + env = self.GetToolchainEnv() + all_outputs = [] + for rule in rules: + # Skip a rule with no action and no inputs. + if 'action' not in rule and not rule.get('rule_sources', []): + continue + + # First write out a rule for the rule action. + name = '%s_%s' % (rule['rule_name'], self.hash_for_rules) + + args = rule['action'] + description = self.GenerateDescription( + 'RULE', + rule.get('message', None), + ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name) + is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule) + if self.flavor == 'win' else False) + pool = 'console' if int(rule.get('ninja_use_console', 0)) else None + rule_name, args = self.WriteNewNinjaRule( + name, args, description, is_cygwin, env, pool) + + # TODO: if the command references the outputs directly, we should + # simplify it to just use $out. + + # Rules can potentially make use of some special variables which + # must vary per source file. + # Compute the list of variables we'll need to provide. + special_locals = ('source', 'root', 'dirname', 'ext', 'name') + needed_variables = set(['source']) + for argument in args: + for var in special_locals: + if '${%s}' % var in argument: + needed_variables.add(var) + needed_variables = sorted(needed_variables) + + def cygwin_munge(path): + # pylint: disable=cell-var-from-loop + if is_cygwin: + return path.replace('\\', '/') + return path + + inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])] + + # If there are n source files matching the rule, and m additional rule + # inputs, then adding 'inputs' to each build edge written below will + # write m * n inputs. Collapsing reduces this to m + n. + sources = rule.get('rule_sources', []) + num_inputs = len(inputs) + if prebuild: + num_inputs += 1 + if num_inputs > 2 and len(sources) > 2: + inputs = [self.WriteCollapsedDependencies( + rule['rule_name'], inputs, order_only=prebuild)] + prebuild = [] + + # For each source file, write an edge that generates all the outputs. + for source in sources: + source = os.path.normpath(source) + dirname, basename = os.path.split(source) + root, ext = os.path.splitext(basename) + + # Gather the list of inputs and outputs, expanding $vars if possible. 
+ outputs = [self.ExpandRuleVariables(o, root, dirname, + source, ext, basename) + for o in rule['outputs']] + + if int(rule.get('process_outputs_as_sources', False)): + extra_sources += outputs + + was_mac_bundle_resource = source in mac_bundle_resources + if was_mac_bundle_resource or \ + int(rule.get('process_outputs_as_mac_bundle_resources', False)): + extra_mac_bundle_resources += outputs + # Note: This is n_resources * n_outputs_in_rule. Put to-be-removed + # items in a set and remove them all in a single pass if this becomes + # a performance issue. + if was_mac_bundle_resource: + mac_bundle_resources.remove(source) + + extra_bindings = [] + for var in needed_variables: + if var == 'root': + extra_bindings.append(('root', cygwin_munge(root))) + elif var == 'dirname': + # '$dirname' is a parameter to the rule action, which means + # it shouldn't be converted to a Ninja path. But we don't + # want $!PRODUCT_DIR in there either. + dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build) + extra_bindings.append(('dirname', cygwin_munge(dirname_expanded))) + elif var == 'source': + # '$source' is a parameter to the rule action, which means + # it shouldn't be converted to a Ninja path. But we don't + # want $!PRODUCT_DIR in there either. + source_expanded = self.ExpandSpecial(source, self.base_to_build) + extra_bindings.append(('source', cygwin_munge(source_expanded))) + elif var == 'ext': + extra_bindings.append(('ext', ext)) + elif var == 'name': + extra_bindings.append(('name', cygwin_munge(basename))) + else: + assert var == None, repr(var) + + outputs = [self.GypPathToNinja(o, env) for o in outputs] + if self.flavor == 'win': + # WriteNewNinjaRule uses unique_name for creating an rsp file on win. + extra_bindings.append(('unique_name', + hashlib.md5(outputs[0]).hexdigest())) + + self.ninja.build(outputs, rule_name, self.GypPathToNinja(source), + implicit=inputs, + order_only=prebuild, + variables=extra_bindings) + + all_outputs.extend(outputs) + + return all_outputs + + def WriteCopies(self, copies, prebuild, mac_bundle_depends): + outputs = [] + if self.xcode_settings: + extra_env = self.xcode_settings.GetPerTargetSettings() + env = self.GetToolchainEnv(additional_settings=extra_env) + else: + env = self.GetToolchainEnv() + for copy in copies: + for path in copy['files']: + # Normalize the path so trailing slashes don't confuse us. + path = os.path.normpath(path) + basename = os.path.split(path)[1] + src = self.GypPathToNinja(path, env) + dst = self.GypPathToNinja(os.path.join(copy['destination'], basename), + env) + outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild) + if self.is_mac_bundle: + # gyp has mac_bundle_resources to copy things into a bundle's + # Resources folder, but there's no built-in way to copy files to other + # places in the bundle. Hence, some targets use copies for this. Check + # if this file is copied into the current bundle, and if so add it to + # the bundle depends so that dependent targets get rebuilt if the copy + # input changes. 
+ if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()): + mac_bundle_depends.append(dst) + + return outputs + + def WriteiOSFrameworkHeaders(self, spec, outputs, prebuild): + """Prebuild steps to generate hmap files and copy headers to destination.""" + framework = self.ComputeMacBundleOutput() + all_sources = spec['sources'] + copy_headers = spec['mac_framework_headers'] + output = self.GypPathToUniqueOutput('headers.hmap') + self.xcode_settings.header_map_path = output + all_headers = map(self.GypPathToNinja, + filter(lambda x:x.endswith(('.h')), all_sources)) + variables = [('framework', framework), + ('copy_headers', map(self.GypPathToNinja, copy_headers))] + outputs.extend(self.ninja.build( + output, 'compile_ios_framework_headers', all_headers, + variables=variables, order_only=prebuild)) + + def WriteMacBundleResources(self, resources, bundle_depends): + """Writes ninja edges for 'mac_bundle_resources'.""" + xcassets = [] + + extra_env = self.xcode_settings.GetPerTargetSettings() + env = self.GetSortedXcodeEnv(additional_settings=extra_env) + env = self.ComputeExportEnvString(env) + isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name) + + for output, res in gyp.xcode_emulation.GetMacBundleResources( + generator_default_variables['PRODUCT_DIR'], + self.xcode_settings, map(self.GypPathToNinja, resources)): + output = self.ExpandSpecial(output) + if os.path.splitext(output)[-1] != '.xcassets': + self.ninja.build(output, 'mac_tool', res, + variables=[('mactool_cmd', 'copy-bundle-resource'), \ + ('env', env), ('binary', isBinary)]) + bundle_depends.append(output) + else: + xcassets.append(res) + return xcassets + + def WriteMacXCassets(self, xcassets, bundle_depends): + """Writes ninja edges for 'mac_bundle_resources' .xcassets files. + + This add an invocation of 'actool' via the 'mac_tool.py' helper script. + It assumes that the assets catalogs define at least one imageset and + thus an Assets.car file will be generated in the application resources + directory. 
If this is not the case, then the build will probably be done + at each invocation of ninja.""" + if not xcassets: + return + + extra_arguments = {} + settings_to_arg = { + 'XCASSETS_APP_ICON': 'app-icon', + 'XCASSETS_LAUNCH_IMAGE': 'launch-image', + } + settings = self.xcode_settings.xcode_settings[self.config_name] + for settings_key, arg_name in settings_to_arg.iteritems(): + value = settings.get(settings_key) + if value: + extra_arguments[arg_name] = value + + partial_info_plist = None + if extra_arguments: + partial_info_plist = self.GypPathToUniqueOutput( + 'assetcatalog_generated_info.plist') + extra_arguments['output-partial-info-plist'] = partial_info_plist + + outputs = [] + outputs.append( + os.path.join( + self.xcode_settings.GetBundleResourceFolder(), + 'Assets.car')) + if partial_info_plist: + outputs.append(partial_info_plist) + + keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor) + extra_env = self.xcode_settings.GetPerTargetSettings() + env = self.GetSortedXcodeEnv(additional_settings=extra_env) + env = self.ComputeExportEnvString(env) + + bundle_depends.extend(self.ninja.build( + outputs, 'compile_xcassets', xcassets, + variables=[('env', env), ('keys', keys)])) + return partial_info_plist + + def WriteMacInfoPlist(self, partial_info_plist, bundle_depends): + """Write build rules for bundle Info.plist files.""" + info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist( + generator_default_variables['PRODUCT_DIR'], + self.xcode_settings, self.GypPathToNinja) + if not info_plist: + return + out = self.ExpandSpecial(out) + if defines: + # Create an intermediate file to store preprocessed results. + intermediate_plist = self.GypPathToUniqueOutput( + os.path.basename(info_plist)) + defines = ' '.join([Define(d, self.flavor) for d in defines]) + info_plist = self.ninja.build( + intermediate_plist, 'preprocess_infoplist', info_plist, + variables=[('defines',defines)]) + + env = self.GetSortedXcodeEnv(additional_settings=extra_env) + env = self.ComputeExportEnvString(env) + + if partial_info_plist: + intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist') + info_plist = self.ninja.build( + intermediate_plist, 'merge_infoplist', + [partial_info_plist, info_plist]) + + keys = self.xcode_settings.GetExtraPlistItems(self.config_name) + keys = QuoteShellArgument(json.dumps(keys), self.flavor) + isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name) + self.ninja.build(out, 'copy_infoplist', info_plist, + variables=[('env', env), ('keys', keys), + ('binary', isBinary)]) + bundle_depends.append(out) + + def WriteSources(self, ninja_file, config_name, config, sources, predepends, + precompiled_header, spec): + """Write build rules to compile all of |sources|.""" + if self.toolset == 'host': + self.ninja.variable('ar', '$ar_host') + self.ninja.variable('cc', '$cc_host') + self.ninja.variable('cxx', '$cxx_host') + self.ninja.variable('ld', '$ld_host') + self.ninja.variable('ldxx', '$ldxx_host') + self.ninja.variable('nm', '$nm_host') + self.ninja.variable('readelf', '$readelf_host') + + if self.flavor != 'mac' or len(self.archs) == 1: + return self.WriteSourcesForArch( + self.ninja, config_name, config, sources, predepends, + precompiled_header, spec) + else: + return dict((arch, self.WriteSourcesForArch( + self.arch_subninjas[arch], config_name, config, sources, predepends, + precompiled_header, spec, arch=arch)) + for arch in self.archs) + + def WriteSourcesForArch(self, ninja_file, config_name, config, sources, + predepends, 
precompiled_header, spec, arch=None): + """Write build rules to compile all of |sources|.""" + + extra_defines = [] + if self.flavor == 'mac': + cflags = self.xcode_settings.GetCflags(config_name, arch=arch) + cflags_c = self.xcode_settings.GetCflagsC(config_name) + cflags_cc = self.xcode_settings.GetCflagsCC(config_name) + cflags_objc = ['$cflags_c'] + \ + self.xcode_settings.GetCflagsObjC(config_name) + cflags_objcc = ['$cflags_cc'] + \ + self.xcode_settings.GetCflagsObjCC(config_name) + elif self.flavor == 'win': + asmflags = self.msvs_settings.GetAsmflags(config_name) + cflags = self.msvs_settings.GetCflags(config_name) + cflags_c = self.msvs_settings.GetCflagsC(config_name) + cflags_cc = self.msvs_settings.GetCflagsCC(config_name) + extra_defines = self.msvs_settings.GetComputedDefines(config_name) + # See comment at cc_command for why there's two .pdb files. + pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName( + config_name, self.ExpandSpecial) + if not pdbpath_c: + obj = 'obj' + if self.toolset != 'target': + obj += '.' + self.toolset + pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name)) + pdbpath_c = pdbpath + '.c.pdb' + pdbpath_cc = pdbpath + '.cc.pdb' + self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c]) + self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc]) + self.WriteVariableList(ninja_file, 'pchprefix', [self.name]) + else: + cflags = config.get('cflags', []) + cflags_c = config.get('cflags_c', []) + cflags_cc = config.get('cflags_cc', []) + + # Respect environment variables related to build, but target-specific + # flags can still override them. + if self.toolset == 'target': + cflags_c = (os.environ.get('CPPFLAGS', '').split() + + os.environ.get('CFLAGS', '').split() + cflags_c) + cflags_cc = (os.environ.get('CPPFLAGS', '').split() + + os.environ.get('CXXFLAGS', '').split() + cflags_cc) + elif self.toolset == 'host': + cflags_c = (os.environ.get('CPPFLAGS_host', '').split() + + os.environ.get('CFLAGS_host', '').split() + cflags_c) + cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() + + os.environ.get('CXXFLAGS_host', '').split() + cflags_cc) + + defines = config.get('defines', []) + extra_defines + self.WriteVariableList(ninja_file, 'defines', + [Define(d, self.flavor) for d in defines]) + if self.flavor == 'win': + self.WriteVariableList(ninja_file, 'asmflags', + map(self.ExpandSpecial, asmflags)) + self.WriteVariableList(ninja_file, 'rcflags', + [QuoteShellArgument(self.ExpandSpecial(f), self.flavor) + for f in self.msvs_settings.GetRcflags(config_name, + self.GypPathToNinja)]) + + include_dirs = config.get('include_dirs', []) + + env = self.GetToolchainEnv() + if self.flavor == 'win': + include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs, + config_name) + self.WriteVariableList(ninja_file, 'includes', + [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) + for i in include_dirs]) + + if self.flavor == 'win': + midl_include_dirs = config.get('midl_include_dirs', []) + midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs( + midl_include_dirs, config_name) + self.WriteVariableList(ninja_file, 'midl_includes', + [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) + for i in midl_include_dirs]) + + pch_commands = precompiled_header.GetPchBuildCommands(arch) + if self.flavor == 'mac': + # Most targets use no precompiled headers, so only write these if needed. 
+ for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'), + ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]: + include = precompiled_header.GetInclude(ext, arch) + if include: ninja_file.variable(var, include) + + arflags = config.get('arflags', []) + + self.WriteVariableList(ninja_file, 'cflags', + map(self.ExpandSpecial, cflags)) + self.WriteVariableList(ninja_file, 'cflags_c', + map(self.ExpandSpecial, cflags_c)) + self.WriteVariableList(ninja_file, 'cflags_cc', + map(self.ExpandSpecial, cflags_cc)) + if self.flavor == 'mac': + self.WriteVariableList(ninja_file, 'cflags_objc', + map(self.ExpandSpecial, cflags_objc)) + self.WriteVariableList(ninja_file, 'cflags_objcc', + map(self.ExpandSpecial, cflags_objcc)) + self.WriteVariableList(ninja_file, 'arflags', + map(self.ExpandSpecial, arflags)) + ninja_file.newline() + outputs = [] + has_rc_source = False + for source in sources: + filename, ext = os.path.splitext(source) + ext = ext[1:] + obj_ext = self.obj_ext + if ext in ('cc', 'cpp', 'cxx'): + command = 'cxx' + self.target.uses_cpp = True + elif ext == 'c' or (ext == 'S' and self.flavor != 'win'): + command = 'cc' + elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files. + command = 'cc_s' + elif (self.flavor == 'win' and ext == 'asm' and + not self.msvs_settings.HasExplicitAsmRules(spec)): + command = 'asm' + # Add the _asm suffix as msvs is capable of handling .cc and + # .asm files of the same name without collision. + obj_ext = '_asm.obj' + elif self.flavor == 'mac' and ext == 'm': + command = 'objc' + elif self.flavor == 'mac' and ext == 'mm': + command = 'objcxx' + self.target.uses_cpp = True + elif self.flavor == 'win' and ext == 'rc': + command = 'rc' + obj_ext = '.res' + has_rc_source = True + else: + # Ignore unhandled extensions. + continue + input = self.GypPathToNinja(source) + output = self.GypPathToUniqueOutput(filename + obj_ext) + if arch is not None: + output = AddArch(output, arch) + implicit = precompiled_header.GetObjDependencies([input], [output], arch) + variables = [] + if self.flavor == 'win': + variables, output, implicit = precompiled_header.GetFlagsModifications( + input, output, implicit, command, cflags_c, cflags_cc, + self.ExpandSpecial) + ninja_file.build(output, command, input, + implicit=[gch for _, _, gch in implicit], + order_only=predepends, variables=variables) + outputs.append(output) + + if has_rc_source: + resource_include_dirs = config.get('resource_include_dirs', include_dirs) + self.WriteVariableList(ninja_file, 'resource_includes', + [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor) + for i in resource_include_dirs]) + + self.WritePchTargets(ninja_file, pch_commands) + + ninja_file.newline() + return outputs + + def WritePchTargets(self, ninja_file, pch_commands): + """Writes ninja rules to compile prefix headers.""" + if not pch_commands: + return + + for gch, lang_flag, lang, input in pch_commands: + var_name = { + 'c': 'cflags_pch_c', + 'cc': 'cflags_pch_cc', + 'm': 'cflags_pch_objc', + 'mm': 'cflags_pch_objcc', + }[lang] + + map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', } + cmd = map.get(lang) + ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)]) + + def WriteLink(self, spec, config_name, config, link_deps, compile_deps): + """Write out a link step. Fills out target.binary. 
""" + if self.flavor != 'mac' or len(self.archs) == 1: + return self.WriteLinkForArch( + self.ninja, spec, config_name, config, link_deps, compile_deps) + else: + output = self.ComputeOutput(spec) + inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec, + config_name, config, link_deps[arch], + compile_deps, arch=arch) + for arch in self.archs] + extra_bindings = [] + build_output = output + if not self.is_mac_bundle: + self.AppendPostbuildVariable(extra_bindings, spec, output, output) + + # TODO(yyanagisawa): more work needed to fix: + # https://code.google.com/p/gyp/issues/detail?id=411 + if (spec['type'] in ('shared_library', 'loadable_module') and + not self.is_mac_bundle): + extra_bindings.append(('lib', output)) + self.ninja.build([output, output + '.TOC'], 'solipo', inputs, + variables=extra_bindings) + else: + self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings) + return output + + def WriteLinkForArch(self, ninja_file, spec, config_name, config, + link_deps, compile_deps, arch=None): + """Write out a link step. Fills out target.binary. """ + command = { + 'executable': 'link', + 'loadable_module': 'solink_module', + 'shared_library': 'solink', + }[spec['type']] + command_suffix = '' + + implicit_deps = set() + solibs = set() + order_deps = set() + + if compile_deps: + # Normally, the compiles of the target already depend on compile_deps, + # but a shared_library target might have no sources and only link together + # a few static_library deps, so the link step also needs to depend + # on compile_deps to make sure actions in the shared_library target + # get run before the link. + order_deps.add(compile_deps) + + if 'dependencies' in spec: + # Two kinds of dependencies: + # - Linkable dependencies (like a .a or a .so): add them to the link line. + # - Non-linkable dependencies (like a rule that generates a file + # and writes a stamp file): add them to implicit_deps + extra_link_deps = set() + for dep in spec['dependencies']: + target = self.target_outputs.get(dep) + if not target: + continue + linkable = target.Linkable() + if linkable: + new_deps = [] + if (self.flavor == 'win' and + target.component_objs and + self.msvs_settings.IsUseLibraryDependencyInputs(config_name)): + new_deps = target.component_objs + if target.compile_deps: + order_deps.add(target.compile_deps) + elif self.flavor == 'win' and target.import_lib: + new_deps = [target.import_lib] + elif target.UsesToc(self.flavor): + solibs.add(target.binary) + implicit_deps.add(target.binary + '.TOC') + else: + new_deps = [target.binary] + for new_dep in new_deps: + if new_dep not in extra_link_deps: + extra_link_deps.add(new_dep) + link_deps.append(new_dep) + + final_output = target.FinalOutput() + if not linkable or final_output != target.binary: + implicit_deps.add(final_output) + + extra_bindings = [] + if self.target.uses_cpp and self.flavor != 'win': + extra_bindings.append(('ld', '$ldxx')) + + output = self.ComputeOutput(spec, arch) + if arch is None and not self.is_mac_bundle: + self.AppendPostbuildVariable(extra_bindings, spec, output, output) + + is_executable = spec['type'] == 'executable' + # The ldflags config key is not used on mac or win. On those platforms + # linker flags are set via xcode_settings and msvs_settings, respectively. 
+ env_ldflags = os.environ.get('LDFLAGS', '').split() + if self.flavor == 'mac': + ldflags = self.xcode_settings.GetLdflags(config_name, + self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']), + self.GypPathToNinja, arch) + ldflags = env_ldflags + ldflags + elif self.flavor == 'win': + manifest_base_name = self.GypPathToUniqueOutput( + self.ComputeOutputFileName(spec)) + ldflags, intermediate_manifest, manifest_files = \ + self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja, + self.ExpandSpecial, manifest_base_name, + output, is_executable, + self.toplevel_build) + ldflags = env_ldflags + ldflags + self.WriteVariableList(ninja_file, 'manifests', manifest_files) + implicit_deps = implicit_deps.union(manifest_files) + if intermediate_manifest: + self.WriteVariableList( + ninja_file, 'intermediatemanifest', [intermediate_manifest]) + command_suffix = _GetWinLinkRuleNameSuffix( + self.msvs_settings.IsEmbedManifest(config_name)) + def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja) + if def_file: + implicit_deps.add(def_file) + else: + # Respect environment variables related to build, but target-specific + # flags can still override them. + ldflags = env_ldflags + config.get('ldflags', []) + if is_executable and len(solibs): + rpath = 'lib/' + if self.toolset != 'target': + rpath += self.toolset + ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath) + else: + ldflags.append('-Wl,-rpath=%s' % self.target_rpath) + ldflags.append('-Wl,-rpath-link=%s' % rpath) + self.WriteVariableList(ninja_file, 'ldflags', + map(self.ExpandSpecial, ldflags)) + + library_dirs = config.get('library_dirs', []) + if self.flavor == 'win': + library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name) + for l in library_dirs] + library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l), + self.flavor) + for l in library_dirs] + else: + library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l), + self.flavor) + for l in library_dirs] + + libraries = gyp.common.uniquer(map(self.ExpandSpecial, + spec.get('libraries', []))) + if self.flavor == 'mac': + libraries = self.xcode_settings.AdjustLibraries(libraries, config_name) + elif self.flavor == 'win': + libraries = self.msvs_settings.AdjustLibraries(libraries) + + self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries) + + linked_binary = output + + if command in ('solink', 'solink_module'): + extra_bindings.append(('soname', os.path.split(output)[1])) + extra_bindings.append(('lib', + gyp.common.EncodePOSIXShellArgument(output))) + if self.flavor != 'win': + link_file_list = output + if self.is_mac_bundle: + # 'Dependency Framework.framework/Versions/A/Dependency Framework' -> + # 'Dependency Framework.framework.rsp' + link_file_list = self.xcode_settings.GetWrapperName() + if arch: + link_file_list += '.' 
+ arch + link_file_list += '.rsp' + # If an rspfile contains spaces, ninja surrounds the filename with + # quotes around it and then passes it to open(), creating a file with + # quotes in its name (and when looking for the rsp file, the name + # makes it through bash which strips the quotes) :-/ + link_file_list = link_file_list.replace(' ', '_') + extra_bindings.append( + ('link_file_list', + gyp.common.EncodePOSIXShellArgument(link_file_list))) + if self.flavor == 'win': + extra_bindings.append(('binary', output)) + if ('/NOENTRY' not in ldflags and + not self.msvs_settings.GetNoImportLibrary(config_name)): + self.target.import_lib = output + '.lib' + extra_bindings.append(('implibflag', + '/IMPLIB:%s' % self.target.import_lib)) + pdbname = self.msvs_settings.GetPDBName( + config_name, self.ExpandSpecial, output + '.pdb') + output = [output, self.target.import_lib] + if pdbname: + output.append(pdbname) + elif not self.is_mac_bundle: + output = [output, output + '.TOC'] + else: + command = command + '_notoc' + elif self.flavor == 'win': + extra_bindings.append(('binary', output)) + pdbname = self.msvs_settings.GetPDBName( + config_name, self.ExpandSpecial, output + '.pdb') + if pdbname: + output = [output, pdbname] + + + if len(solibs): + extra_bindings.append(('solibs', + gyp.common.EncodePOSIXShellList(sorted(solibs)))) + + ninja_file.build(output, command + command_suffix, link_deps, + implicit=sorted(implicit_deps), + order_only=list(order_deps), + variables=extra_bindings) + return linked_binary + + def WriteTarget(self, spec, config_name, config, link_deps, compile_deps): + extra_link_deps = any(self.target_outputs.get(dep).Linkable() + for dep in spec.get('dependencies', []) + if dep in self.target_outputs) + if spec['type'] == 'none' or (not link_deps and not extra_link_deps): + # TODO(evan): don't call this function for 'none' target types, as + # it doesn't do anything, and we fake out a 'binary' with a stamp file. + self.target.binary = compile_deps + self.target.type = 'none' + elif spec['type'] == 'static_library': + self.target.binary = self.ComputeOutput(spec) + if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not + self.is_standalone_static_library): + self.ninja.build(self.target.binary, 'alink_thin', link_deps, + order_only=compile_deps) + else: + variables = [] + if self.xcode_settings: + libtool_flags = self.xcode_settings.GetLibtoolflags(config_name) + if libtool_flags: + variables.append(('libtool_flags', libtool_flags)) + if self.msvs_settings: + libflags = self.msvs_settings.GetLibFlags(config_name, + self.GypPathToNinja) + variables.append(('libflags', libflags)) + + if self.flavor != 'mac' or len(self.archs) == 1: + self.AppendPostbuildVariable(variables, spec, + self.target.binary, self.target.binary) + self.ninja.build(self.target.binary, 'alink', link_deps, + order_only=compile_deps, variables=variables) + else: + inputs = [] + for arch in self.archs: + output = self.ComputeOutput(spec, arch) + self.arch_subninjas[arch].build(output, 'alink', link_deps[arch], + order_only=compile_deps, + variables=variables) + inputs.append(output) + # TODO: It's not clear if libtool_flags should be passed to the alink + # call that combines single-arch .a files into a fat .a file. + self.AppendPostbuildVariable(variables, spec, + self.target.binary, self.target.binary) + self.ninja.build(self.target.binary, 'alink', inputs, + # FIXME: test proving order_only=compile_deps isn't + # needed. 
+ variables=variables) + else: + self.target.binary = self.WriteLink(spec, config_name, config, link_deps, + compile_deps) + return self.target.binary + + def WriteMacBundle(self, spec, mac_bundle_depends, is_empty): + assert self.is_mac_bundle + package_framework = spec['type'] in ('shared_library', 'loadable_module') + output = self.ComputeMacBundleOutput() + if is_empty: + output += '.stamp' + variables = [] + self.AppendPostbuildVariable(variables, spec, output, self.target.binary, + is_command_start=not package_framework) + if package_framework and not is_empty: + if spec['type'] == 'shared_library' and self.xcode_settings.isIOS: + self.ninja.build(output, 'package_ios_framework', mac_bundle_depends, + variables=variables) + else: + variables.append(('version', self.xcode_settings.GetFrameworkVersion())) + self.ninja.build(output, 'package_framework', mac_bundle_depends, + variables=variables) + else: + self.ninja.build(output, 'stamp', mac_bundle_depends, + variables=variables) + self.target.bundle = output + return output + + def GetToolchainEnv(self, additional_settings=None): + """Returns the variables toolchain would set for build steps.""" + env = self.GetSortedXcodeEnv(additional_settings=additional_settings) + if self.flavor == 'win': + env = self.GetMsvsToolchainEnv( + additional_settings=additional_settings) + return env + + def GetMsvsToolchainEnv(self, additional_settings=None): + """Returns the variables Visual Studio would set for build steps.""" + return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR', + config=self.config_name) + + def GetSortedXcodeEnv(self, additional_settings=None): + """Returns the variables Xcode would set for build steps.""" + assert self.abs_build_dir + abs_build_dir = self.abs_build_dir + return gyp.xcode_emulation.GetSortedXcodeEnv( + self.xcode_settings, abs_build_dir, + os.path.join(abs_build_dir, self.build_to_base), self.config_name, + additional_settings) + + def GetSortedXcodePostbuildEnv(self): + """Returns the variables Xcode would set for postbuild steps.""" + postbuild_settings = {} + # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack. + # TODO(thakis): It would be nice to have some general mechanism instead. + strip_save_file = self.xcode_settings.GetPerTargetSetting( + 'CHROMIUM_STRIP_SAVE_FILE') + if strip_save_file: + postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file + return self.GetSortedXcodeEnv(additional_settings=postbuild_settings) + + def AppendPostbuildVariable(self, variables, spec, output, binary, + is_command_start=False): + """Adds a 'postbuild' variable if there is a postbuild for |output|.""" + postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start) + if postbuild: + variables.append(('postbuilds', postbuild)) + + def GetPostbuildCommand(self, spec, output, output_binary, is_command_start): + """Returns a shell command that runs all the postbuilds, and removes + |output| if any of them fails. 
If |is_command_start| is False, then the + returned string will start with ' && '.""" + if not self.xcode_settings or spec['type'] == 'none' or not output: + return '' + output = QuoteShellArgument(output, self.flavor) + postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True) + if output_binary is not None: + postbuilds = self.xcode_settings.AddImplicitPostbuilds( + self.config_name, + os.path.normpath(os.path.join(self.base_to_build, output)), + QuoteShellArgument( + os.path.normpath(os.path.join(self.base_to_build, output_binary)), + self.flavor), + postbuilds, quiet=True) + + if not postbuilds: + return '' + # Postbuilds expect to be run in the gyp file's directory, so insert an + # implicit postbuild to cd to there. + postbuilds.insert(0, gyp.common.EncodePOSIXShellList( + ['cd', self.build_to_base])) + env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv()) + # G will be non-null if any postbuild fails. Run all postbuilds in a + # subshell. + commands = env + ' (' + \ + ' && '.join([ninja_syntax.escape(command) for command in postbuilds]) + command_string = (commands + '); G=$$?; ' + # Remove the final output if any postbuild failed. + '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)') + if is_command_start: + return '(' + command_string + ' && ' + else: + return '$ && (' + command_string + + def ComputeExportEnvString(self, env): + """Given an environment, returns a string looking like + 'export FOO=foo; export BAR="${FOO} bar;' + that exports |env| to the shell.""" + export_str = [] + for k, v in env: + export_str.append('export %s=%s;' % + (k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v)))) + return ' '.join(export_str) + + def ComputeMacBundleOutput(self): + """Return the 'output' (full output path) to a bundle output directory.""" + assert self.is_mac_bundle + path = generator_default_variables['PRODUCT_DIR'] + return self.ExpandSpecial( + os.path.join(path, self.xcode_settings.GetWrapperName())) + + def ComputeOutputFileName(self, spec, type=None): + """Compute the filename of the final output for the current target.""" + if not type: + type = spec['type'] + + default_variables = copy.copy(generator_default_variables) + CalculateVariables(default_variables, {'flavor': self.flavor}) + + # Compute filename prefix: the product prefix, or a default for + # the product type. + DEFAULT_PREFIX = { + 'loadable_module': default_variables['SHARED_LIB_PREFIX'], + 'shared_library': default_variables['SHARED_LIB_PREFIX'], + 'static_library': default_variables['STATIC_LIB_PREFIX'], + 'executable': default_variables['EXECUTABLE_PREFIX'], + } + prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, '')) + + # Compute filename extension: the product extension, or a default + # for the product type. + DEFAULT_EXTENSION = { + 'loadable_module': default_variables['SHARED_LIB_SUFFIX'], + 'shared_library': default_variables['SHARED_LIB_SUFFIX'], + 'static_library': default_variables['STATIC_LIB_SUFFIX'], + 'executable': default_variables['EXECUTABLE_SUFFIX'], + } + extension = spec.get('product_extension') + if extension: + extension = '.' + extension + else: + extension = DEFAULT_EXTENSION.get(type, '') + + if 'product_name' in spec: + # If we were given an explicit name, use that. + target = spec['product_name'] + else: + # Otherwise, derive a name from the target name. + target = spec['target_name'] + if prefix == 'lib': + # Snip out an extra 'lib' from libs if appropriate. 
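# --- Editor's note: illustrative sketch, not part of the vendored GYP source. ---
# It condenses the naming rule that ComputeOutputFileName implements around this
# point: <prefix><target><extension>, with a leading 'lib' snipped from the target
# name when the platform prefix is already 'lib' (which is what the StripPrefix
# helper used just below appears to do). The Linux-style defaults here are
# assumptions made only for the sketch.
def _sketch_output_file_name(target_name, target_type):
  prefixes = {'shared_library': 'lib', 'static_library': 'lib', 'executable': ''}
  suffixes = {'shared_library': '.so', 'static_library': '.a', 'executable': ''}
  prefix, suffix = prefixes[target_type], suffixes[target_type]
  if prefix == 'lib' and target_name.startswith('lib'):
    target_name = target_name[len('lib'):]   # avoids names like 'liblibuv.so'
  return '%s%s%s' % (prefix, target_name, suffix)
# _sketch_output_file_name('uv', 'shared_library')    -> 'libuv.so'
# _sketch_output_file_name('libuv', 'shared_library') -> 'libuv.so' as well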
+ target = StripPrefix(target, 'lib') + + if type in ('static_library', 'loadable_module', 'shared_library', + 'executable'): + return '%s%s%s' % (prefix, target, extension) + elif type == 'none': + return '%s.stamp' % target + else: + raise Exception('Unhandled output type %s' % type) + + def ComputeOutput(self, spec, arch=None): + """Compute the path for the final output of the spec.""" + type = spec['type'] + + if self.flavor == 'win': + override = self.msvs_settings.GetOutputName(self.config_name, + self.ExpandSpecial) + if override: + return override + + if arch is None and self.flavor == 'mac' and type in ( + 'static_library', 'executable', 'shared_library', 'loadable_module'): + filename = self.xcode_settings.GetExecutablePath() + else: + filename = self.ComputeOutputFileName(spec, type) + + if arch is None and 'product_dir' in spec: + path = os.path.join(spec['product_dir'], filename) + return self.ExpandSpecial(path) + + # Some products go into the output root, libraries go into shared library + # dir, and everything else goes into the normal place. + type_in_output_root = ['executable', 'loadable_module'] + if self.flavor == 'mac' and self.toolset == 'target': + type_in_output_root += ['shared_library', 'static_library'] + elif self.flavor == 'win' and self.toolset == 'target': + type_in_output_root += ['shared_library'] + + if arch is not None: + # Make sure partial executables don't end up in a bundle or the regular + # output directory. + archdir = 'arch' + if self.toolset != 'target': + archdir = os.path.join('arch', '%s' % self.toolset) + return os.path.join(archdir, AddArch(filename, arch)) + elif type in type_in_output_root or self.is_standalone_static_library: + return filename + elif type == 'shared_library': + libdir = 'lib' + if self.toolset != 'target': + libdir = os.path.join('lib', '%s' % self.toolset) + return os.path.join(libdir, filename) + else: + return self.GypPathToUniqueOutput(filename, qualified=False) + + def WriteVariableList(self, ninja_file, var, values): + assert not isinstance(values, str) + if values is None: + values = [] + ninja_file.variable(var, ' '.join(values)) + + def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool, + depfile=None): + """Write out a new ninja "rule" statement for a given command. + + Returns the name of the new rule, and a copy of |args| with variables + expanded.""" + + if self.flavor == 'win': + args = [self.msvs_settings.ConvertVSMacros( + arg, self.base_to_build, config=self.config_name) + for arg in args] + description = self.msvs_settings.ConvertVSMacros( + description, config=self.config_name) + elif self.flavor == 'mac': + # |env| is an empty list on non-mac. + args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args] + description = gyp.xcode_emulation.ExpandEnvVars(description, env) + + # TODO: we shouldn't need to qualify names; we do it because + # currently the ninja rule namespace is global, but it really + # should be scoped to the subninja. + rule_name = self.name + if self.toolset == 'target': + rule_name += '.' + self.toolset + rule_name += '.' + name + rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name) + + # Remove variable references, but not if they refer to the magic rule + # variables. This is not quite right, as it also protects these for + # actions, not just for rules where they are valid. Good enough. + protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ] + protect = '(?!' 
+ '|'.join(map(re.escape, protect)) + ')' + description = re.sub(protect + r'\$', '_', description) + + # gyp dictates that commands are run from the base directory. + # cd into the directory before running, and adjust paths in + # the arguments to point to the proper locations. + rspfile = None + rspfile_content = None + args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args] + if self.flavor == 'win': + rspfile = rule_name + '.$unique_name.rsp' + # The cygwin case handles this inside the bash sub-shell. + run_in = '' if is_cygwin else ' ' + self.build_to_base + if is_cygwin: + rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine( + args, self.build_to_base) + else: + rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args) + command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable + + rspfile + run_in) + else: + env = self.ComputeExportEnvString(env) + command = gyp.common.EncodePOSIXShellList(args) + command = 'cd %s; ' % self.build_to_base + env + command + + # GYP rules/actions express being no-ops by not touching their outputs. + # Avoid executing downstream dependencies in this case by specifying + # restat=1 to ninja. + self.ninja.rule(rule_name, command, description, depfile=depfile, + restat=True, pool=pool, + rspfile=rspfile, rspfile_content=rspfile_content) + self.ninja.newline() + + return rule_name, args + + +def CalculateVariables(default_variables, params): + """Calculate additional variables for use in the build (called by gyp).""" + global generator_additional_non_configuration_keys + global generator_additional_path_sections + flavor = gyp.common.GetFlavor(params) + if flavor == 'mac': + default_variables.setdefault('OS', 'mac') + default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib') + default_variables.setdefault('SHARED_LIB_DIR', + generator_default_variables['PRODUCT_DIR']) + default_variables.setdefault('LIB_DIR', + generator_default_variables['PRODUCT_DIR']) + + # Copy additional generator configuration data from Xcode, which is shared + # by the Mac Ninja generator. + import gyp.generator.xcode as xcode_generator + generator_additional_non_configuration_keys = getattr(xcode_generator, + 'generator_additional_non_configuration_keys', []) + generator_additional_path_sections = getattr(xcode_generator, + 'generator_additional_path_sections', []) + global generator_extra_sources_for_rules + generator_extra_sources_for_rules = getattr(xcode_generator, + 'generator_extra_sources_for_rules', []) + elif flavor == 'win': + exts = gyp.MSVSUtil.TARGET_TYPE_EXT + default_variables.setdefault('OS', 'win') + default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable'] + default_variables['STATIC_LIB_PREFIX'] = '' + default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library'] + default_variables['SHARED_LIB_PREFIX'] = '' + default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library'] + + # Copy additional generator configuration data from VS, which is shared + # by the Windows Ninja generator. + import gyp.generator.msvs as msvs_generator + generator_additional_non_configuration_keys = getattr(msvs_generator, + 'generator_additional_non_configuration_keys', []) + generator_additional_path_sections = getattr(msvs_generator, + 'generator_additional_path_sections', []) + + gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) + else: + operating_system = flavor + if flavor == 'android': + operating_system = 'linux' # Keep this legacy behavior for now. 
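# --- Editor's note: standalone illustration, not part of the vendored GYP source. ---
# It reproduces the description-scrubbing step from WriteNewNinjaRule above: every
# '$' reference in a rule description is replaced with '_' unless it is one of the
# protected magic rule variables. The sample description string is made up; 're'
# is already imported at the top of this module.
_protected = ['${root}', '${dirname}', '${source}', '${ext}', '${name}']
_scrub = '(?!' + '|'.join(map(re.escape, _protected)) + ')' + r'\$'
print(re.sub(_scrub, '_', 'RULE ${source} -> $outdir'))
# prints: RULE ${source} -> _outdir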
+ default_variables.setdefault('OS', operating_system) + default_variables.setdefault('SHARED_LIB_SUFFIX', '.so') + default_variables.setdefault('SHARED_LIB_DIR', + os.path.join('$!PRODUCT_DIR', 'lib')) + default_variables.setdefault('LIB_DIR', + os.path.join('$!PRODUCT_DIR', 'obj')) + +def ComputeOutputDir(params): + """Returns the path from the toplevel_dir to the build output directory.""" + # generator_dir: relative path from pwd to where make puts build files. + # Makes migrating from make to ninja easier, ninja doesn't put anything here. + generator_dir = os.path.relpath(params['options'].generator_output or '.') + + # output_dir: relative path from generator_dir to the build directory. + output_dir = params.get('generator_flags', {}).get('output_dir', 'out') + + # Relative path from source root to our output files. e.g. "out" + return os.path.normpath(os.path.join(generator_dir, output_dir)) + + +def CalculateGeneratorInputInfo(params): + """Called by __init__ to initialize generator values based on params.""" + # E.g. "out/gypfiles" + toplevel = params['options'].toplevel_dir + qualified_out_dir = os.path.normpath(os.path.join( + toplevel, ComputeOutputDir(params), 'gypfiles')) + + global generator_filelist_paths + generator_filelist_paths = { + 'toplevel': toplevel, + 'qualified_out_dir': qualified_out_dir, + } + + +def OpenOutput(path, mode='w'): + """Open |path| for writing, creating directories if necessary.""" + gyp.common.EnsureDirExists(path) + return open(path, mode) + + +def CommandWithWrapper(cmd, wrappers, prog): + wrapper = wrappers.get(cmd, '') + if wrapper: + return wrapper + ' ' + prog + return prog + + +def GetDefaultConcurrentLinks(): + """Returns a best-guess for a number of concurrent links.""" + pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0)) + if pool_size: + return pool_size + + if sys.platform in ('win32', 'cygwin'): + import ctypes + + class MEMORYSTATUSEX(ctypes.Structure): + _fields_ = [ + ("dwLength", ctypes.c_ulong), + ("dwMemoryLoad", ctypes.c_ulong), + ("ullTotalPhys", ctypes.c_ulonglong), + ("ullAvailPhys", ctypes.c_ulonglong), + ("ullTotalPageFile", ctypes.c_ulonglong), + ("ullAvailPageFile", ctypes.c_ulonglong), + ("ullTotalVirtual", ctypes.c_ulonglong), + ("ullAvailVirtual", ctypes.c_ulonglong), + ("sullAvailExtendedVirtual", ctypes.c_ulonglong), + ] + + stat = MEMORYSTATUSEX() + stat.dwLength = ctypes.sizeof(stat) + ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat)) + + # VS 2015 uses 20% more working set than VS 2013 and can consume all RAM + # on a 64 GB machine. + mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB + hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32))) + return min(mem_limit, hard_cap) + elif sys.platform.startswith('linux'): + if os.path.exists("/proc/meminfo"): + with open("/proc/meminfo") as meminfo: + memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB') + for line in meminfo: + match = memtotal_re.match(line) + if not match: + continue + # Allow 8Gb per link on Linux because Gold is quite memory hungry + return max(1, int(match.group(1)) / (8 * (2 ** 20))) + return 1 + elif sys.platform == 'darwin': + try: + avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize'])) + # A static library debug build of Chromium's unit_tests takes ~2.7GB, so + # 4GB per ld process allows for some more bloat. + return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB + except: + return 1 + else: + # TODO(scottmg): Implement this for other platforms. 
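# --- Editor's note: condensed illustration of the heuristic above, not part of the
# vendored GYP source. It shows only the Linux branch: one concurrent link per
# 8 GiB of MemTotal, never fewer than one. The MemTotal sample line is made up.
def _links_for_memtotal_kb(memtotal_kb):
  return max(1, memtotal_kb // (8 * (2 ** 20)))   # 8 GiB expressed in kB
_sample = 'MemTotal:       33554432 kB'
_match = re.match(r'^MemTotal:\s*(\d*)\s*kB', _sample)
print(_links_for_memtotal_kb(int(_match.group(1))))   # 32 GiB of RAM -> 4 links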
+ return 1 + + +def _GetWinLinkRuleNameSuffix(embed_manifest): + """Returns the suffix used to select an appropriate linking rule depending on + whether the manifest embedding is enabled.""" + return '_embed' if embed_manifest else '' + + +def _AddWinLinkRules(master_ninja, embed_manifest): + """Adds link rules for Windows platform to |master_ninja|.""" + def FullLinkCommand(ldcmd, out, binary_type): + resource_name = { + 'exe': '1', + 'dll': '2', + }[binary_type] + return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \ + '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \ + '$manifests' % { + 'python': sys.executable, + 'out': out, + 'ldcmd': ldcmd, + 'resname': resource_name, + 'embed': embed_manifest } + rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest) + use_separate_mspdbsrv = ( + int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0) + dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper() + dllcmd = ('%s gyp-win-tool link-wrapper $arch %s ' + '$ld /nologo $implibflag /DLL /OUT:$binary ' + '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv)) + dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll') + master_ninja.rule('solink' + rule_name_suffix, + description=dlldesc, command=dllcmd, + rspfile='$binary.rsp', + rspfile_content='$libs $in_newline $ldflags', + restat=True, + pool='link_pool') + master_ninja.rule('solink_module' + rule_name_suffix, + description=dlldesc, command=dllcmd, + rspfile='$binary.rsp', + rspfile_content='$libs $in_newline $ldflags', + restat=True, + pool='link_pool') + # Note that ldflags goes at the end so that it has the option of + # overriding default settings earlier in the command line. + exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s ' + '$ld /nologo /OUT:$binary @$binary.rsp' % + (sys.executable, use_separate_mspdbsrv)) + exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe') + master_ninja.rule('link' + rule_name_suffix, + description='LINK%s $binary' % rule_name_suffix.upper(), + command=exe_cmd, + rspfile='$binary.rsp', + rspfile_content='$in_newline $libs $ldflags', + pool='link_pool') + + +def GenerateOutputForConfig(target_list, target_dicts, data, params, + config_name): + options = params['options'] + flavor = gyp.common.GetFlavor(params) + generator_flags = params.get('generator_flags', {}) + + # build_dir: relative path from source root to our output files. + # e.g. "out/Debug" + build_dir = os.path.normpath( + os.path.join(ComputeOutputDir(params), config_name)) + + toplevel_build = os.path.join(options.toplevel_dir, build_dir) + + master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja')) + master_ninja = ninja_syntax.Writer(master_ninja_file, width=120) + + # Put build-time support tools in out/{config_name}. + gyp.common.CopyTool(flavor, toplevel_build, generator_flags) + + # Grab make settings for CC/CXX. + # The rules are + # - The priority from low to high is gcc/g++, the 'make_global_settings' in + # gyp, the environment variable. + # - If there is no 'make_global_settings' for CC.host/CXX.host or + # 'CC_host'/'CXX_host' enviroment variable, cc_host/cxx_host should be set + # to cc/cxx. + if flavor == 'win': + ar = 'lib.exe' + # cc and cxx must be set to the correct architecture by overriding with one + # of cl_x86 or cl_x64 below. 
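# --- Editor's note: illustrative sketch of the precedence spelled out in the
# comment above, not part of the vendored GYP source. From lowest to highest:
# the built-in default ('cc'), any 'CC' entry in make_global_settings, then the
# CC_target/CC environment variables (the job GetEnvironFallback does further
# below). Wrappers and the build_to_root path joining are omitted here.
def _resolve_cc(make_global_settings, environ):
  cc = 'cc'                                  # built-in default
  for key, value in make_global_settings:
    if key == 'CC':
      cc = value                             # gyp-file override
  return environ.get('CC_target', environ.get('CC', cc))   # environment wins
# _resolve_cc([('CC', 'clang')], {})               -> 'clang'
# _resolve_cc([('CC', 'clang')], {'CC': 'gcc-9'})  -> 'gcc-9'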
+ cc = 'UNSET' + cxx = 'UNSET' + ld = 'link.exe' + ld_host = '$ld' + else: + ar = 'ar' + cc = 'cc' + cxx = 'c++' + ld = '$cc' + ldxx = '$cxx' + ld_host = '$cc_host' + ldxx_host = '$cxx_host' + + ar_host = ar + cc_host = None + cxx_host = None + cc_host_global_setting = None + cxx_host_global_setting = None + clang_cl = None + nm = 'nm' + nm_host = 'nm' + readelf = 'readelf' + readelf_host = 'readelf' + + build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0]) + make_global_settings = data[build_file].get('make_global_settings', []) + build_to_root = gyp.common.InvertRelativePath(build_dir, + options.toplevel_dir) + wrappers = {} + for key, value in make_global_settings: + if key == 'AR': + ar = os.path.join(build_to_root, value) + if key == 'AR.host': + ar_host = os.path.join(build_to_root, value) + if key == 'CC': + cc = os.path.join(build_to_root, value) + if cc.endswith('clang-cl'): + clang_cl = cc + if key == 'CXX': + cxx = os.path.join(build_to_root, value) + if key == 'CC.host': + cc_host = os.path.join(build_to_root, value) + cc_host_global_setting = value + if key == 'CXX.host': + cxx_host = os.path.join(build_to_root, value) + cxx_host_global_setting = value + if key == 'LD': + ld = os.path.join(build_to_root, value) + if key == 'LD.host': + ld_host = os.path.join(build_to_root, value) + if key == 'NM': + nm = os.path.join(build_to_root, value) + if key == 'NM.host': + nm_host = os.path.join(build_to_root, value) + if key == 'READELF': + readelf = os.path.join(build_to_root, value) + if key == 'READELF.host': + readelf_host = os.path.join(build_to_root, value) + if key.endswith('_wrapper'): + wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value) + + # Support wrappers from environment variables too. + for key, value in os.environ.iteritems(): + if key.lower().endswith('_wrapper'): + key_prefix = key[:-len('_wrapper')] + key_prefix = re.sub(r'\.HOST$', '.host', key_prefix) + wrappers[key_prefix] = os.path.join(build_to_root, value) + + mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None) + if mac_toolchain_dir: + wrappers['LINK'] = "export DEVELOPER_DIR='%s' &&" % mac_toolchain_dir + + if flavor == 'win': + configs = [target_dicts[qualified_target]['configurations'][config_name] + for qualified_target in target_list] + shared_system_includes = None + if not generator_flags.get('ninja_use_custom_environment_files', 0): + shared_system_includes = \ + gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes( + configs, generator_flags) + cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles( + toplevel_build, generator_flags, shared_system_includes, OpenOutput) + for arch, path in sorted(cl_paths.iteritems()): + if clang_cl: + # If we have selected clang-cl, use that instead. + path = clang_cl + command = CommandWithWrapper('CC', wrappers, + QuoteShellArgument(path, 'win')) + if clang_cl: + # Use clang-cl to cross-compile for x86 or x86_64. 
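# --- Editor's note: illustrative usage of the CommandWithWrapper helper defined
# earlier in this file and of the '*_wrapper' convention handled in the loop
# above; not part of the vendored GYP source. The 'ccache' wrapper and compiler
# names are made-up examples.
_demo_wrappers = {'CC': 'ccache'}            # e.g. from a CC_wrapper setting
print(CommandWithWrapper('CC', _demo_wrappers, 'clang'))    # -> 'ccache clang'
print(CommandWithWrapper('LINK', _demo_wrappers, 'clang'))  # -> 'clang' (no wrapper)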
+ command += (' -m32' if arch == 'x86' else ' -m64') + master_ninja.variable('cl_' + arch, command) + + cc = GetEnvironFallback(['CC_target', 'CC'], cc) + master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc)) + cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx) + master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx)) + + if flavor == 'win': + master_ninja.variable('ld', ld) + master_ninja.variable('idl', 'midl.exe') + master_ninja.variable('ar', ar) + master_ninja.variable('rc', 'rc.exe') + master_ninja.variable('ml_x86', 'ml.exe') + master_ninja.variable('ml_x64', 'ml64.exe') + master_ninja.variable('mt', 'mt.exe') + else: + master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld)) + master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx)) + master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar)) + if flavor != 'mac': + # Mac does not use readelf/nm for .TOC generation, so avoiding polluting + # the master ninja with extra unused variables. + master_ninja.variable( + 'nm', GetEnvironFallback(['NM_target', 'NM'], nm)) + master_ninja.variable( + 'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf)) + + if generator_supports_multiple_toolsets: + if not cc_host: + cc_host = cc + if not cxx_host: + cxx_host = cxx + + master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host)) + master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host)) + master_ninja.variable('readelf_host', + GetEnvironFallback(['READELF_host'], readelf_host)) + cc_host = GetEnvironFallback(['CC_host'], cc_host) + cxx_host = GetEnvironFallback(['CXX_host'], cxx_host) + + # The environment variable could be used in 'make_global_settings', like + # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here. + if '$(CC)' in cc_host and cc_host_global_setting: + cc_host = cc_host_global_setting.replace('$(CC)', cc) + if '$(CXX)' in cxx_host and cxx_host_global_setting: + cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx) + master_ninja.variable('cc_host', + CommandWithWrapper('CC.host', wrappers, cc_host)) + master_ninja.variable('cxx_host', + CommandWithWrapper('CXX.host', wrappers, cxx_host)) + if flavor == 'win': + master_ninja.variable('ld_host', ld_host) + else: + master_ninja.variable('ld_host', CommandWithWrapper( + 'LINK', wrappers, ld_host)) + master_ninja.variable('ldxx_host', CommandWithWrapper( + 'LINK', wrappers, ldxx_host)) + + master_ninja.newline() + + master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks()) + master_ninja.newline() + + deps = 'msvc' if flavor == 'win' else 'gcc' + + if flavor != 'win': + master_ninja.rule( + 'cc', + description='CC $out', + command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c ' + '$cflags_pch_c -c $in -o $out'), + depfile='$out.d', + deps=deps) + master_ninja.rule( + 'cc_s', + description='CC $out', + command=('$cc $defines $includes $cflags $cflags_c ' + '$cflags_pch_c -c $in -o $out')) + master_ninja.rule( + 'cxx', + description='CXX $out', + command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc ' + '$cflags_pch_cc -c $in -o $out'), + depfile='$out.d', + deps=deps) + else: + # TODO(scottmg) Separate pdb names is a test to see if it works around + # http://crbug.com/142362. It seems there's a race between the creation of + # the .pdb by the precompiled header step for .cc and the compilation of + # .c files. 
This should be handled by mspdbsrv, but rarely errors out with + # c1xx : fatal error C1033: cannot open program database + # By making the rules target separate pdb files this might be avoided. + cc_command = ('ninja -t msvc -e $arch ' + + '-- ' + '$cc /nologo /showIncludes /FC ' + '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ') + cxx_command = ('ninja -t msvc -e $arch ' + + '-- ' + '$cxx /nologo /showIncludes /FC ' + '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ') + master_ninja.rule( + 'cc', + description='CC $out', + command=cc_command, + rspfile='$out.rsp', + rspfile_content='$defines $includes $cflags $cflags_c', + deps=deps) + master_ninja.rule( + 'cxx', + description='CXX $out', + command=cxx_command, + rspfile='$out.rsp', + rspfile_content='$defines $includes $cflags $cflags_cc', + deps=deps) + master_ninja.rule( + 'idl', + description='IDL $in', + command=('%s gyp-win-tool midl-wrapper $arch $outdir ' + '$tlb $h $dlldata $iid $proxy $in ' + '$midl_includes $idlflags' % sys.executable)) + master_ninja.rule( + 'rc', + description='RC $in', + # Note: $in must be last otherwise rc.exe complains. + command=('%s gyp-win-tool rc-wrapper ' + '$arch $rc $defines $resource_includes $rcflags /fo$out $in' % + sys.executable)) + master_ninja.rule( + 'asm', + description='ASM $out', + command=('%s gyp-win-tool asm-wrapper ' + '$arch $asm $defines $includes $asmflags /c /Fo $out $in' % + sys.executable)) + + if flavor != 'mac' and flavor != 'win': + master_ninja.rule( + 'alink', + description='AR $out', + command='rm -f $out && $ar rcs $arflags $out $in') + master_ninja.rule( + 'alink_thin', + description='AR $out', + command='rm -f $out && $ar rcsT $arflags $out $in') + + # This allows targets that only need to depend on $lib's API to declare an + # order-only dependency on $lib.TOC and avoid relinking such downstream + # dependencies when $lib changes only in non-public ways. + # The resulting string leaves an uninterpolated %{suffix} which + # is used in the final substitution below. + mtime_preserving_solink_base = ( + 'if [ ! -e $lib -o ! -e $lib.TOC ]; then ' + '%(solink)s && %(extract_toc)s > $lib.TOC; else ' + '%(solink)s && %(extract_toc)s > $lib.tmp && ' + 'if ! 
cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; ' + 'fi; fi' + % { 'solink': + '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s', + 'extract_toc': + ('{ $readelf -d $lib | grep SONAME ; ' + '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')}) + + master_ninja.rule( + 'solink', + description='SOLINK $lib', + restat=True, + command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'}, + rspfile='$link_file_list', + rspfile_content= + '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs', + pool='link_pool') + master_ninja.rule( + 'solink_module', + description='SOLINK(module) $lib', + restat=True, + command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'}, + rspfile='$link_file_list', + rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs', + pool='link_pool') + master_ninja.rule( + 'link', + description='LINK $out', + command=('$ld $ldflags -o $out ' + '-Wl,--start-group $in -Wl,--end-group $solibs $libs'), + pool='link_pool') + elif flavor == 'win': + master_ninja.rule( + 'alink', + description='LIB $out', + command=('%s gyp-win-tool link-wrapper $arch False ' + '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' % + sys.executable), + rspfile='$out.rsp', + rspfile_content='$in_newline $libflags') + _AddWinLinkRules(master_ninja, embed_manifest=True) + _AddWinLinkRules(master_ninja, embed_manifest=False) + else: + master_ninja.rule( + 'objc', + description='OBJC $out', + command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc ' + '$cflags_pch_objc -c $in -o $out'), + depfile='$out.d', + deps=deps) + master_ninja.rule( + 'objcxx', + description='OBJCXX $out', + command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc ' + '$cflags_pch_objcc -c $in -o $out'), + depfile='$out.d', + deps=deps) + master_ninja.rule( + 'alink', + description='LIBTOOL-STATIC $out, POSTBUILDS', + command='rm -f $out && ' + './gyp-mac-tool filter-libtool libtool $libtool_flags ' + '-static -o $out $in' + '$postbuilds') + master_ninja.rule( + 'lipo', + description='LIPO $out, POSTBUILDS', + command='rm -f $out && lipo -create $in -output $out$postbuilds') + master_ninja.rule( + 'solipo', + description='SOLIPO $out, POSTBUILDS', + command=( + 'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&' + '%(extract_toc)s > $lib.TOC' + % { 'extract_toc': + '{ otool -l $lib | grep LC_ID_DYLIB -A 5; ' + 'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})) + + + # Record the public interface of $lib in $lib.TOC. See the corresponding + # comment in the posix section above for details. + solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s' + mtime_preserving_solink_base = ( + 'if [ ! -e $lib -o ! -e $lib.TOC ] || ' + # Always force dependent targets to relink if this library + # reexports something. Handling this correctly would require + # recursive TOC dumping but this is rare in practice, so punt. + 'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then ' + '%(solink)s && %(extract_toc)s > $lib.TOC; ' + 'else ' + '%(solink)s && %(extract_toc)s > $lib.tmp && ' + 'if ! 
cmp -s $lib.tmp $lib.TOC; then ' + 'mv $lib.tmp $lib.TOC ; ' + 'fi; ' + 'fi' + % { 'solink': solink_base, + 'extract_toc': + '{ otool -l $lib | grep LC_ID_DYLIB -A 5; ' + 'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}) + + + solink_suffix = '@$link_file_list$postbuilds' + master_ninja.rule( + 'solink', + description='SOLINK $lib, POSTBUILDS', + restat=True, + command=mtime_preserving_solink_base % {'suffix': solink_suffix, + 'type': '-shared'}, + rspfile='$link_file_list', + rspfile_content='$in $solibs $libs', + pool='link_pool') + master_ninja.rule( + 'solink_notoc', + description='SOLINK $lib, POSTBUILDS', + restat=True, + command=solink_base % {'suffix':solink_suffix, 'type': '-shared'}, + rspfile='$link_file_list', + rspfile_content='$in $solibs $libs', + pool='link_pool') + + master_ninja.rule( + 'solink_module', + description='SOLINK(module) $lib, POSTBUILDS', + restat=True, + command=mtime_preserving_solink_base % {'suffix': solink_suffix, + 'type': '-bundle'}, + rspfile='$link_file_list', + rspfile_content='$in $solibs $libs', + pool='link_pool') + master_ninja.rule( + 'solink_module_notoc', + description='SOLINK(module) $lib, POSTBUILDS', + restat=True, + command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'}, + rspfile='$link_file_list', + rspfile_content='$in $solibs $libs', + pool='link_pool') + + master_ninja.rule( + 'link', + description='LINK $out, POSTBUILDS', + command=('$ld $ldflags -o $out ' + '$in $solibs $libs$postbuilds'), + pool='link_pool') + master_ninja.rule( + 'preprocess_infoplist', + description='PREPROCESS INFOPLIST $out', + command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && ' + 'plutil -convert xml1 $out $out')) + master_ninja.rule( + 'copy_infoplist', + description='COPY INFOPLIST $in', + command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys') + master_ninja.rule( + 'merge_infoplist', + description='MERGE INFOPLISTS $in', + command='$env ./gyp-mac-tool merge-info-plist $out $in') + master_ninja.rule( + 'compile_xcassets', + description='COMPILE XCASSETS $in', + command='$env ./gyp-mac-tool compile-xcassets $keys $in') + master_ninja.rule( + 'compile_ios_framework_headers', + description='COMPILE HEADER MAPS AND COPY FRAMEWORK HEADERS $in', + command='$env ./gyp-mac-tool compile-ios-framework-header-map $out ' + '$framework $in && $env ./gyp-mac-tool ' + 'copy-ios-framework-headers $framework $copy_headers') + master_ninja.rule( + 'mac_tool', + description='MACTOOL $mactool_cmd $in', + command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary') + master_ninja.rule( + 'package_framework', + description='PACKAGE FRAMEWORK $out, POSTBUILDS', + command='./gyp-mac-tool package-framework $out $version$postbuilds ' + '&& touch $out') + master_ninja.rule( + 'package_ios_framework', + description='PACKAGE IOS FRAMEWORK $out, POSTBUILDS', + command='./gyp-mac-tool package-ios-framework $out $postbuilds ' + '&& touch $out') + if flavor == 'win': + master_ninja.rule( + 'stamp', + description='STAMP $out', + command='%s gyp-win-tool stamp $out' % sys.executable) + master_ninja.rule( + 'copy', + description='COPY $in $out', + command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable) + else: + master_ninja.rule( + 'stamp', + description='STAMP $out', + command='${postbuilds}touch $out') + master_ninja.rule( + 'copy', + description='COPY $in $out', + command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)') + master_ninja.newline() + + all_targets = set() + for build_file in 
params['build_files']: + for target in gyp.common.AllTargets(target_list, + target_dicts, + os.path.normpath(build_file)): + all_targets.add(target) + all_outputs = set() + + # target_outputs is a map from qualified target name to a Target object. + target_outputs = {} + # target_short_names is a map from target short name to a list of Target + # objects. + target_short_names = {} + + # short name of targets that were skipped because they didn't contain anything + # interesting. + # NOTE: there may be overlap between this an non_empty_target_names. + empty_target_names = set() + + # Set of non-empty short target names. + # NOTE: there may be overlap between this an empty_target_names. + non_empty_target_names = set() + + for qualified_target in target_list: + # qualified_target is like: third_party/icu/icu.gyp:icui18n#target + build_file, name, toolset = \ + gyp.common.ParseQualifiedTarget(qualified_target) + + this_make_global_settings = data[build_file].get('make_global_settings', []) + assert make_global_settings == this_make_global_settings, ( + "make_global_settings needs to be the same for all targets. %s vs. %s" % + (this_make_global_settings, make_global_settings)) + + spec = target_dicts[qualified_target] + if flavor == 'mac': + gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec) + + # If build_file is a symlink, we must not follow it because there's a chance + # it could point to a path above toplevel_dir, and we cannot correctly deal + # with that case at the moment. + build_file = gyp.common.RelativePath(build_file, options.toplevel_dir, + False) + + qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name, + toolset) + hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest() + + base_path = os.path.dirname(build_file) + obj = 'obj' + if toolset != 'target': + obj += '.' + toolset + output_file = os.path.join(obj, base_path, name + '.ninja') + + ninja_output = StringIO() + writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir, + ninja_output, + toplevel_build, output_file, + flavor, toplevel_dir=options.toplevel_dir) + + target = writer.WriteSpec(spec, config_name, generator_flags) + + if ninja_output.tell() > 0: + # Only create files for ninja files that actually have contents. + with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file: + ninja_file.write(ninja_output.getvalue()) + ninja_output.close() + master_ninja.subninja(output_file) + + if target: + if name != target.FinalOutput() and spec['toolset'] == 'target': + target_short_names.setdefault(name, []).append(target) + target_outputs[qualified_target] = target + if qualified_target in all_targets: + all_outputs.add(target.FinalOutput()) + non_empty_target_names.add(name) + else: + empty_target_names.add(name) + + if target_short_names: + # Write a short name to build this target. This benefits both the + # "build chrome" case as well as the gyp tests, which expect to be + # able to run actions and build libraries by their short name. + master_ninja.newline() + master_ninja.comment('Short names for targets.') + for short_name in sorted(target_short_names): + master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in + target_short_names[short_name]]) + + # Write phony targets for any empty targets that weren't written yet. As + # short names are not necessarily unique only do this for short names that + # haven't already been output for another target. 
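# --- Editor's note: simplified sketch of how a qualified target name maps to its
# per-target .ninja file in the loop above; not part of the vendored GYP source.
# The real code uses gyp.common.ParseQualifiedTarget and re-relativizes the build
# file against toplevel_dir; both are skipped here. The example targets are made up.
def _subninja_path(qualified_target):
  build_file, rest = qualified_target.split(':', 1)
  name, toolset = rest.split('#', 1)
  obj = 'obj' if toolset == 'target' else 'obj.' + toolset
  return os.path.join(obj, os.path.dirname(build_file), name + '.ninja')
# _subninja_path('third_party/icu/icu.gyp:icui18n#target')
#   -> 'obj/third_party/icu/icui18n.ninja'
# _subninja_path('some/dir/foo.gyp:bar#host') -> 'obj.host/some/dir/bar.ninja'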
+ empty_target_names = empty_target_names - non_empty_target_names + if empty_target_names: + master_ninja.newline() + master_ninja.comment('Empty targets (output for completeness).') + for name in sorted(empty_target_names): + master_ninja.build(name, 'phony') + + if all_outputs: + master_ninja.newline() + master_ninja.build('all', 'phony', sorted(all_outputs)) + master_ninja.default(generator_flags.get('default_target', 'all')) + + master_ninja_file.close() + + +def PerformBuild(data, configurations, params): + options = params['options'] + for config in configurations: + builddir = os.path.join(options.toplevel_dir, 'out', config) + arguments = ['ninja', '-C', builddir] + print 'Building [%s]: %s' % (config, arguments) + subprocess.check_call(arguments) + + +def CallGenerateOutputForConfig(arglist): + # Ignore the interrupt signal so that the parent process catches it and + # kills all multiprocessing children. + signal.signal(signal.SIGINT, signal.SIG_IGN) + + (target_list, target_dicts, data, params, config_name) = arglist + GenerateOutputForConfig(target_list, target_dicts, data, params, config_name) + + +def GenerateOutput(target_list, target_dicts, data, params): + # Update target_dicts for iOS device builds. + target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator( + target_dicts) + + user_config = params.get('generator_flags', {}).get('config', None) + if gyp.common.GetFlavor(params) == 'win': + target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts) + target_list, target_dicts = MSVSUtil.InsertLargePdbShims( + target_list, target_dicts, generator_default_variables) + + if user_config: + GenerateOutputForConfig(target_list, target_dicts, data, params, + user_config) + else: + config_names = target_dicts[target_list[0]]['configurations'].keys() + if params['parallel']: + try: + pool = multiprocessing.Pool(len(config_names)) + arglists = [] + for config_name in config_names: + arglists.append( + (target_list, target_dicts, data, params, config_name)) + pool.map(CallGenerateOutputForConfig, arglists) + except KeyboardInterrupt, e: + pool.terminate() + raise e + else: + for config_name in config_names: + GenerateOutputForConfig(target_list, target_dicts, data, params, + config_name) diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/ninja_test.py b/deps/libuv/build/gyp/pylib/gyp/generator/ninja_test.py new file mode 100644 index 00000000..1767b2f4 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/ninja_test.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python + +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" Unit tests for the ninja.py file. """ + +import gyp.generator.ninja as ninja +import unittest +import StringIO +import sys +import TestCommon + + +class TestPrefixesAndSuffixes(unittest.TestCase): + def test_BinaryNamesWindows(self): + # These cannot run on non-Windows as they require a VS installation to + # correctly handle variable expansion. + if sys.platform.startswith('win'): + writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.', + 'build.ninja', 'win') + spec = { 'target_name': 'wee' } + self.assertTrue(writer.ComputeOutputFileName(spec, 'executable'). + endswith('.exe')) + self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library'). + endswith('.dll')) + self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library'). 
+ endswith('.lib')) + + def test_BinaryNamesLinux(self): + writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.', + 'build.ninja', 'linux') + spec = { 'target_name': 'wee' } + self.assertTrue('.' not in writer.ComputeOutputFileName(spec, + 'executable')) + self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library'). + startswith('lib')) + self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library'). + startswith('lib')) + self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library'). + endswith('.so')) + self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library'). + endswith('.a')) + +if __name__ == '__main__': + unittest.main() diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/xcode.py b/deps/libuv/build/gyp/pylib/gyp/generator/xcode.py new file mode 100644 index 00000000..db99d6ab --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/xcode.py @@ -0,0 +1,1311 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import filecmp +import gyp.common +import gyp.xcodeproj_file +import gyp.xcode_ninja +import errno +import os +import sys +import posixpath +import re +import shutil +import subprocess +import tempfile + + +# Project files generated by this module will use _intermediate_var as a +# custom Xcode setting whose value is a DerivedSources-like directory that's +# project-specific and configuration-specific. The normal choice, +# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive +# as it is likely that multiple targets within a single project file will want +# to access the same set of generated files. The other option, +# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific, +# it is not configuration-specific. INTERMEDIATE_DIR is defined as +# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION). +_intermediate_var = 'INTERMEDIATE_DIR' + +# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all +# targets that share the same BUILT_PRODUCTS_DIR. +_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR' + +_library_search_paths_var = 'LIBRARY_SEARCH_PATHS' + +generator_default_variables = { + 'EXECUTABLE_PREFIX': '', + 'EXECUTABLE_SUFFIX': '', + 'STATIC_LIB_PREFIX': 'lib', + 'SHARED_LIB_PREFIX': 'lib', + 'STATIC_LIB_SUFFIX': '.a', + 'SHARED_LIB_SUFFIX': '.dylib', + # INTERMEDIATE_DIR is a place for targets to build up intermediate products. + # It is specific to each build environment. It is only guaranteed to exist + # and be constant within the context of a project, corresponding to a single + # input file. Some build environments may allow their intermediate directory + # to be shared on a wider scale, but this is not guaranteed. + 'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var, + 'OS': 'mac', + 'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)', + 'LIB_DIR': '$(BUILT_PRODUCTS_DIR)', + 'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)', + 'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)', + 'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)', + 'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)', + 'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)', + 'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var, + 'CONFIGURATION_NAME': '$(CONFIGURATION)', +} + +# The Xcode-specific sections that hold paths. +generator_additional_path_sections = [ + 'mac_bundle_resources', + 'mac_framework_headers', + 'mac_framework_private_headers', + # 'mac_framework_dirs', input already handles _dirs endings. 
+] + +# The Xcode-specific keys that exist on targets and aren't moved down to +# configurations. +generator_additional_non_configuration_keys = [ + 'ios_app_extension', + 'ios_watch_app', + 'ios_watchkit_extension', + 'mac_bundle', + 'mac_bundle_resources', + 'mac_framework_headers', + 'mac_framework_private_headers', + 'mac_xctest_bundle', + 'mac_xcuitest_bundle', + 'xcode_create_dependents_test_runner', +] + +# We want to let any rules apply to files that are resources also. +generator_extra_sources_for_rules = [ + 'mac_bundle_resources', + 'mac_framework_headers', + 'mac_framework_private_headers', +] + +generator_filelist_paths = None + +# Xcode's standard set of library directories, which don't need to be duplicated +# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay. +xcode_standard_library_dirs = frozenset([ + '$(SDKROOT)/usr/lib', + '$(SDKROOT)/usr/local/lib', +]) + +def CreateXCConfigurationList(configuration_names): + xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []}) + if len(configuration_names) == 0: + configuration_names = ['Default'] + for configuration_name in configuration_names: + xcbc = gyp.xcodeproj_file.XCBuildConfiguration({ + 'name': configuration_name}) + xccl.AppendProperty('buildConfigurations', xcbc) + xccl.SetProperty('defaultConfigurationName', configuration_names[0]) + return xccl + + +class XcodeProject(object): + def __init__(self, gyp_path, path, build_file_dict): + self.gyp_path = gyp_path + self.path = path + self.project = gyp.xcodeproj_file.PBXProject(path=path) + projectDirPath = gyp.common.RelativePath( + os.path.dirname(os.path.abspath(self.gyp_path)), + os.path.dirname(path) or '.') + self.project.SetProperty('projectDirPath', projectDirPath) + self.project_file = \ + gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project}) + self.build_file_dict = build_file_dict + + # TODO(mark): add destructor that cleans up self.path if created_dir is + # True and things didn't complete successfully. Or do something even + # better with "try"? + self.created_dir = False + try: + os.makedirs(self.path) + self.created_dir = True + except OSError, e: + if e.errno != errno.EEXIST: + raise + + def Finalize1(self, xcode_targets, serialize_all_tests): + # Collect a list of all of the build configuration names used by the + # various targets in the file. It is very heavily advised to keep each + # target in an entire project (even across multiple project files) using + # the same set of configuration names. + configurations = [] + for xct in self.project.GetProperty('targets'): + xccl = xct.GetProperty('buildConfigurationList') + xcbcs = xccl.GetProperty('buildConfigurations') + for xcbc in xcbcs: + name = xcbc.GetProperty('name') + if name not in configurations: + configurations.append(name) + + # Replace the XCConfigurationList attached to the PBXProject object with + # a new one specifying all of the configuration names used by the various + # targets. + try: + xccl = CreateXCConfigurationList(configurations) + self.project.SetProperty('buildConfigurationList', xccl) + except: + sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path) + raise + + # The need for this setting is explained above where _intermediate_var is + # defined. The comments below about wanting to avoid project-wide build + # settings apply here too, but this needs to be set on a project-wide basis + # so that files relative to the _intermediate_var setting can be displayed + # properly in the Xcode UI. 
+ # + # Note that for configuration-relative files such as anything relative to + # _intermediate_var, for the purposes of UI tree view display, Xcode will + # only resolve the configuration name once, when the project file is + # opened. If the active build configuration is changed, the project file + # must be closed and reopened if it is desired for the tree view to update. + # This is filed as Apple radar 6588391. + xccl.SetBuildSetting(_intermediate_var, + '$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)') + xccl.SetBuildSetting(_shared_intermediate_var, + '$(SYMROOT)/DerivedSources/$(CONFIGURATION)') + + # Set user-specified project-wide build settings and config files. This + # is intended to be used very sparingly. Really, almost everything should + # go into target-specific build settings sections. The project-wide + # settings are only intended to be used in cases where Xcode attempts to + # resolve variable references in a project context as opposed to a target + # context, such as when resolving sourceTree references while building up + # the tree tree view for UI display. + # Any values set globally are applied to all configurations, then any + # per-configuration values are applied. + for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems(): + xccl.SetBuildSetting(xck, xcv) + if 'xcode_config_file' in self.build_file_dict: + config_ref = self.project.AddOrGetFileInRootGroup( + self.build_file_dict['xcode_config_file']) + xccl.SetBaseConfiguration(config_ref) + build_file_configurations = self.build_file_dict.get('configurations', {}) + if build_file_configurations: + for config_name in configurations: + build_file_configuration_named = \ + build_file_configurations.get(config_name, {}) + if build_file_configuration_named: + xcc = xccl.ConfigurationNamed(config_name) + for xck, xcv in build_file_configuration_named.get('xcode_settings', + {}).iteritems(): + xcc.SetBuildSetting(xck, xcv) + if 'xcode_config_file' in build_file_configuration_named: + config_ref = self.project.AddOrGetFileInRootGroup( + build_file_configurations[config_name]['xcode_config_file']) + xcc.SetBaseConfiguration(config_ref) + + # Sort the targets based on how they appeared in the input. + # TODO(mark): Like a lot of other things here, this assumes internal + # knowledge of PBXProject - in this case, of its "targets" property. + + # ordinary_targets are ordinary targets that are already in the project + # file. run_test_targets are the targets that run unittests and should be + # used for the Run All Tests target. support_targets are the action/rule + # targets used by GYP file targets, just kept for the assert check. + ordinary_targets = [] + run_test_targets = [] + support_targets = [] + + # targets is full list of targets in the project. + targets = [] + + # does the it define it's own "all"? + has_custom_all = False + + # targets_for_all is the list of ordinary_targets that should be listed + # in this project's "All" target. It includes each non_runtest_target + # that does not have suppress_wildcard set. + targets_for_all = [] + + for target in self.build_file_dict['targets']: + target_name = target['target_name'] + toolset = target['toolset'] + qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name, + toolset) + xcode_target = xcode_targets[qualified_target] + # Make sure that the target being added to the sorted list is already in + # the unsorted list. 
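# --- Editor's note: made-up example of the project-wide 'xcode_settings' and
# 'xcode_config_file' inputs consumed a little earlier in Finalize1; not part of
# the vendored GYP source. Each key/value pair is applied to every configuration
# via xccl.SetBuildSetting(key, value); the setting names are only illustrative.
_example_build_file_dict = {
  'xcode_settings': {
    'SDKROOT': 'macosx',
    'MACOSX_DEPLOYMENT_TARGET': '10.8',
  },
  'xcode_config_file': 'common.xcconfig',    # optional base .xcconfig
}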
+ assert xcode_target in self.project._properties['targets'] + targets.append(xcode_target) + ordinary_targets.append(xcode_target) + if xcode_target.support_target: + support_targets.append(xcode_target.support_target) + targets.append(xcode_target.support_target) + + if not int(target.get('suppress_wildcard', False)): + targets_for_all.append(xcode_target) + + if target_name.lower() == 'all': + has_custom_all = True; + + # If this target has a 'run_as' attribute, add its target to the + # targets, and add it to the test targets. + if target.get('run_as'): + # Make a target to run something. It should have one + # dependency, the parent xcode target. + xccl = CreateXCConfigurationList(configurations) + run_target = gyp.xcodeproj_file.PBXAggregateTarget({ + 'name': 'Run ' + target_name, + 'productName': xcode_target.GetProperty('productName'), + 'buildConfigurationList': xccl, + }, + parent=self.project) + run_target.AddDependency(xcode_target) + + command = target['run_as'] + script = '' + if command.get('working_directory'): + script = script + 'cd "%s"\n' % \ + gyp.xcodeproj_file.ConvertVariablesToShellSyntax( + command.get('working_directory')) + + if command.get('environment'): + script = script + "\n".join( + ['export %s="%s"' % + (key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val)) + for (key, val) in command.get('environment').iteritems()]) + "\n" + + # Some test end up using sockets, files on disk, etc. and can get + # confused if more then one test runs at a time. The generator + # flag 'xcode_serialize_all_test_runs' controls the forcing of all + # tests serially. It defaults to True. To get serial runs this + # little bit of python does the same as the linux flock utility to + # make sure only one runs at a time. + command_prefix = '' + if serialize_all_tests: + command_prefix = \ +"""python -c "import fcntl, subprocess, sys +file = open('$TMPDIR/GYP_serialize_test_runs', 'a') +fcntl.flock(file.fileno(), fcntl.LOCK_EX) +sys.exit(subprocess.call(sys.argv[1:]))" """ + + # If we were unable to exec for some reason, we want to exit + # with an error, and fixup variable references to be shell + # syntax instead of xcode syntax. + script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \ + gyp.xcodeproj_file.ConvertVariablesToShellSyntax( + gyp.common.EncodePOSIXShellList(command.get('action'))) + + ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({ + 'shellScript': script, + 'showEnvVarsInLog': 0, + }) + run_target.AppendProperty('buildPhases', ssbp) + + # Add the run target to the project file. + targets.append(run_target) + run_test_targets.append(run_target) + xcode_target.test_runner = run_target + + + # Make sure that the list of targets being replaced is the same length as + # the one replacing it, but allow for the added test runner targets. + assert len(self.project._properties['targets']) == \ + len(ordinary_targets) + len(support_targets) + + self.project._properties['targets'] = targets + + # Get rid of unnecessary levels of depth in groups like the Source group. + self.project.RootGroupsTakeOverOnlyChildren(True) + + # Sort the groups nicely. Do this after sorting the targets, because the + # Products group is sorted based on the order of the targets. + self.project.SortGroups() + + # Create an "All" target if there's more than one target in this project + # file and the project didn't define its own "All" target. Put a generated + # "All" target first so that people opening up the project for the first + # time will build everything by default. 
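# --- Editor's note: standalone restatement of the flock trick embedded in the
# generated run_as script above; not part of the vendored GYP source. LOCK_EX
# blocks until the previous holder's process exits, so at most one test runner
# executes at a time. POSIX-only; the lock-file path is an example.
import fcntl, subprocess
def _run_serialized(argv, lock_path='/tmp/GYP_serialize_test_runs'):
  lock_file = open(lock_path, 'a')
  fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)   # released when the process exits
  return subprocess.call(argv)
# sys.exit(_run_serialized(sys.argv[1:]))  # mirrors the generated shell script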
+ if len(targets_for_all) > 1 and not has_custom_all: + xccl = CreateXCConfigurationList(configurations) + all_target = gyp.xcodeproj_file.PBXAggregateTarget( + { + 'buildConfigurationList': xccl, + 'name': 'All', + }, + parent=self.project) + + for target in targets_for_all: + all_target.AddDependency(target) + + # TODO(mark): This is evil because it relies on internal knowledge of + # PBXProject._properties. It's important to get the "All" target first, + # though. + self.project._properties['targets'].insert(0, all_target) + + # The same, but for run_test_targets. + if len(run_test_targets) > 1: + xccl = CreateXCConfigurationList(configurations) + run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget( + { + 'buildConfigurationList': xccl, + 'name': 'Run All Tests', + }, + parent=self.project) + for run_test_target in run_test_targets: + run_all_tests_target.AddDependency(run_test_target) + + # Insert after the "All" target, which must exist if there is more than + # one run_test_target. + self.project._properties['targets'].insert(1, run_all_tests_target) + + def Finalize2(self, xcode_targets, xcode_target_to_target_dict): + # Finalize2 needs to happen in a separate step because the process of + # updating references to other projects depends on the ordering of targets + # within remote project files. Finalize1 is responsible for sorting duty, + # and once all project files are sorted, Finalize2 can come in and update + # these references. + + # To support making a "test runner" target that will run all the tests + # that are direct dependents of any given target, we look for + # xcode_create_dependents_test_runner being set on an Aggregate target, + # and generate a second target that will run the tests runners found under + # the marked target. + for bf_tgt in self.build_file_dict['targets']: + if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)): + tgt_name = bf_tgt['target_name'] + toolset = bf_tgt['toolset'] + qualified_target = gyp.common.QualifiedTarget(self.gyp_path, + tgt_name, toolset) + xcode_target = xcode_targets[qualified_target] + if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget): + # Collect all the run test targets. + all_run_tests = [] + pbxtds = xcode_target.GetProperty('dependencies') + for pbxtd in pbxtds: + pbxcip = pbxtd.GetProperty('targetProxy') + dependency_xct = pbxcip.GetProperty('remoteGlobalIDString') + if hasattr(dependency_xct, 'test_runner'): + all_run_tests.append(dependency_xct.test_runner) + + # Directly depend on all the runners as they depend on the target + # that builds them. + if len(all_run_tests) > 0: + run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({ + 'name': 'Run %s Tests' % tgt_name, + 'productName': tgt_name, + }, + parent=self.project) + for run_test_target in all_run_tests: + run_all_target.AddDependency(run_test_target) + + # Insert the test runner after the related target. + idx = self.project._properties['targets'].index(xcode_target) + self.project._properties['targets'].insert(idx + 1, run_all_target) + + # Update all references to other projects, to make sure that the lists of + # remote products are complete. Otherwise, Xcode will fill them in when + # it opens the project file, which will result in unnecessary diffs. + # TODO(mark): This is evil because it relies on internal knowledge of + # PBXProject._other_pbxprojects. 
+ for other_pbxproject in self.project._other_pbxprojects.keys(): + self.project.AddOrGetProjectReference(other_pbxproject) + + self.project.SortRemoteProductReferences() + + # Give everything an ID. + self.project_file.ComputeIDs() + + # Make sure that no two objects in the project file have the same ID. If + # multiple objects wind up with the same ID, upon loading the file, Xcode + # will only recognize one object (the last one in the file?) and the + # results are unpredictable. + self.project_file.EnsureNoIDCollisions() + + def Write(self): + # Write the project file to a temporary location first. Xcode watches for + # changes to the project file and presents a UI sheet offering to reload + # the project when it does change. However, in some cases, especially when + # multiple projects are open or when Xcode is busy, things don't work so + # seamlessly. Sometimes, Xcode is able to detect that a project file has + # changed but can't unload it because something else is referencing it. + # To mitigate this problem, and to avoid even having Xcode present the UI + # sheet when an open project is rewritten for inconsequential changes, the + # project file is written to a temporary file in the xcodeproj directory + # first. The new temporary file is then compared to the existing project + # file, if any. If they differ, the new file replaces the old; otherwise, + # the new project file is simply deleted. Xcode properly detects a file + # being renamed over an open project file as a change and so it remains + # able to present the "project file changed" sheet under this system. + # Writing to a temporary file first also avoids the possible problem of + # Xcode rereading an incomplete project file. + (output_fd, new_pbxproj_path) = \ + tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.', + dir=self.path) + + try: + output_file = os.fdopen(output_fd, 'wb') + + self.project_file.Print(output_file) + output_file.close() + + pbxproj_path = os.path.join(self.path, 'project.pbxproj') + + same = False + try: + same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False) + except OSError, e: + if e.errno != errno.ENOENT: + raise + + if same: + # The new file is identical to the old one, just get rid of the new + # one. + os.unlink(new_pbxproj_path) + else: + # The new file is different from the old one, or there is no old one. + # Rename the new file to the permanent name. + # + # tempfile.mkstemp uses an overly restrictive mode, resulting in a + # file that can only be read by the owner, regardless of the umask. + # There's no reason to not respect the umask here, which means that + # an extra hoop is required to fetch it and reset the new file's mode. + # + # No way to get the umask without setting a new one? Set a safe one + # and then set it back to the old value. + umask = os.umask(077) + os.umask(umask) + + os.chmod(new_pbxproj_path, 0666 & ~umask) + os.rename(new_pbxproj_path, pbxproj_path) + + except Exception: + # Don't leave turds behind. In fact, if this code was responsible for + # creating the xcodeproj directory, get rid of that too. + os.unlink(new_pbxproj_path) + if self.created_dir: + shutil.rmtree(self.path, True) + raise + + +def AddSourceToTarget(source, type, pbxp, xct): + # TODO(mark): Perhaps source_extensions and library_extensions can be made a + # little bit fancier. 
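Before the extension tables that the TODO above refers to, a condensed sketch of the compare-then-swap dance Write() performs: the freshly written file only replaces project.pbxproj when the contents differ, and the restrictive mode left by tempfile.mkstemp is widened to respect the umask. Paths are hypothetical, and a plain existence check stands in for the ENOENT handling in the real method.

    import filecmp
    import os

    def replace_if_changed(new_path, final_path):
        # Content comparison (shallow=False), not just a stat() comparison.
        if os.path.exists(final_path) and filecmp.cmp(new_path, final_path, False):
            os.unlink(new_path)   # identical: throw the temporary file away
            return False
        # mkstemp created new_path readable only by the owner; re-apply the
        # process umask to 0666 so it looks like an ordinarily created file.
        umask = os.umask(0o077)
        os.umask(umask)           # setting a new umask is the only way to read it
        os.chmod(new_path, 0o666 & ~umask)
        os.rename(new_path, final_path)  # rename-over is what Xcode detects cleanly
        return True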
+ source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift'] + + # .o is conceptually more of a "source" than a "library," but Xcode thinks + # of "sources" as things to compile and "libraries" (or "frameworks") as + # things to link with. Adding an object file to an Xcode target's frameworks + # phase works properly. + library_extensions = ['a', 'dylib', 'framework', 'o'] + + basename = posixpath.basename(source) + (root, ext) = posixpath.splitext(basename) + if ext: + ext = ext[1:].lower() + + if ext in source_extensions and type != 'none': + xct.SourcesPhase().AddFile(source) + elif ext in library_extensions and type != 'none': + xct.FrameworksPhase().AddFile(source) + else: + # Files that aren't added to a sources or frameworks build phase can still + # go into the project file, just not as part of a build phase. + pbxp.AddOrGetFileInRootGroup(source) + + +def AddResourceToTarget(resource, pbxp, xct): + # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call + # where it's used. + xct.ResourcesPhase().AddFile(resource) + + +def AddHeaderToTarget(header, pbxp, xct, is_public): + # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call + # where it's used. + settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public] + xct.HeadersPhase().AddFile(header, settings) + + +_xcode_variable_re = re.compile(r'(\$\((.*?)\))') +def ExpandXcodeVariables(string, expansions): + """Expands Xcode-style $(VARIABLES) in string per the expansions dict. + + In some rare cases, it is appropriate to expand Xcode variables when a + project file is generated. For any substring $(VAR) in string, if VAR is a + key in the expansions dict, $(VAR) will be replaced with expansions[VAR]. + Any $(VAR) substring in string for which VAR is not a key in the expansions + dict will remain in the returned string. + """ + + matches = _xcode_variable_re.findall(string) + if matches == None: + return string + + matches.reverse() + for match in matches: + (to_replace, variable) = match + if not variable in expansions: + continue + + replacement = expansions[variable] + string = re.sub(re.escape(to_replace), replacement, string) + + return string + + +_xcode_define_re = re.compile(r'([\\\"\' ])') +def EscapeXcodeDefine(s): + """We must escape the defines that we give to XCode so that it knows not to + split on spaces and to respect backslash and quote literals. 
However, we + must not quote the define, or Xcode will incorrectly intepret variables + especially $(inherited).""" + return re.sub(_xcode_define_re, r'\\\1', s) + + +def PerformBuild(data, configurations, params): + options = params['options'] + + for build_file, build_file_dict in data.iteritems(): + (build_file_root, build_file_ext) = os.path.splitext(build_file) + if build_file_ext != '.gyp': + continue + xcodeproj_path = build_file_root + options.suffix + '.xcodeproj' + if options.generator_output: + xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path) + + for config in configurations: + arguments = ['xcodebuild', '-project', xcodeproj_path] + arguments += ['-configuration', config] + print "Building [%s]: %s" % (config, arguments) + subprocess.check_call(arguments) + + +def CalculateGeneratorInputInfo(params): + toplevel = params['options'].toplevel_dir + if params.get('flavor') == 'ninja': + generator_dir = os.path.relpath(params['options'].generator_output or '.') + output_dir = params.get('generator_flags', {}).get('output_dir', 'out') + output_dir = os.path.normpath(os.path.join(generator_dir, output_dir)) + qualified_out_dir = os.path.normpath(os.path.join( + toplevel, output_dir, 'gypfiles-xcode-ninja')) + else: + output_dir = os.path.normpath(os.path.join(toplevel, 'xcodebuild')) + qualified_out_dir = os.path.normpath(os.path.join( + toplevel, output_dir, 'gypfiles')) + + global generator_filelist_paths + generator_filelist_paths = { + 'toplevel': toplevel, + 'qualified_out_dir': qualified_out_dir, + } + + +def GenerateOutput(target_list, target_dicts, data, params): + # Optionally configure each spec to use ninja as the external builder. + ninja_wrapper = params.get('flavor') == 'ninja' + if ninja_wrapper: + (target_list, target_dicts, data) = \ + gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params) + + options = params['options'] + generator_flags = params.get('generator_flags', {}) + parallel_builds = generator_flags.get('xcode_parallel_builds', True) + serialize_all_tests = \ + generator_flags.get('xcode_serialize_all_test_runs', True) + upgrade_check_project_version = \ + generator_flags.get('xcode_upgrade_check_project_version', None) + + # Format upgrade_check_project_version with leading zeros as needed. 
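The padding described above is just left-filling the version with zeros to four characters, the width Xcode uses for LastUpgradeCheck. For a hypothetical flag value:

    version = str(630)
    while len(version) < 4:
        version = '0' + version
    assert version == '0630'      # same result as str(630).zfill(4)

The loop below applies exactly this to the generator flag before it is stored in the project attributes.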
+ if upgrade_check_project_version: + upgrade_check_project_version = str(upgrade_check_project_version) + while len(upgrade_check_project_version) < 4: + upgrade_check_project_version = '0' + upgrade_check_project_version + + skip_excluded_files = \ + not generator_flags.get('xcode_list_excluded_files', True) + xcode_projects = {} + for build_file, build_file_dict in data.iteritems(): + (build_file_root, build_file_ext) = os.path.splitext(build_file) + if build_file_ext != '.gyp': + continue + xcodeproj_path = build_file_root + options.suffix + '.xcodeproj' + if options.generator_output: + xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path) + xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict) + xcode_projects[build_file] = xcp + pbxp = xcp.project + + # Set project-level attributes from multiple options + project_attributes = {}; + if parallel_builds: + project_attributes['BuildIndependentTargetsInParallel'] = 'YES' + if upgrade_check_project_version: + project_attributes['LastUpgradeCheck'] = upgrade_check_project_version + project_attributes['LastTestingUpgradeCheck'] = \ + upgrade_check_project_version + project_attributes['LastSwiftUpdateCheck'] = \ + upgrade_check_project_version + pbxp.SetProperty('attributes', project_attributes) + + # Add gyp/gypi files to project + if not generator_flags.get('standalone'): + main_group = pbxp.GetProperty('mainGroup') + build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'}) + main_group.AppendChild(build_group) + for included_file in build_file_dict['included_files']: + build_group.AddOrGetFileByPath(included_file, False) + + xcode_targets = {} + xcode_target_to_target_dict = {} + for qualified_target in target_list: + [build_file, target_name, toolset] = \ + gyp.common.ParseQualifiedTarget(qualified_target) + + spec = target_dicts[qualified_target] + if spec['toolset'] != 'target': + raise Exception( + 'Multiple toolsets not supported in xcode build (target %s)' % + qualified_target) + configuration_names = [spec['default_configuration']] + for configuration_name in sorted(spec['configurations'].keys()): + if configuration_name not in configuration_names: + configuration_names.append(configuration_name) + xcp = xcode_projects[build_file] + pbxp = xcp.project + + # Set up the configurations for the target according to the list of names + # supplied. + xccl = CreateXCConfigurationList(configuration_names) + + # Create an XCTarget subclass object for the target. The type with + # "+bundle" appended will be used if the target has "mac_bundle" set. + # loadable_modules not in a mac_bundle are mapped to + # com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets + # to create a single-file mh_bundle. 
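To make the lookup described above concrete, here is how a couple of hypothetical specs map onto product types through the _types table defined next:

    # type 'executable', no mac_bundle   -> key 'executable'
    #                                       -> 'com.apple.product-type.tool'
    # type 'shared_library', mac_bundle  -> key 'shared_library+bundle'
    #                                       -> 'com.apple.product-type.framework'
    spec = {'type': 'shared_library', 'mac_bundle': 1}   # hypothetical spec
    type_bundle_key = spec['type'] + ('+bundle' if spec.get('mac_bundle') else '')
    # _types[type_bundle_key] == 'com.apple.product-type.framework'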
+ _types = { + 'executable': 'com.apple.product-type.tool', + 'loadable_module': 'com.googlecode.gyp.xcode.bundle', + 'shared_library': 'com.apple.product-type.library.dynamic', + 'static_library': 'com.apple.product-type.library.static', + 'mac_kernel_extension': 'com.apple.product-type.kernel-extension', + 'executable+bundle': 'com.apple.product-type.application', + 'loadable_module+bundle': 'com.apple.product-type.bundle', + 'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test', + 'loadable_module+xcuitest': 'com.apple.product-type.bundle.ui-testing', + 'shared_library+bundle': 'com.apple.product-type.framework', + 'executable+extension+bundle': 'com.apple.product-type.app-extension', + 'executable+watch+extension+bundle': + 'com.apple.product-type.watchkit-extension', + 'executable+watch+bundle': + 'com.apple.product-type.application.watchapp', + 'mac_kernel_extension+bundle': 'com.apple.product-type.kernel-extension', + } + + target_properties = { + 'buildConfigurationList': xccl, + 'name': target_name, + } + + type = spec['type'] + is_xctest = int(spec.get('mac_xctest_bundle', 0)) + is_xcuitest = int(spec.get('mac_xcuitest_bundle', 0)) + is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest + is_app_extension = int(spec.get('ios_app_extension', 0)) + is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0)) + is_watch_app = int(spec.get('ios_watch_app', 0)) + if type != 'none': + type_bundle_key = type + if is_xcuitest: + type_bundle_key += '+xcuitest' + assert type == 'loadable_module', ( + 'mac_xcuitest_bundle targets must have type loadable_module ' + '(target %s)' % target_name) + elif is_xctest: + type_bundle_key += '+xctest' + assert type == 'loadable_module', ( + 'mac_xctest_bundle targets must have type loadable_module ' + '(target %s)' % target_name) + elif is_app_extension: + assert is_bundle, ('ios_app_extension flag requires mac_bundle ' + '(target %s)' % target_name) + type_bundle_key += '+extension+bundle' + elif is_watchkit_extension: + assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle ' + '(target %s)' % target_name) + type_bundle_key += '+watch+extension+bundle' + elif is_watch_app: + assert is_bundle, ('ios_watch_app flag requires mac_bundle ' + '(target %s)' % target_name) + type_bundle_key += '+watch+bundle' + elif is_bundle: + type_bundle_key += '+bundle' + + xctarget_type = gyp.xcodeproj_file.PBXNativeTarget + try: + target_properties['productType'] = _types[type_bundle_key] + except KeyError, e: + gyp.common.ExceptionAppend(e, "-- unknown product type while " + "writing target %s" % target_name) + raise + else: + xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget + assert not is_bundle, ( + 'mac_bundle targets cannot have type none (target "%s")' % + target_name) + assert not is_xcuitest, ( + 'mac_xcuitest_bundle targets cannot have type none (target "%s")' % + target_name) + assert not is_xctest, ( + 'mac_xctest_bundle targets cannot have type none (target "%s")' % + target_name) + + target_product_name = spec.get('product_name') + if target_product_name is not None: + target_properties['productName'] = target_product_name + + xct = xctarget_type(target_properties, parent=pbxp, + force_outdir=spec.get('product_dir'), + force_prefix=spec.get('product_prefix'), + force_extension=spec.get('product_extension')) + pbxp.AppendProperty('targets', xct) + xcode_targets[qualified_target] = xct + xcode_target_to_target_dict[xct] = spec + + spec_actions = spec.get('actions', []) + spec_rules = spec.get('rules', []) + + # Xcode 
has some "issues" with checking dependencies for the "Compile + # sources" step with any source files/headers generated by actions/rules. + # To work around this, if a target is building anything directly (not + # type "none"), then a second target is used to run the GYP actions/rules + # and is made a dependency of this target. This way the work is done + # before the dependency checks for what should be recompiled. + support_xct = None + # The Xcode "issues" don't affect xcode-ninja builds, since the dependency + # logic all happens in ninja. Don't bother creating the extra targets in + # that case. + if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper: + support_xccl = CreateXCConfigurationList(configuration_names); + support_target_suffix = generator_flags.get( + 'support_target_suffix', ' Support') + support_target_properties = { + 'buildConfigurationList': support_xccl, + 'name': target_name + support_target_suffix, + } + if target_product_name: + support_target_properties['productName'] = \ + target_product_name + ' Support' + support_xct = \ + gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties, + parent=pbxp) + pbxp.AppendProperty('targets', support_xct) + xct.AddDependency(support_xct) + # Hang the support target off the main target so it can be tested/found + # by the generator during Finalize. + xct.support_target = support_xct + + prebuild_index = 0 + + # Add custom shell script phases for "actions" sections. + for action in spec_actions: + # There's no need to write anything into the script to ensure that the + # output directories already exist, because Xcode will look at the + # declared outputs and automatically ensure that they exist for us. + + # Do we have a message to print when this action runs? + message = action.get('message') + if message: + message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message) + else: + message = '' + + # Turn the list into a string that can be passed to a shell. + action_string = gyp.common.EncodePOSIXShellList(action['action']) + + # Convert Xcode-type variable references to sh-compatible environment + # variable references. + message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message) + action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax( + action_string) + + script = '' + # Include the optional message + if message_sh: + script += message_sh + '\n' + # Be sure the script runs in exec, and that if exec fails, the script + # exits signalling an error. + script += 'exec ' + action_string_sh + '\nexit 1\n' + ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({ + 'inputPaths': action['inputs'], + 'name': 'Action "' + action['action_name'] + '"', + 'outputPaths': action['outputs'], + 'shellScript': script, + 'showEnvVarsInLog': 0, + }) + + if support_xct: + support_xct.AppendProperty('buildPhases', ssbp) + else: + # TODO(mark): this assumes too much knowledge of the internals of + # xcodeproj_file; some of these smarts should move into xcodeproj_file + # itself. + xct._properties['buildPhases'].insert(prebuild_index, ssbp) + prebuild_index = prebuild_index + 1 + + # TODO(mark): Should verify that at most one of these is specified. 
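Before the output-processing flags that the TODO above refers to, a look at the script assembled just above: for a hypothetical action such as {'action_name': 'gen', 'message': 'Generating', 'action': ['python', 'gen.py', 'out.c']}, the shellScript handed to the PBXShellScriptBuildPhase would come out roughly as:

    script = ('echo note: Generating\n'
              'exec python gen.py out.c\n'
              'exit 1\n')

The exec/exit pair is the same trick used for run_as and for rules: if exec fails for any reason, the phase exits nonzero and the build stops.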
+ if int(action.get('process_outputs_as_sources', False)): + for output in action['outputs']: + AddSourceToTarget(output, type, pbxp, xct) + + if int(action.get('process_outputs_as_mac_bundle_resources', False)): + for output in action['outputs']: + AddResourceToTarget(output, pbxp, xct) + + # tgt_mac_bundle_resources holds the list of bundle resources so + # the rule processing can check against it. + if is_bundle: + tgt_mac_bundle_resources = spec.get('mac_bundle_resources', []) + else: + tgt_mac_bundle_resources = [] + + # Add custom shell script phases driving "make" for "rules" sections. + # + # Xcode's built-in rule support is almost powerful enough to use directly, + # but there are a few significant deficiencies that render them unusable. + # There are workarounds for some of its inadequacies, but in aggregate, + # the workarounds added complexity to the generator, and some workarounds + # actually require input files to be crafted more carefully than I'd like. + # Consequently, until Xcode rules are made more capable, "rules" input + # sections will be handled in Xcode output by shell script build phases + # performed prior to the compilation phase. + # + # The following problems with Xcode rules were found. The numbers are + # Apple radar IDs. I hope that these shortcomings are addressed, I really + # liked having the rules handled directly in Xcode during the period that + # I was prototyping this. + # + # 6588600 Xcode compiles custom script rule outputs too soon, compilation + # fails. This occurs when rule outputs from distinct inputs are + # interdependent. The only workaround is to put rules and their + # inputs in a separate target from the one that compiles the rule + # outputs. This requires input file cooperation and it means that + # process_outputs_as_sources is unusable. + # 6584932 Need to declare that custom rule outputs should be excluded from + # compilation. A possible workaround is to lie to Xcode about a + # rule's output, giving it a dummy file it doesn't know how to + # compile. The rule action script would need to touch the dummy. + # 6584839 I need a way to declare additional inputs to a custom rule. + # A possible workaround is a shell script phase prior to + # compilation that touches a rule's primary input files if any + # would-be additional inputs are newer than the output. Modifying + # the source tree - even just modification times - feels dirty. + # 6564240 Xcode "custom script" build rules always dump all environment + # variables. This is a low-prioroty problem and is not a + # show-stopper. + rules_by_ext = {} + for rule in spec_rules: + rules_by_ext[rule['extension']] = rule + + # First, some definitions: + # + # A "rule source" is a file that was listed in a target's "sources" + # list and will have a rule applied to it on the basis of matching the + # rule's "extensions" attribute. Rule sources are direct inputs to + # rules. + # + # Rule definitions may specify additional inputs in their "inputs" + # attribute. These additional inputs are used for dependency tracking + # purposes. + # + # A "concrete output" is a rule output with input-dependent variables + # resolved. For example, given a rule with: + # 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'], + # if the target's "sources" list contained "one.ext" and "two.ext", + # the "concrete output" for rule input "two.ext" would be "two.cc". If + # a rule specifies multiple outputs, each input file that the rule is + # applied to will have the same number of concrete outputs. 
+ # + # If any concrete outputs are outdated or missing relative to their + # corresponding rule_source or to any specified additional input, the + # rule action must be performed to generate the concrete outputs. + + # concrete_outputs_by_rule_source will have an item at the same index + # as the rule['rule_sources'] that it corresponds to. Each item is a + # list of all of the concrete outputs for the rule_source. + concrete_outputs_by_rule_source = [] + + # concrete_outputs_all is a flat list of all concrete outputs that this + # rule is able to produce, given the known set of input files + # (rule_sources) that apply to it. + concrete_outputs_all = [] + + # messages & actions are keyed by the same indices as rule['rule_sources'] + # and concrete_outputs_by_rule_source. They contain the message and + # action to perform after resolving input-dependent variables. The + # message is optional, in which case None is stored for each rule source. + messages = [] + actions = [] + + for rule_source in rule.get('rule_sources', []): + rule_source_dirname, rule_source_basename = \ + posixpath.split(rule_source) + (rule_source_root, rule_source_ext) = \ + posixpath.splitext(rule_source_basename) + + # These are the same variable names that Xcode uses for its own native + # rule support. Because Xcode's rule engine is not being used, they + # need to be expanded as they are written to the makefile. + rule_input_dict = { + 'INPUT_FILE_BASE': rule_source_root, + 'INPUT_FILE_SUFFIX': rule_source_ext, + 'INPUT_FILE_NAME': rule_source_basename, + 'INPUT_FILE_PATH': rule_source, + 'INPUT_FILE_DIRNAME': rule_source_dirname, + } + + concrete_outputs_for_this_rule_source = [] + for output in rule.get('outputs', []): + # Fortunately, Xcode and make both use $(VAR) format for their + # variables, so the expansion is the only transformation necessary. + # Any remaning $(VAR)-type variables in the string can be given + # directly to make, which will pick up the correct settings from + # what Xcode puts into the environment. + concrete_output = ExpandXcodeVariables(output, rule_input_dict) + concrete_outputs_for_this_rule_source.append(concrete_output) + + # Add all concrete outputs to the project. + pbxp.AddOrGetFileInRootGroup(concrete_output) + + concrete_outputs_by_rule_source.append( \ + concrete_outputs_for_this_rule_source) + concrete_outputs_all.extend(concrete_outputs_for_this_rule_source) + + # TODO(mark): Should verify that at most one of these is specified. + if int(rule.get('process_outputs_as_sources', False)): + for output in concrete_outputs_for_this_rule_source: + AddSourceToTarget(output, type, pbxp, xct) + + # If the file came from the mac_bundle_resources list or if the rule + # is marked to process outputs as bundle resource, do so. + was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources + if was_mac_bundle_resource or \ + int(rule.get('process_outputs_as_mac_bundle_resources', False)): + for output in concrete_outputs_for_this_rule_source: + AddResourceToTarget(output, pbxp, xct) + + # Do we have a message to print when this rule runs? + message = rule.get('message') + if message: + message = gyp.common.EncodePOSIXShellArgument(message) + message = ExpandXcodeVariables(message, rule_input_dict) + messages.append(message) + + # Turn the list into a string that can be passed to a shell. 
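Before the action list is turned into a shell string below, a concrete (hypothetical) example of the expansion just described: for a rule_source of 'src/two.ext' and a rule output of '$(INPUT_FILE_BASE).cc', the dict built in the loop above and the resulting concrete output would be:

    rule_input_dict = {
        'INPUT_FILE_BASE':    'two',
        'INPUT_FILE_SUFFIX':  '.ext',
        'INPUT_FILE_NAME':    'two.ext',
        'INPUT_FILE_PATH':    'src/two.ext',
        'INPUT_FILE_DIRNAME': 'src',
    }
    ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', rule_input_dict)  # -> 'two.cc'

The same expansion is applied to the rule's action, which is what the next statements prepare.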
+ action_string = gyp.common.EncodePOSIXShellList(rule['action']) + + action = ExpandXcodeVariables(action_string, rule_input_dict) + actions.append(action) + + if len(concrete_outputs_all) > 0: + # TODO(mark): There's a possibilty for collision here. Consider + # target "t" rule "A_r" and target "t_A" rule "r". + makefile_name = '%s.make' % re.sub( + '[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name'])) + makefile_path = os.path.join(xcode_projects[build_file].path, + makefile_name) + # TODO(mark): try/close? Write to a temporary file and swap it only + # if it's got changes? + makefile = open(makefile_path, 'wb') + + # make will build the first target in the makefile by default. By + # convention, it's called "all". List all (or at least one) + # concrete output for each rule source as a prerequisite of the "all" + # target. + makefile.write('all: \\\n') + for concrete_output_index in \ + xrange(0, len(concrete_outputs_by_rule_source)): + # Only list the first (index [0]) concrete output of each input + # in the "all" target. Otherwise, a parallel make (-j > 1) would + # attempt to process each input multiple times simultaneously. + # Otherwise, "all" could just contain the entire list of + # concrete_outputs_all. + concrete_output = \ + concrete_outputs_by_rule_source[concrete_output_index][0] + if concrete_output_index == len(concrete_outputs_by_rule_source) - 1: + eol = '' + else: + eol = ' \\' + makefile.write(' %s%s\n' % (concrete_output, eol)) + + for (rule_source, concrete_outputs, message, action) in \ + zip(rule['rule_sources'], concrete_outputs_by_rule_source, + messages, actions): + makefile.write('\n') + + # Add a rule that declares it can build each concrete output of a + # rule source. Collect the names of the directories that are + # required. + concrete_output_dirs = [] + for concrete_output_index in xrange(0, len(concrete_outputs)): + concrete_output = concrete_outputs[concrete_output_index] + if concrete_output_index == 0: + bol = '' + else: + bol = ' ' + makefile.write('%s%s \\\n' % (bol, concrete_output)) + + concrete_output_dir = posixpath.dirname(concrete_output) + if (concrete_output_dir and + concrete_output_dir not in concrete_output_dirs): + concrete_output_dirs.append(concrete_output_dir) + + makefile.write(' : \\\n') + + # The prerequisites for this rule are the rule source itself and + # the set of additional rule inputs, if any. + prerequisites = [rule_source] + prerequisites.extend(rule.get('inputs', [])) + for prerequisite_index in xrange(0, len(prerequisites)): + prerequisite = prerequisites[prerequisite_index] + if prerequisite_index == len(prerequisites) - 1: + eol = '' + else: + eol = ' \\' + makefile.write(' %s%s\n' % (prerequisite, eol)) + + # Make sure that output directories exist before executing the rule + # action. + if len(concrete_output_dirs) > 0: + makefile.write('\t@mkdir -p "%s"\n' % + '" "'.join(concrete_output_dirs)) + + # The rule message and action have already had the necessary variable + # substitutions performed. + if message: + # Mark it with note: so Xcode picks it up in build output. + makefile.write('\t@echo note: %s\n' % message) + makefile.write('\t%s\n' % action) + + makefile.close() + + # It might be nice to ensure that needed output directories exist + # here rather than in each target in the Makefile, but that wouldn't + # work if there ever was a concrete output that had an input-dependent + # variable anywhere other than in the leaf position. + + # Don't declare any inputPaths or outputPaths. 
If they're present, + # Xcode will provide a slight optimization by only running the script + # phase if any output is missing or outdated relative to any input. + # Unfortunately, it will also assume that all outputs are touched by + # the script, and if the outputs serve as files in a compilation + # phase, they will be unconditionally rebuilt. Since make might not + # rebuild everything that could be declared here as an output, this + # extra compilation activity is unnecessary. With inputPaths and + # outputPaths not supplied, make will always be called, but it knows + # enough to not do anything when everything is up-to-date. + + # To help speed things up, pass -j COUNT to make so it does some work + # in parallel. Don't use ncpus because Xcode will build ncpus targets + # in parallel and if each target happens to have a rules step, there + # would be ncpus^2 things going. With a machine that has 2 quad-core + # Xeons, a build can quickly run out of processes based on + # scheduling/other tasks, and randomly failing builds are no good. + script = \ +"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)" +if [ "${JOB_COUNT}" -gt 4 ]; then + JOB_COUNT=4 +fi +exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}" +exit 1 +""" % makefile_name + ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({ + 'name': 'Rule "' + rule['rule_name'] + '"', + 'shellScript': script, + 'showEnvVarsInLog': 0, + }) + + if support_xct: + support_xct.AppendProperty('buildPhases', ssbp) + else: + # TODO(mark): this assumes too much knowledge of the internals of + # xcodeproj_file; some of these smarts should move into xcodeproj_file + # itself. + xct._properties['buildPhases'].insert(prebuild_index, ssbp) + prebuild_index = prebuild_index + 1 + + # Extra rule inputs also go into the project file. Concrete outputs were + # already added when they were computed. + groups = ['inputs', 'inputs_excluded'] + if skip_excluded_files: + groups = [x for x in groups if not x.endswith('_excluded')] + for group in groups: + for item in rule.get(group, []): + pbxp.AddOrGetFileInRootGroup(item) + + # Add "sources". + for source in spec.get('sources', []): + (source_root, source_extension) = posixpath.splitext(source) + if source_extension[1:] not in rules_by_ext: + # AddSourceToTarget will add the file to a root group if it's not + # already there. + AddSourceToTarget(source, type, pbxp, xct) + else: + pbxp.AddOrGetFileInRootGroup(source) + + # Add "mac_bundle_resources" and "mac_framework_private_headers" if + # it's a bundle of any type. + if is_bundle: + for resource in tgt_mac_bundle_resources: + (resource_root, resource_extension) = posixpath.splitext(resource) + if resource_extension[1:] not in rules_by_ext: + AddResourceToTarget(resource, pbxp, xct) + else: + pbxp.AddOrGetFileInRootGroup(resource) + + for header in spec.get('mac_framework_private_headers', []): + AddHeaderToTarget(header, pbxp, xct, False) + + # Add "mac_framework_headers". These can be valid for both frameworks + # and static libraries. + if is_bundle or type == 'static_library': + for header in spec.get('mac_framework_headers', []): + AddHeaderToTarget(header, pbxp, xct, True) + + # Add "copies". + pbxcp_dict = {} + for copy_group in spec.get('copies', []): + dest = copy_group['destination'] + if dest[0] not in ('/', '$'): + # Relative paths are relative to $(SRCROOT). 
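A quick illustration of the destination handling the comment above describes, with hypothetical destination strings:

    for dest in ('resources/img', '$(BUILT_PRODUCTS_DIR)/data', '/Library/Frameworks'):
        if dest[0] not in ('/', '$'):
            dest = '$(SRCROOT)/' + dest   # only the bare relative path is rewritten

Only 'resources/img' gets rewritten, to '$(SRCROOT)/resources/img'; destinations that are already absolute or anchored to an Xcode variable pass through unchanged, which is what the next line implements for the real copy group.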
+ dest = '$(SRCROOT)/' + dest + + code_sign = int(copy_group.get('xcode_code_sign', 0)) + settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign]; + + # Coalesce multiple "copies" sections in the same target with the same + # "destination" property into the same PBXCopyFilesBuildPhase, otherwise + # they'll wind up with ID collisions. + pbxcp = pbxcp_dict.get(dest, None) + if pbxcp is None: + pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({ + 'name': 'Copy to ' + copy_group['destination'] + }, + parent=xct) + pbxcp.SetDestination(dest) + + # TODO(mark): The usual comment about this knowing too much about + # gyp.xcodeproj_file internals applies. + xct._properties['buildPhases'].insert(prebuild_index, pbxcp) + + pbxcp_dict[dest] = pbxcp + + for file in copy_group['files']: + pbxcp.AddFile(file, settings) + + # Excluded files can also go into the project file. + if not skip_excluded_files: + for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers', + 'mac_framework_private_headers']: + excluded_key = key + '_excluded' + for item in spec.get(excluded_key, []): + pbxp.AddOrGetFileInRootGroup(item) + + # So can "inputs" and "outputs" sections of "actions" groups. + groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded'] + if skip_excluded_files: + groups = [x for x in groups if not x.endswith('_excluded')] + for action in spec.get('actions', []): + for group in groups: + for item in action.get(group, []): + # Exclude anything in BUILT_PRODUCTS_DIR. They're products, not + # sources. + if not item.startswith('$(BUILT_PRODUCTS_DIR)/'): + pbxp.AddOrGetFileInRootGroup(item) + + for postbuild in spec.get('postbuilds', []): + action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action']) + script = 'exec ' + action_string_sh + '\nexit 1\n' + + # Make the postbuild step depend on the output of ld or ar from this + # target. Apparently putting the script step after the link step isn't + # sufficient to ensure proper ordering in all cases. With an input + # declared but no outputs, the script step should run every time, as + # desired. + ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({ + 'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'], + 'name': 'Postbuild "' + postbuild['postbuild_name'] + '"', + 'shellScript': script, + 'showEnvVarsInLog': 0, + }) + xct.AppendProperty('buildPhases', ssbp) + + # Add dependencies before libraries, because adding a dependency may imply + # adding a library. It's preferable to keep dependencies listed first + # during a link phase so that they can override symbols that would + # otherwise be provided by libraries, which will usually include system + # libraries. On some systems, ld is finicky and even requires the + # libraries to be ordered in such a way that unresolved symbols in + # earlier-listed libraries may only be resolved by later-listed libraries. + # The Mac linker doesn't work that way, but other platforms do, and so + # their linker invocations need to be constructed in this way. There's + # no compelling reason for Xcode's linker invocations to differ. + + if 'dependencies' in spec: + for dependency in spec['dependencies']: + xct.AddDependency(xcode_targets[dependency]) + # The support project also gets the dependencies (in case they are + # needed for the actions/rules to work). 
+ if support_xct: + support_xct.AddDependency(xcode_targets[dependency]) + + if 'libraries' in spec: + for library in spec['libraries']: + xct.FrameworksPhase().AddFile(library) + # Add the library's directory to LIBRARY_SEARCH_PATHS if necessary. + # I wish Xcode handled this automatically. + library_dir = posixpath.dirname(library) + if library_dir not in xcode_standard_library_dirs and ( + not xct.HasBuildSetting(_library_search_paths_var) or + library_dir not in xct.GetBuildSetting(_library_search_paths_var)): + xct.AppendBuildSetting(_library_search_paths_var, library_dir) + + for configuration_name in configuration_names: + configuration = spec['configurations'][configuration_name] + xcbc = xct.ConfigurationNamed(configuration_name) + for include_dir in configuration.get('mac_framework_dirs', []): + xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir) + for include_dir in configuration.get('include_dirs', []): + xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir) + for library_dir in configuration.get('library_dirs', []): + if library_dir not in xcode_standard_library_dirs and ( + not xcbc.HasBuildSetting(_library_search_paths_var) or + library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)): + xcbc.AppendBuildSetting(_library_search_paths_var, library_dir) + + if 'defines' in configuration: + for define in configuration['defines']: + set_define = EscapeXcodeDefine(define) + xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define) + if 'xcode_settings' in configuration: + for xck, xcv in configuration['xcode_settings'].iteritems(): + xcbc.SetBuildSetting(xck, xcv) + if 'xcode_config_file' in configuration: + config_ref = pbxp.AddOrGetFileInRootGroup( + configuration['xcode_config_file']) + xcbc.SetBaseConfiguration(config_ref) + + build_files = [] + for build_file, build_file_dict in data.iteritems(): + if build_file.endswith('.gyp'): + build_files.append(build_file) + + for build_file in build_files: + xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests) + + for build_file in build_files: + xcode_projects[build_file].Finalize2(xcode_targets, + xcode_target_to_target_dict) + + for build_file in build_files: + xcode_projects[build_file].Write() diff --git a/deps/libuv/build/gyp/pylib/gyp/generator/xcode_test.py b/deps/libuv/build/gyp/pylib/gyp/generator/xcode_test.py new file mode 100644 index 00000000..260324a4 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/generator/xcode_test.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +# Copyright (c) 2013 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" Unit tests for the xcode.py file. """ + +import gyp.generator.xcode as xcode +import unittest +import sys + + +class TestEscapeXcodeDefine(unittest.TestCase): + if sys.platform == 'darwin': + def test_InheritedRemainsUnescaped(self): + self.assertEqual(xcode.EscapeXcodeDefine('$(inherited)'), '$(inherited)') + + def test_Escaping(self): + self.assertEqual(xcode.EscapeXcodeDefine('a b"c\\'), 'a\\ b\\"c\\\\') + +if __name__ == '__main__': + unittest.main() diff --git a/deps/libuv/build/gyp/pylib/gyp/input.py b/deps/libuv/build/gyp/pylib/gyp/input.py new file mode 100644 index 00000000..a046a15c --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/input.py @@ -0,0 +1,2901 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
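For orientation before the body of input.py: a .gyp build file is a restricted Python literal, typically a dictionary with a 'targets' list. The example below is hypothetical; CheckedEval further down evaluates exactly this form and rejects repeated keys.

    # A minimal build file evaluates to a dict shaped like this one:
    example_build_file = {
      'targets': [
        {
          'target_name': 'hello',
          'type': 'executable',
          'sources': ['hello.c'],
        },
      ],
    }
    # CheckedEval() below produces such a dict from the file's text and raises
    # GypError if any key is repeated at the same level.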
+ +from compiler.ast import Const +from compiler.ast import Dict +from compiler.ast import Discard +from compiler.ast import List +from compiler.ast import Module +from compiler.ast import Node +from compiler.ast import Stmt +import compiler +import gyp.common +import gyp.simple_copy +import multiprocessing +import optparse +import os.path +import re +import shlex +import signal +import subprocess +import sys +import threading +import time +import traceback +from gyp.common import GypError +from gyp.common import OrderedSet + + +# A list of types that are treated as linkable. +linkable_types = [ + 'executable', + 'shared_library', + 'loadable_module', + 'mac_kernel_extension', + 'windows_driver', +] + +# A list of sections that contain links to other targets. +dependency_sections = ['dependencies', 'export_dependent_settings'] + +# base_path_sections is a list of sections defined by GYP that contain +# pathnames. The generators can provide more keys, the two lists are merged +# into path_sections, but you should call IsPathSection instead of using either +# list directly. +base_path_sections = [ + 'destination', + 'files', + 'include_dirs', + 'inputs', + 'libraries', + 'outputs', + 'sources', +] +path_sections = set() + +# These per-process dictionaries are used to cache build file data when loading +# in parallel mode. +per_process_data = {} +per_process_aux_data = {} + +def IsPathSection(section): + # If section ends in one of the '=+?!' characters, it's applied to a section + # without the trailing characters. '/' is notably absent from this list, + # because there's no way for a regular expression to be treated as a path. + while section and section[-1:] in '=+?!': + section = section[:-1] + + if section in path_sections: + return True + + # Sections mathing the regexp '_(dir|file|path)s?$' are also + # considered PathSections. Using manual string matching since that + # is much faster than the regexp and this can be called hundreds of + # thousands of times so micro performance matters. + if "_" in section: + tail = section[-6:] + if tail[-1] == 's': + tail = tail[:-1] + if tail[-5:] in ('_file', '_path'): + return True + return tail[-4:] == '_dir' + + return False + +# base_non_configuration_keys is a list of key names that belong in the target +# itself and should not be propagated into its configurations. It is merged +# with a list that can come from the generator to +# create non_configuration_keys. +base_non_configuration_keys = [ + # Sections that must exist inside targets and not configurations. + 'actions', + 'configurations', + 'copies', + 'default_configuration', + 'dependencies', + 'dependencies_original', + 'libraries', + 'postbuilds', + 'product_dir', + 'product_extension', + 'product_name', + 'product_prefix', + 'rules', + 'run_as', + 'sources', + 'standalone_static_library', + 'suppress_wildcard', + 'target_name', + 'toolset', + 'toolsets', + 'type', + + # Sections that can be found inside targets or configurations, but that + # should not be propagated from targets into their configurations. + 'variables', +] +non_configuration_keys = [] + +# Keys that do not belong inside a configuration dictionary. +invalid_configuration_keys = [ + 'actions', + 'all_dependent_settings', + 'configurations', + 'dependencies', + 'direct_dependent_settings', + 'libraries', + 'link_settings', + 'sources', + 'standalone_static_library', + 'target_name', + 'type', +] + +# Controls whether or not the generator supports multiple toolsets. 
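Before the multiple_toolsets flag announced above is defined, a few illustrative calls to IsPathSection defined earlier. The first relies only on the suffix rule; the second assumes base_path_sections has already been merged into path_sections, as the comment at its definition describes.

    IsPathSection('include_dirs')  # True: matches the _(dir|file|path)s? suffix rule
    IsPathSection('libraries!')    # True once 'libraries' is in path_sections;
                                   # trailing '=+?!' merge markers are stripped first
    IsPathSection('defines')       # False: no suffix match, not a known path section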
+multiple_toolsets = False + +# Paths for converting filelist paths to output paths: { +# toplevel, +# qualified_output_dir, +# } +generator_filelist_paths = None + +def GetIncludedBuildFiles(build_file_path, aux_data, included=None): + """Return a list of all build files included into build_file_path. + + The returned list will contain build_file_path as well as all other files + that it included, either directly or indirectly. Note that the list may + contain files that were included into a conditional section that evaluated + to false and was not merged into build_file_path's dict. + + aux_data is a dict containing a key for each build file or included build + file. Those keys provide access to dicts whose "included" keys contain + lists of all other files included by the build file. + + included should be left at its default None value by external callers. It + is used for recursion. + + The returned list will not contain any duplicate entries. Each build file + in the list will be relative to the current directory. + """ + + if included == None: + included = [] + + if build_file_path in included: + return included + + included.append(build_file_path) + + for included_build_file in aux_data[build_file_path].get('included', []): + GetIncludedBuildFiles(included_build_file, aux_data, included) + + return included + + +def CheckedEval(file_contents): + """Return the eval of a gyp file. + + The gyp file is restricted to dictionaries and lists only, and + repeated keys are not allowed. + + Note that this is slower than eval() is. + """ + + ast = compiler.parse(file_contents) + assert isinstance(ast, Module) + c1 = ast.getChildren() + assert c1[0] is None + assert isinstance(c1[1], Stmt) + c2 = c1[1].getChildren() + assert isinstance(c2[0], Discard) + c3 = c2[0].getChildren() + assert len(c3) == 1 + return CheckNode(c3[0], []) + + +def CheckNode(node, keypath): + if isinstance(node, Dict): + c = node.getChildren() + dict = {} + for n in range(0, len(c), 2): + assert isinstance(c[n], Const) + key = c[n].getChildren()[0] + if key in dict: + raise GypError("Key '" + key + "' repeated at level " + + repr(len(keypath) + 1) + " with key path '" + + '.'.join(keypath) + "'") + kp = list(keypath) # Make a copy of the list for descending this node. + kp.append(key) + dict[key] = CheckNode(c[n + 1], kp) + return dict + elif isinstance(node, List): + c = node.getChildren() + children = [] + for index, child in enumerate(c): + kp = list(keypath) # Copy list. + kp.append(repr(index)) + children.append(CheckNode(child, kp)) + return children + elif isinstance(node, Const): + return node.getChildren()[0] + else: + raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) + + "': " + repr(node)) + + +def LoadOneBuildFile(build_file_path, data, aux_data, includes, + is_target, check): + if build_file_path in data: + return data[build_file_path] + + if os.path.exists(build_file_path): + build_file_contents = open(build_file_path).read() + else: + raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd())) + + build_file_data = None + try: + if check: + build_file_data = CheckedEval(build_file_contents) + else: + build_file_data = eval(build_file_contents, {'__builtins__': None}, + None) + except SyntaxError, e: + e.filename = build_file_path + raise + except Exception, e: + gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path) + raise + + if type(build_file_data) is not dict: + raise GypError("%s does not evaluate to a dictionary." 
% build_file_path) + + data[build_file_path] = build_file_data + aux_data[build_file_path] = {} + + # Scan for includes and merge them in. + if ('skip_includes' not in build_file_data or + not build_file_data['skip_includes']): + try: + if is_target: + LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, + aux_data, includes, check) + else: + LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data, + aux_data, None, check) + except Exception, e: + gyp.common.ExceptionAppend(e, + 'while reading includes of ' + build_file_path) + raise + + return build_file_data + + +def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data, + includes, check): + includes_list = [] + if includes != None: + includes_list.extend(includes) + if 'includes' in subdict: + for include in subdict['includes']: + # "include" is specified relative to subdict_path, so compute the real + # path to include by appending the provided "include" to the directory + # in which subdict_path resides. + relative_include = \ + os.path.normpath(os.path.join(os.path.dirname(subdict_path), include)) + includes_list.append(relative_include) + # Unhook the includes list, it's no longer needed. + del subdict['includes'] + + # Merge in the included files. + for include in includes_list: + if not 'included' in aux_data[subdict_path]: + aux_data[subdict_path]['included'] = [] + aux_data[subdict_path]['included'].append(include) + + gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include) + + MergeDicts(subdict, + LoadOneBuildFile(include, data, aux_data, None, False, check), + subdict_path, include) + + # Recurse into subdictionaries. + for k, v in subdict.iteritems(): + if type(v) is dict: + LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data, + None, check) + elif type(v) is list: + LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data, + check) + + +# This recurses into lists so that it can look for dicts. +def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check): + for item in sublist: + if type(item) is dict: + LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data, + None, check) + elif type(item) is list: + LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check) + +# Processes toolsets in all the targets. This recurses into condition entries +# since they can contain toolsets as well. +def ProcessToolsetsInDict(data): + if 'targets' in data: + target_list = data['targets'] + new_target_list = [] + for target in target_list: + # If this target already has an explicit 'toolset', and no 'toolsets' + # list, don't modify it further. + if 'toolset' in target and 'toolsets' not in target: + new_target_list.append(target) + continue + if multiple_toolsets: + toolsets = target.get('toolsets', ['target']) + else: + toolsets = ['target'] + # Make sure this 'toolsets' definition is only processed once. + if 'toolsets' in target: + del target['toolsets'] + if len(toolsets) > 0: + # Optimization: only do copies if more than one toolset is specified. 
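The copy loop below produces one target entry per toolset. For a hypothetical input, with multiple_toolsets enabled, ProcessToolsetsInDict rewrites the list like this:

    data = {'targets': [{'target_name': 'foo', 'toolsets': ['host', 'target']}]}
    ProcessToolsetsInDict(data)
    # data['targets'] is now:
    #   [{'target_name': 'foo', 'toolset': 'target'},   # deep copy for toolsets[1]
    #    {'target_name': 'foo', 'toolset': 'host'}]     # original gets toolsets[0]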
+ for build in toolsets[1:]: + new_target = gyp.simple_copy.deepcopy(target) + new_target['toolset'] = build + new_target_list.append(new_target) + target['toolset'] = toolsets[0] + new_target_list.append(target) + data['targets'] = new_target_list + if 'conditions' in data: + for condition in data['conditions']: + if type(condition) is list: + for condition_dict in condition[1:]: + if type(condition_dict) is dict: + ProcessToolsetsInDict(condition_dict) + + +# TODO(mark): I don't love this name. It just means that it's going to load +# a build file that contains targets and is expected to provide a targets dict +# that contains the targets... +def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes, + depth, check, load_dependencies): + # If depth is set, predefine the DEPTH variable to be a relative path from + # this build file's directory to the directory identified by depth. + if depth: + # TODO(dglazkov) The backslash/forward-slash replacement at the end is a + # temporary measure. This should really be addressed by keeping all paths + # in POSIX until actual project generation. + d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path)) + if d == '': + variables['DEPTH'] = '.' + else: + variables['DEPTH'] = d.replace('\\', '/') + + # The 'target_build_files' key is only set when loading target build files in + # the non-parallel code path, where LoadTargetBuildFile is called + # recursively. In the parallel code path, we don't need to check whether the + # |build_file_path| has already been loaded, because the 'scheduled' set in + # ParallelState guarantees that we never load the same |build_file_path| + # twice. + if 'target_build_files' in data: + if build_file_path in data['target_build_files']: + # Already loaded. + return False + data['target_build_files'].add(build_file_path) + + gyp.DebugOutput(gyp.DEBUG_INCLUDES, + "Loading Target Build File '%s'", build_file_path) + + build_file_data = LoadOneBuildFile(build_file_path, data, aux_data, + includes, True, check) + + # Store DEPTH for later use in generators. + build_file_data['_DEPTH'] = depth + + # Set up the included_files key indicating which .gyp files contributed to + # this target dict. + if 'included_files' in build_file_data: + raise GypError(build_file_path + ' must not contain included_files key') + + included = GetIncludedBuildFiles(build_file_path, aux_data) + build_file_data['included_files'] = [] + for included_file in included: + # included_file is relative to the current directory, but it needs to + # be made relative to build_file_path's directory. + included_relative = \ + gyp.common.RelativePath(included_file, + os.path.dirname(build_file_path)) + build_file_data['included_files'].append(included_relative) + + # Do a first round of toolsets expansion so that conditions can be defined + # per toolset. + ProcessToolsetsInDict(build_file_data) + + # Apply "pre"/"early" variable expansions and condition evaluations. + ProcessVariablesAndConditionsInDict( + build_file_data, PHASE_EARLY, variables, build_file_path) + + # Since some toolsets might have been defined conditionally, perform + # a second round of toolsets expansion now. + ProcessToolsetsInDict(build_file_data) + + # Look at each project's target_defaults dict, and merge settings into + # targets. 
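A small sketch of the merge described above, using hypothetical values. MergeDicts (defined further down in this file) concatenates lists, so the target's own settings land after the defaults:

    defaults = {'defines': ['NDEBUG']}
    target = {'target_name': 'foo', 'defines': ['FOO']}
    merged = gyp.simple_copy.deepcopy(defaults)
    MergeDicts(merged, target, 'a.gyp', 'a.gyp')
    # merged -> {'defines': ['NDEBUG', 'FOO'], 'target_name': 'foo'}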
+ if 'target_defaults' in build_file_data: + if 'targets' not in build_file_data: + raise GypError("Unable to find targets in build file %s" % + build_file_path) + + index = 0 + while index < len(build_file_data['targets']): + # This procedure needs to give the impression that target_defaults is + # used as defaults, and the individual targets inherit from that. + # The individual targets need to be merged into the defaults. Make + # a deep copy of the defaults for each target, merge the target dict + # as found in the input file into that copy, and then hook up the + # copy with the target-specific data merged into it as the replacement + # target dict. + old_target_dict = build_file_data['targets'][index] + new_target_dict = gyp.simple_copy.deepcopy( + build_file_data['target_defaults']) + MergeDicts(new_target_dict, old_target_dict, + build_file_path, build_file_path) + build_file_data['targets'][index] = new_target_dict + index += 1 + + # No longer needed. + del build_file_data['target_defaults'] + + # Look for dependencies. This means that dependency resolution occurs + # after "pre" conditionals and variable expansion, but before "post" - + # in other words, you can't put a "dependencies" section inside a "post" + # conditional within a target. + + dependencies = [] + if 'targets' in build_file_data: + for target_dict in build_file_data['targets']: + if 'dependencies' not in target_dict: + continue + for dependency in target_dict['dependencies']: + dependencies.append( + gyp.common.ResolveTarget(build_file_path, dependency, None)[0]) + + if load_dependencies: + for dependency in dependencies: + try: + LoadTargetBuildFile(dependency, data, aux_data, variables, + includes, depth, check, load_dependencies) + except Exception, e: + gyp.common.ExceptionAppend( + e, 'while loading dependencies of %s' % build_file_path) + raise + else: + return (build_file_path, dependencies) + +def CallLoadTargetBuildFile(global_flags, + build_file_path, variables, + includes, depth, check, + generator_input_info): + """Wrapper around LoadTargetBuildFile for parallel processing. + + This wrapper is used when LoadTargetBuildFile is executed in + a worker process. + """ + + try: + signal.signal(signal.SIGINT, signal.SIG_IGN) + + # Apply globals so that the worker process behaves the same. + for key, value in global_flags.iteritems(): + globals()[key] = value + + SetGeneratorGlobals(generator_input_info) + result = LoadTargetBuildFile(build_file_path, per_process_data, + per_process_aux_data, variables, + includes, depth, check, False) + if not result: + return result + + (build_file_path, dependencies) = result + + # We can safely pop the build_file_data from per_process_data because it + # will never be referenced by this process again, so we don't need to keep + # it in the cache. + build_file_data = per_process_data.pop(build_file_path) + + # This gets serialized and sent back to the main process via a pipe. + # It's handled in LoadTargetBuildFileCallback. + return (build_file_path, + build_file_data, + dependencies) + except GypError, e: + sys.stderr.write("gyp: %s\n" % e) + return None + except Exception, e: + print >>sys.stderr, 'Exception:', e + print >>sys.stderr, traceback.format_exc() + return None + + +class ParallelProcessingError(Exception): + pass + + +class ParallelState(object): + """Class to keep track of state when processing input files in parallel. + + If build files are loaded in parallel, use this to keep track of + state during farming out and processing parallel jobs. 
It's stored + in a global so that the callback function can have access to it. + """ + + def __init__(self): + # The multiprocessing pool. + self.pool = None + # The condition variable used to protect this object and notify + # the main loop when there might be more data to process. + self.condition = None + # The "data" dict that was passed to LoadTargetBuildFileParallel + self.data = None + # The number of parallel calls outstanding; decremented when a response + # was received. + self.pending = 0 + # The set of all build files that have been scheduled, so we don't + # schedule the same one twice. + self.scheduled = set() + # A list of dependency build file paths that haven't been scheduled yet. + self.dependencies = [] + # Flag to indicate if there was an error in a child process. + self.error = False + + def LoadTargetBuildFileCallback(self, result): + """Handle the results of running LoadTargetBuildFile in another process. + """ + self.condition.acquire() + if not result: + self.error = True + self.condition.notify() + self.condition.release() + return + (build_file_path0, build_file_data0, dependencies0) = result + self.data[build_file_path0] = build_file_data0 + self.data['target_build_files'].add(build_file_path0) + for new_dependency in dependencies0: + if new_dependency not in self.scheduled: + self.scheduled.add(new_dependency) + self.dependencies.append(new_dependency) + self.pending -= 1 + self.condition.notify() + self.condition.release() + + +def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth, + check, generator_input_info): + parallel_state = ParallelState() + parallel_state.condition = threading.Condition() + # Make copies of the build_files argument that we can modify while working. + parallel_state.dependencies = list(build_files) + parallel_state.scheduled = set(build_files) + parallel_state.pending = 0 + parallel_state.data = data + + try: + parallel_state.condition.acquire() + while parallel_state.dependencies or parallel_state.pending: + if parallel_state.error: + break + if not parallel_state.dependencies: + parallel_state.condition.wait() + continue + + dependency = parallel_state.dependencies.pop() + + parallel_state.pending += 1 + global_flags = { + 'path_sections': globals()['path_sections'], + 'non_configuration_keys': globals()['non_configuration_keys'], + 'multiple_toolsets': globals()['multiple_toolsets']} + + if not parallel_state.pool: + parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count()) + parallel_state.pool.apply_async( + CallLoadTargetBuildFile, + args = (global_flags, dependency, + variables, includes, depth, check, generator_input_info), + callback = parallel_state.LoadTargetBuildFileCallback) + except KeyboardInterrupt, e: + parallel_state.pool.terminate() + raise e + + parallel_state.condition.release() + + parallel_state.pool.close() + parallel_state.pool.join() + parallel_state.pool = None + + if parallel_state.error: + sys.exit(1) + +# Look for the bracket that matches the first bracket seen in a +# string, and return the start and end as a tuple. For example, if +# the input is something like "<(foo <(bar)) blah", then it would +# return (1, 13), indicating the entire string except for the leading +# "<" and trailing " blah". 
+LBRACKETS= set('{[(')
+BRACKETS = {'}': '{', ']': '[', ')': '('}
+def FindEnclosingBracketGroup(input_str):
+  stack = []
+  start = -1
+  for index, char in enumerate(input_str):
+    if char in LBRACKETS:
+      stack.append(char)
+      if start == -1:
+        start = index
+    elif char in BRACKETS:
+      if not stack:
+        return (-1, -1)
+      if stack.pop() != BRACKETS[char]:
+        return (-1, -1)
+      if not stack:
+        return (start, index + 1)
+  return (-1, -1)
+
+
+def IsStrCanonicalInt(string):
+  """Returns True if |string| is in its canonical integer form.
+
+  The canonical form is such that str(int(string)) == string.
+  """
+  if type(string) is str:
+    # This function is called a lot so for maximum performance, avoid
+    # involving regexps which would otherwise make the code much
+    # shorter. Regexps would need twice the time of this function.
+    if string:
+      if string == "0":
+        return True
+      if string[0] == "-":
+        string = string[1:]
+        if not string:
+          return False
+      if '1' <= string[0] <= '9':
+        return string.isdigit()
+
+  return False
+
+
+# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
+# "<!interpreter(arguments)".
+early_variable_re = re.compile(
+    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
+    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
+    r'\((?P<is_array>\s*\[?)'
+    r'(?P<content>.*?)(\]?)\))')
+
+# This matches the same as early_variable_re, but with '>' instead of '<'.
+late_variable_re = re.compile(
+    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
+    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
+    r'\((?P<is_array>\s*\[?)'
+    r'(?P<content>.*?)(\]?)\))')
+
+# This matches the same as early_variable_re, but with '^' instead of '<'.
+latelate_variable_re = re.compile(
+    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
+    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
+    r'\((?P<is_array>\s*\[?)'
+    r'(?P<content>.*?)(\]?)\))')
+
+# Global cache of results from running commands so they don't have to be run
+# more than once.
+cached_command_results = {}
+
+
+def FixupPlatformCommand(cmd):
+  if sys.platform == 'win32':
+    if type(cmd) is list:
+      cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
+    else:
+      cmd = re.sub('^cat ', 'type ', cmd)
+  return cmd
+
+
+PHASE_EARLY = 0
+PHASE_LATE = 1
+PHASE_LATELATE = 2
+
+
+def ExpandVariables(input, phase, variables, build_file):
+  # Look for the pattern that gets expanded into variables
+  if phase == PHASE_EARLY:
+    variable_re = early_variable_re
+    expansion_symbol = '<'
+  elif phase == PHASE_LATE:
+    variable_re = late_variable_re
+    expansion_symbol = '>'
+  elif phase == PHASE_LATELATE:
+    variable_re = latelate_variable_re
+    expansion_symbol = '^'
+  else:
+    assert False
+
+  input_str = str(input)
+  if IsStrCanonicalInt(input_str):
+    return int(input_str)
+
+  # Do a quick scan to determine if an expensive regex search is warranted.
+  if expansion_symbol not in input_str:
+    return input_str
+
+  # Get the entire list of matches as a list of MatchObject instances.
+  # (using findall here would return strings instead of MatchObjects).
+  matches = list(variable_re.finditer(input_str))
+  if not matches:
+    return input_str
+
+  output = input_str
+  # Reverse the list of matches so that replacements are done right-to-left.
+  # That ensures that earlier replacements won't mess up the string in a
+  # way that causes later calls to find the earlier substituted text instead
+  # of what's intended for replacement.
+  matches.reverse()
+  for match_group in matches:
+    match = match_group.groupdict()
+    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
+    # match['replace'] is the substring to look for, match['type']
+    # is the character code for the replacement type (< > ! <| >| <@
+    # >@ !@), match['is_array'] contains a '[' for command
+    # arrays, and match['content'] is the name of the variable (< >)
+    # or command to run (!).
match['command_string'] is an optional + # command string. Currently, only 'pymod_do_main' is supported. + + # run_command is true if a ! variant is used. + run_command = '!' in match['type'] + command_string = match['command_string'] + + # file_list is true if a | variant is used. + file_list = '|' in match['type'] + + # Capture these now so we can adjust them later. + replace_start = match_group.start('replace') + replace_end = match_group.end('replace') + + # Find the ending paren, and re-evaluate the contained string. + (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:]) + + # Adjust the replacement range to match the entire command + # found by FindEnclosingBracketGroup (since the variable_re + # probably doesn't match the entire command if it contained + # nested variables). + replace_end = replace_start + c_end + + # Find the "real" replacement, matching the appropriate closing + # paren, and adjust the replacement start and end. + replacement = input_str[replace_start:replace_end] + + # Figure out what the contents of the variable parens are. + contents_start = replace_start + c_start + 1 + contents_end = replace_end - 1 + contents = input_str[contents_start:contents_end] + + # Do filter substitution now for <|(). + # Admittedly, this is different than the evaluation order in other + # contexts. However, since filtration has no chance to run on <|(), + # this seems like the only obvious way to give them access to filters. + if file_list: + processed_variables = gyp.simple_copy.deepcopy(variables) + ProcessListFiltersInDict(contents, processed_variables) + # Recurse to expand variables in the contents + contents = ExpandVariables(contents, phase, + processed_variables, build_file) + else: + # Recurse to expand variables in the contents + contents = ExpandVariables(contents, phase, variables, build_file) + + # Strip off leading/trailing whitespace so that variable matches are + # simpler below (and because they are rarely needed). + contents = contents.strip() + + # expand_to_list is true if an @ variant is used. In that case, + # the expansion should result in a list. Note that the caller + # is to be expecting a list in return, and not all callers do + # because not all are working in list context. Also, for list + # expansions, there can be no other text besides the variable + # expansion in the input string. + expand_to_list = '@' in match['type'] and input_str == replacement + + if run_command or file_list: + # Find the build file's directory, so commands can be run or file lists + # generated relative to it. + build_file_dir = os.path.dirname(build_file) + if build_file_dir == '' and not file_list: + # If build_file is just a leaf filename indicating a file in the + # current directory, build_file_dir might be an empty string. Set + # it to None to signal to subprocess.Popen that it should run the + # command in the current directory. + build_file_dir = None + + # Support <|(listfile.txt ...) which generates a file + # containing items from a gyp list, generated at gyp time. + # This works around actions/rules which have more inputs than will + # fit on the command line. 
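A hedged sketch of how the <|() form described above typically appears in a .gyp file; the action, script, and file names below are invented for illustration.

example_action = {
    'action_name': 'index_sources',
    # Writes one entry of _sources per line into the named file at gyp time
    # and replaces the reference with the (relative) path to that file.
    'inputs': ['<|(index_sources.filelist <@(_sources))'],
    'outputs': ['<(INTERMEDIATE_DIR)/sources.index'],
    'action': ['python', 'tools/index.py',
               '--filelist', '<|(index_sources.filelist <@(_sources))'],
}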
+ if file_list: + if type(contents) is list: + contents_list = contents + else: + contents_list = contents.split(' ') + replacement = contents_list[0] + if os.path.isabs(replacement): + raise GypError('| cannot handle absolute paths, got "%s"' % replacement) + + if not generator_filelist_paths: + path = os.path.join(build_file_dir, replacement) + else: + if os.path.isabs(build_file_dir): + toplevel = generator_filelist_paths['toplevel'] + rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel) + else: + rel_build_file_dir = build_file_dir + qualified_out_dir = generator_filelist_paths['qualified_out_dir'] + path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement) + gyp.common.EnsureDirExists(path) + + replacement = gyp.common.RelativePath(path, build_file_dir) + f = gyp.common.WriteOnDiff(path) + for i in contents_list[1:]: + f.write('%s\n' % i) + f.close() + + elif run_command: + use_shell = True + if match['is_array']: + contents = eval(contents) + use_shell = False + + # Check for a cached value to avoid executing commands, or generating + # file lists more than once. The cache key contains the command to be + # run as well as the directory to run it from, to account for commands + # that depend on their current directory. + # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory, + # someone could author a set of GYP files where each time the command + # is invoked it produces different output by design. When the need + # arises, the syntax should be extended to support no caching off a + # command's output so it is run every time. + cache_key = (str(contents), build_file_dir) + cached_value = cached_command_results.get(cache_key, None) + if cached_value is None: + gyp.DebugOutput(gyp.DEBUG_VARIABLES, + "Executing command '%s' in directory '%s'", + contents, build_file_dir) + + replacement = '' + + if command_string == 'pymod_do_main': + # (sources/) etc. to resolve to + # and empty list if undefined. This allows actions to: + # 'action!': [ + # '>@(_sources!)', + # ], + # 'action/': [ + # '>@(_sources/)', + # ], + replacement = [] + else: + raise GypError('Undefined variable ' + contents + + ' in ' + build_file) + else: + replacement = variables[contents] + + if type(replacement) is list: + for item in replacement: + if not contents[-1] == '/' and type(item) not in (str, int): + raise GypError('Variable ' + contents + + ' must expand to a string or list of strings; ' + + 'list contains a ' + + item.__class__.__name__) + # Run through the list and handle variable expansions in it. Since + # the list is guaranteed not to contain dicts, this won't do anything + # with conditions sections. + ProcessVariablesAndConditionsInList(replacement, phase, variables, + build_file) + elif type(replacement) not in (str, int): + raise GypError('Variable ' + contents + + ' must expand to a string or list of strings; ' + + 'found a ' + replacement.__class__.__name__) + + if expand_to_list: + # Expanding in list context. It's guaranteed that there's only one + # replacement to do in |input_str| and that it's this replacement. See + # above. + if type(replacement) is list: + # If it's already a list, make a copy. + output = replacement[:] + else: + # Split it the same way sh would split arguments. + output = shlex.split(str(replacement)) + else: + # Expanding in string context. 
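For orientation, an editor's sketch (variable names invented) of the two expansion contexts handled here: an '@' reference that is the entire list item expands in list context, anything else expands in string context.

example = {
    'variables': {'extra_sources': ['foo.cc', 'bar.cc']},
    # List context: the item is exactly one '<@()' reference, so the expansion
    # is spliced into the surrounding list as separate elements.
    'sources': ['main.cc', '<@(extra_sources)'],
    # String context: the expansion is re-encoded into the containing string.
    'defines': ['EXTRA_SOURCES="<(extra_sources)"'],
}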
+ encoded_replacement = '' + if type(replacement) is list: + # When expanding a list into string context, turn the list items + # into a string in a way that will work with a subprocess call. + # + # TODO(mark): This isn't completely correct. This should + # call a generator-provided function that observes the + # proper list-to-argument quoting rules on a specific + # platform instead of just calling the POSIX encoding + # routine. + encoded_replacement = gyp.common.EncodePOSIXShellList(replacement) + else: + encoded_replacement = replacement + + output = output[:replace_start] + str(encoded_replacement) + \ + output[replace_end:] + # Prepare for the next match iteration. + input_str = output + + if output == input: + gyp.DebugOutput(gyp.DEBUG_VARIABLES, + "Found only identity matches on %r, avoiding infinite " + "recursion.", + output) + else: + # Look for more matches now that we've replaced some, to deal with + # expanding local variables (variables defined in the same + # variables block as this one). + gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output) + if type(output) is list: + if output and type(output[0]) is list: + # Leave output alone if it's a list of lists. + # We don't want such lists to be stringified. + pass + else: + new_output = [] + for item in output: + new_output.append( + ExpandVariables(item, phase, variables, build_file)) + output = new_output + else: + output = ExpandVariables(output, phase, variables, build_file) + + # Convert all strings that are canonically-represented integers into integers. + if type(output) is list: + for index in xrange(0, len(output)): + if IsStrCanonicalInt(output[index]): + output[index] = int(output[index]) + elif IsStrCanonicalInt(output): + output = int(output) + + return output + +# The same condition is often evaluated over and over again so it +# makes sense to cache as much as possible between evaluations. +cached_conditions_asts = {} + +def EvalCondition(condition, conditions_key, phase, variables, build_file): + """Returns the dict that should be used or None if the result was + that nothing should be used.""" + if type(condition) is not list: + raise GypError(conditions_key + ' must be a list') + if len(condition) < 2: + # It's possible that condition[0] won't work in which case this + # attempt will raise its own IndexError. That's probably fine. + raise GypError(conditions_key + ' ' + condition[0] + + ' must be at least length 2, not ' + str(len(condition))) + + i = 0 + result = None + while i < len(condition): + cond_expr = condition[i] + true_dict = condition[i + 1] + if type(true_dict) is not dict: + raise GypError('{} {} must be followed by a dictionary, not {}'.format( + conditions_key, cond_expr, type(true_dict))) + if len(condition) > i + 2 and type(condition[i + 2]) is dict: + false_dict = condition[i + 2] + i = i + 3 + if i != len(condition): + raise GypError('{} {} has {} unexpected trailing items'.format( + conditions_key, cond_expr, len(condition) - i)) + else: + false_dict = None + i = i + 2 + if result == None: + result = EvalSingleCondition( + cond_expr, true_dict, false_dict, phase, variables, build_file) + + return result + + +def EvalSingleCondition( + cond_expr, true_dict, false_dict, phase, variables, build_file): + """Returns true_dict if cond_expr evaluates to true, and false_dict + otherwise.""" + # Do expansions on the condition itself. 
Since the conditon can naturally + # contain variable references without needing to resort to GYP expansion + # syntax, this is of dubious value for variables, but someone might want to + # use a command expansion directly inside a condition. + cond_expr_expanded = ExpandVariables(cond_expr, phase, variables, + build_file) + if type(cond_expr_expanded) not in (str, int): + raise ValueError( + 'Variable expansion in this context permits str and int ' + \ + 'only, found ' + cond_expr_expanded.__class__.__name__) + + try: + if cond_expr_expanded in cached_conditions_asts: + ast_code = cached_conditions_asts[cond_expr_expanded] + else: + ast_code = compile(cond_expr_expanded, '', 'eval') + cached_conditions_asts[cond_expr_expanded] = ast_code + if eval(ast_code, {'__builtins__': None}, variables): + return true_dict + return false_dict + except SyntaxError, e: + syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s ' + 'at character %d.' % + (str(e.args[0]), e.text, build_file, e.offset), + e.filename, e.lineno, e.offset, e.text) + raise syntax_error + except NameError, e: + gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' % + (cond_expr_expanded, build_file)) + raise GypError(e) + + +def ProcessConditionsInDict(the_dict, phase, variables, build_file): + # Process a 'conditions' or 'target_conditions' section in the_dict, + # depending on phase. + # early -> conditions + # late -> target_conditions + # latelate -> no conditions + # + # Each item in a conditions list consists of cond_expr, a string expression + # evaluated as the condition, and true_dict, a dict that will be merged into + # the_dict if cond_expr evaluates to true. Optionally, a third item, + # false_dict, may be present. false_dict is merged into the_dict if + # cond_expr evaluates to false. + # + # Any dict merged into the_dict will be recursively processed for nested + # conditionals and other expansions, also according to phase, immediately + # prior to being merged. + + if phase == PHASE_EARLY: + conditions_key = 'conditions' + elif phase == PHASE_LATE: + conditions_key = 'target_conditions' + elif phase == PHASE_LATELATE: + return + else: + assert False + + if not conditions_key in the_dict: + return + + conditions_list = the_dict[conditions_key] + # Unhook the conditions list, it's no longer needed. + del the_dict[conditions_key] + + for condition in conditions_list: + merge_dict = EvalCondition(condition, conditions_key, phase, variables, + build_file) + + if merge_dict != None: + # Expand variables and nested conditinals in the merge_dict before + # merging it. + ProcessVariablesAndConditionsInDict(merge_dict, phase, + variables, build_file) + + MergeDicts(the_dict, merge_dict, build_file, build_file) + + +def LoadAutomaticVariablesFromDict(variables, the_dict): + # Any keys with plain string values in the_dict become automatic variables. + # The variable name is the key name with a "_" character prepended. + for key, value in the_dict.iteritems(): + if type(value) in (str, int, list): + variables['_' + key] = value + + +def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key): + # Any keys in the_dict's "variables" dict, if it has one, becomes a + # variable. The variable name is the key name in the "variables" dict. + # Variables that end with the % character are set only if they are unset in + # the variables dict. the_dict_key is the name of the key that accesses + # the_dict in the_dict's parent dict. 
If the_dict's parent is not a dict + # (it could be a list or it could be parentless because it is a root dict), + # the_dict_key will be None. + for key, value in the_dict.get('variables', {}).iteritems(): + if type(value) not in (str, int, list): + continue + + if key.endswith('%'): + variable_name = key[:-1] + if variable_name in variables: + # If the variable is already set, don't set it. + continue + if the_dict_key is 'variables' and variable_name in the_dict: + # If the variable is set without a % in the_dict, and the_dict is a + # variables dict (making |variables| a varaibles sub-dict of a + # variables dict), use the_dict's definition. + value = the_dict[variable_name] + else: + variable_name = key + + variables[variable_name] = value + + +def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in, + build_file, the_dict_key=None): + """Handle all variable and command expansion and conditional evaluation. + + This function is the public entry point for all variable expansions and + conditional evaluations. The variables_in dictionary will not be modified + by this function. + """ + + # Make a copy of the variables_in dict that can be modified during the + # loading of automatics and the loading of the variables dict. + variables = variables_in.copy() + LoadAutomaticVariablesFromDict(variables, the_dict) + + if 'variables' in the_dict: + # Make sure all the local variables are added to the variables + # list before we process them so that you can reference one + # variable from another. They will be fully expanded by recursion + # in ExpandVariables. + for key, value in the_dict['variables'].iteritems(): + variables[key] = value + + # Handle the associated variables dict first, so that any variable + # references within can be resolved prior to using them as variables. + # Pass a copy of the variables dict to avoid having it be tainted. + # Otherwise, it would have extra automatics added for everything that + # should just be an ordinary variable in this scope. + ProcessVariablesAndConditionsInDict(the_dict['variables'], phase, + variables, build_file, 'variables') + + LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key) + + for key, value in the_dict.iteritems(): + # Skip "variables", which was already processed if present. + if key != 'variables' and type(value) is str: + expanded = ExpandVariables(value, phase, variables, build_file) + if type(expanded) not in (str, int): + raise ValueError( + 'Variable expansion in this context permits str and int ' + \ + 'only, found ' + expanded.__class__.__name__ + ' for ' + key) + the_dict[key] = expanded + + # Variable expansion may have resulted in changes to automatics. Reload. + # TODO(mark): Optimization: only reload if no changes were made. + variables = variables_in.copy() + LoadAutomaticVariablesFromDict(variables, the_dict) + LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key) + + # Process conditions in this dict. This is done after variable expansion + # so that conditions may take advantage of expanded variables. For example, + # if the_dict contains: + # {'type': '<(library_type)', + # 'conditions': [['_type=="static_library"', { ... }]]}, + # _type, as used in the condition, will only be set to the value of + # library_type if variable expansion is performed before condition + # processing. 
However, condition processing should occur prior to recursion + # so that variables (both automatic and "variables" dict type) may be + # adjusted by conditions sections, merged into the_dict, and have the + # intended impact on contained dicts. + # + # This arrangement means that a "conditions" section containing a "variables" + # section will only have those variables effective in subdicts, not in + # the_dict. The workaround is to put a "conditions" section within a + # "variables" section. For example: + # {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]], + # 'defines': ['<(define)'], + # 'my_subdict': {'defines': ['<(define)']}}, + # will not result in "IS_MAC" being appended to the "defines" list in the + # current scope but would result in it being appended to the "defines" list + # within "my_subdict". By comparison: + # {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]}, + # 'defines': ['<(define)'], + # 'my_subdict': {'defines': ['<(define)']}}, + # will append "IS_MAC" to both "defines" lists. + + # Evaluate conditions sections, allowing variable expansions within them + # as well as nested conditionals. This will process a 'conditions' or + # 'target_conditions' section, perform appropriate merging and recursive + # conditional and variable processing, and then remove the conditions section + # from the_dict if it is present. + ProcessConditionsInDict(the_dict, phase, variables, build_file) + + # Conditional processing may have resulted in changes to automatics or the + # variables dict. Reload. + variables = variables_in.copy() + LoadAutomaticVariablesFromDict(variables, the_dict) + LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key) + + # Recurse into child dicts, or process child lists which may result in + # further recursion into descendant dicts. + for key, value in the_dict.iteritems(): + # Skip "variables" and string values, which were already processed if + # present. + if key == 'variables' or type(value) is str: + continue + if type(value) is dict: + # Pass a copy of the variables dict so that subdicts can't influence + # parents. + ProcessVariablesAndConditionsInDict(value, phase, variables, + build_file, key) + elif type(value) is list: + # The list itself can't influence the variables dict, and + # ProcessVariablesAndConditionsInList will make copies of the variables + # dict if it needs to pass it to something that can influence it. No + # copy is necessary here. + ProcessVariablesAndConditionsInList(value, phase, variables, + build_file) + elif type(value) is not int: + raise TypeError('Unknown type ' + value.__class__.__name__ + \ + ' for ' + key) + + +def ProcessVariablesAndConditionsInList(the_list, phase, variables, + build_file): + # Iterate using an index so that new values can be assigned into the_list. + index = 0 + while index < len(the_list): + item = the_list[index] + if type(item) is dict: + # Make a copy of the variables dict so that it won't influence anything + # outside of its own scope. + ProcessVariablesAndConditionsInDict(item, phase, variables, build_file) + elif type(item) is list: + ProcessVariablesAndConditionsInList(item, phase, variables, build_file) + elif type(item) is str: + expanded = ExpandVariables(item, phase, variables, build_file) + if type(expanded) in (str, int): + the_list[index] = expanded + elif type(expanded) is list: + the_list[index:index+1] = expanded + index += len(expanded) + + # index now identifies the next item to examine. 
Continue right now + # without falling into the index increment below. + continue + else: + raise ValueError( + 'Variable expansion in this context permits strings and ' + \ + 'lists only, found ' + expanded.__class__.__name__ + ' at ' + \ + index) + elif type(item) is not int: + raise TypeError('Unknown type ' + item.__class__.__name__ + \ + ' at index ' + index) + index = index + 1 + + +def BuildTargetsDict(data): + """Builds a dict mapping fully-qualified target names to their target dicts. + + |data| is a dict mapping loaded build files by pathname relative to the + current directory. Values in |data| are build file contents. For each + |data| value with a "targets" key, the value of the "targets" key is taken + as a list containing target dicts. Each target's fully-qualified name is + constructed from the pathname of the build file (|data| key) and its + "target_name" property. These fully-qualified names are used as the keys + in the returned dict. These keys provide access to the target dicts, + the dicts in the "targets" lists. + """ + + targets = {} + for build_file in data['target_build_files']: + for target in data[build_file].get('targets', []): + target_name = gyp.common.QualifiedTarget(build_file, + target['target_name'], + target['toolset']) + if target_name in targets: + raise GypError('Duplicate target definitions for ' + target_name) + targets[target_name] = target + + return targets + + +def QualifyDependencies(targets): + """Make dependency links fully-qualified relative to the current directory. + + |targets| is a dict mapping fully-qualified target names to their target + dicts. For each target in this dict, keys known to contain dependency + links are examined, and any dependencies referenced will be rewritten + so that they are fully-qualified and relative to the current directory. + All rewritten dependencies are suitable for use as keys to |targets| or a + similar dict. + """ + + all_dependency_sections = [dep + op + for dep in dependency_sections + for op in ('', '!', '/')] + + for target, target_dict in targets.iteritems(): + target_build_file = gyp.common.BuildFile(target) + toolset = target_dict['toolset'] + for dependency_key in all_dependency_sections: + dependencies = target_dict.get(dependency_key, []) + for index in xrange(0, len(dependencies)): + dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget( + target_build_file, dependencies[index], toolset) + if not multiple_toolsets: + # Ignore toolset specification in the dependency if it is specified. + dep_toolset = toolset + dependency = gyp.common.QualifiedTarget(dep_file, + dep_target, + dep_toolset) + dependencies[index] = dependency + + # Make sure anything appearing in a list other than "dependencies" also + # appears in the "dependencies" list. + if dependency_key != 'dependencies' and \ + dependency not in target_dict['dependencies']: + raise GypError('Found ' + dependency + ' in ' + dependency_key + + ' of ' + target + ', but not in dependencies') + + +def ExpandWildcardDependencies(targets, data): + """Expands dependencies specified as build_file:*. + + For each target in |targets|, examines sections containing links to other + targets. If any such section contains a link of the form build_file:*, it + is taken as a wildcard link, and is expanded to list each target in + build_file. The |data| dict provides access to build file dicts. + + Any target that does not wish to be included by wildcard can provide an + optional "suppress_wildcard" key in its target dict. 
When present and + true, a wildcard dependency link will not include such targets. + + All dependency names, including the keys to |targets| and the values in each + dependency list, must be qualified when this function is called. + """ + + for target, target_dict in targets.iteritems(): + toolset = target_dict['toolset'] + target_build_file = gyp.common.BuildFile(target) + for dependency_key in dependency_sections: + dependencies = target_dict.get(dependency_key, []) + + # Loop this way instead of "for dependency in" or "for index in xrange" + # because the dependencies list will be modified within the loop body. + index = 0 + while index < len(dependencies): + (dependency_build_file, dependency_target, dependency_toolset) = \ + gyp.common.ParseQualifiedTarget(dependencies[index]) + if dependency_target != '*' and dependency_toolset != '*': + # Not a wildcard. Keep it moving. + index = index + 1 + continue + + if dependency_build_file == target_build_file: + # It's an error for a target to depend on all other targets in + # the same file, because a target cannot depend on itself. + raise GypError('Found wildcard in ' + dependency_key + ' of ' + + target + ' referring to same build file') + + # Take the wildcard out and adjust the index so that the next + # dependency in the list will be processed the next time through the + # loop. + del dependencies[index] + index = index - 1 + + # Loop through the targets in the other build file, adding them to + # this target's list of dependencies in place of the removed + # wildcard. + dependency_target_dicts = data[dependency_build_file]['targets'] + for dependency_target_dict in dependency_target_dicts: + if int(dependency_target_dict.get('suppress_wildcard', False)): + continue + dependency_target_name = dependency_target_dict['target_name'] + if (dependency_target != '*' and + dependency_target != dependency_target_name): + continue + dependency_target_toolset = dependency_target_dict['toolset'] + if (dependency_toolset != '*' and + dependency_toolset != dependency_target_toolset): + continue + dependency = gyp.common.QualifiedTarget(dependency_build_file, + dependency_target_name, + dependency_target_toolset) + index = index + 1 + dependencies.insert(index, dependency) + + index = index + 1 + + +def Unify(l): + """Removes duplicate elements from l, keeping the first element.""" + seen = {} + return [seen.setdefault(e, e) for e in l if e not in seen] + + +def RemoveDuplicateDependencies(targets): + """Makes sure every dependency appears only once in all targets's dependency + lists.""" + for target_name, target_dict in targets.iteritems(): + for dependency_key in dependency_sections: + dependencies = target_dict.get(dependency_key, []) + if dependencies: + target_dict[dependency_key] = Unify(dependencies) + + +def Filter(l, item): + """Removes item from l.""" + res = {} + return [res.setdefault(e, e) for e in l if e != item] + + +def RemoveSelfDependencies(targets): + """Remove self dependencies from targets that have the prune_self_dependency + variable set.""" + for target_name, target_dict in targets.iteritems(): + for dependency_key in dependency_sections: + dependencies = target_dict.get(dependency_key, []) + if dependencies: + for t in dependencies: + if t == target_name: + if targets[t].get('variables', {}).get('prune_self_dependency', 0): + target_dict[dependency_key] = Filter(dependencies, target_name) + + +def RemoveLinkDependenciesFromNoneTargets(targets): + """Remove dependencies having the 'link_dependency' attribute from the 'none' 
+ targets.""" + for target_name, target_dict in targets.iteritems(): + for dependency_key in dependency_sections: + dependencies = target_dict.get(dependency_key, []) + if dependencies: + for t in dependencies: + if target_dict.get('type', None) == 'none': + if targets[t].get('variables', {}).get('link_dependency', 0): + target_dict[dependency_key] = \ + Filter(target_dict[dependency_key], t) + + +class DependencyGraphNode(object): + """ + + Attributes: + ref: A reference to an object that this DependencyGraphNode represents. + dependencies: List of DependencyGraphNodes on which this one depends. + dependents: List of DependencyGraphNodes that depend on this one. + """ + + class CircularException(GypError): + pass + + def __init__(self, ref): + self.ref = ref + self.dependencies = [] + self.dependents = [] + + def __repr__(self): + return '' % self.ref + + def FlattenToList(self): + # flat_list is the sorted list of dependencies - actually, the list items + # are the "ref" attributes of DependencyGraphNodes. Every target will + # appear in flat_list after all of its dependencies, and before all of its + # dependents. + flat_list = OrderedSet() + + def ExtractNodeRef(node): + """Extracts the object that the node represents from the given node.""" + return node.ref + + # in_degree_zeros is the list of DependencyGraphNodes that have no + # dependencies not in flat_list. Initially, it is a copy of the children + # of this node, because when the graph was built, nodes with no + # dependencies were made implicit dependents of the root node. + in_degree_zeros = sorted(self.dependents[:], key=ExtractNodeRef) + + while in_degree_zeros: + # Nodes in in_degree_zeros have no dependencies not in flat_list, so they + # can be appended to flat_list. Take these nodes out of in_degree_zeros + # as work progresses, so that the next node to process from the list can + # always be accessed at a consistent position. + node = in_degree_zeros.pop() + flat_list.add(node.ref) + + # Look at dependents of the node just added to flat_list. Some of them + # may now belong in in_degree_zeros. + for node_dependent in sorted(node.dependents, key=ExtractNodeRef): + is_in_degree_zero = True + # TODO: We want to check through the + # node_dependent.dependencies list but if it's long and we + # always start at the beginning, then we get O(n^2) behaviour. + for node_dependent_dependency in (sorted(node_dependent.dependencies, + key=ExtractNodeRef)): + if not node_dependent_dependency.ref in flat_list: + # The dependent one or more dependencies not in flat_list. There + # will be more chances to add it to flat_list when examining + # it again as a dependent of those other dependencies, provided + # that there are no cycles. + is_in_degree_zero = False + break + + if is_in_degree_zero: + # All of the dependent's dependencies are already in flat_list. Add + # it to in_degree_zeros where it will be processed in a future + # iteration of the outer loop. + in_degree_zeros += [node_dependent] + + return list(flat_list) + + def FindCycles(self): + """ + Returns a list of cycles in the graph, where each cycle is its own list. 
+ """ + results = [] + visited = set() + + def Visit(node, path): + for child in node.dependents: + if child in path: + results.append([child] + path[:path.index(child) + 1]) + elif not child in visited: + visited.add(child) + Visit(child, [child] + path) + + visited.add(self) + Visit(self, [self]) + + return results + + def DirectDependencies(self, dependencies=None): + """Returns a list of just direct dependencies.""" + if dependencies == None: + dependencies = [] + + for dependency in self.dependencies: + # Check for None, corresponding to the root node. + if dependency.ref != None and dependency.ref not in dependencies: + dependencies.append(dependency.ref) + + return dependencies + + def _AddImportedDependencies(self, targets, dependencies=None): + """Given a list of direct dependencies, adds indirect dependencies that + other dependencies have declared to export their settings. + + This method does not operate on self. Rather, it operates on the list + of dependencies in the |dependencies| argument. For each dependency in + that list, if any declares that it exports the settings of one of its + own dependencies, those dependencies whose settings are "passed through" + are added to the list. As new items are added to the list, they too will + be processed, so it is possible to import settings through multiple levels + of dependencies. + + This method is not terribly useful on its own, it depends on being + "primed" with a list of direct dependencies such as one provided by + DirectDependencies. DirectAndImportedDependencies is intended to be the + public entry point. + """ + + if dependencies == None: + dependencies = [] + + index = 0 + while index < len(dependencies): + dependency = dependencies[index] + dependency_dict = targets[dependency] + # Add any dependencies whose settings should be imported to the list + # if not already present. Newly-added items will be checked for + # their own imports when the list iteration reaches them. + # Rather than simply appending new items, insert them after the + # dependency that exported them. This is done to more closely match + # the depth-first method used by DeepDependencies. + add_index = 1 + for imported_dependency in \ + dependency_dict.get('export_dependent_settings', []): + if imported_dependency not in dependencies: + dependencies.insert(index + add_index, imported_dependency) + add_index = add_index + 1 + index = index + 1 + + return dependencies + + def DirectAndImportedDependencies(self, targets, dependencies=None): + """Returns a list of a target's direct dependencies and all indirect + dependencies that a dependency has advertised settings should be exported + through the dependency for. + """ + + dependencies = self.DirectDependencies(dependencies) + return self._AddImportedDependencies(targets, dependencies) + + def DeepDependencies(self, dependencies=None): + """Returns an OrderedSet of all of a target's dependencies, recursively.""" + if dependencies is None: + # Using a list to get ordered output and a set to do fast "is it + # already added" checks. + dependencies = OrderedSet() + + for dependency in self.dependencies: + # Check for None, corresponding to the root node. 
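A minimal, editor-added sketch of how the graph helpers above behave, assuming DependencyGraphNode from this module is importable; the target names are invented.

a, b, c = DependencyGraphNode('A'), DependencyGraphNode('B'), DependencyGraphNode('C')
a.dependencies.append(b); b.dependents.append(a)   # A depends on B
b.dependencies.append(c); c.dependents.append(b)   # B depends on C
print(sorted(a.DeepDependencies()))                # ['B', 'C']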
+ if dependency.ref is None: + continue + if dependency.ref not in dependencies: + dependency.DeepDependencies(dependencies) + dependencies.add(dependency.ref) + + return dependencies + + def _LinkDependenciesInternal(self, targets, include_shared_libraries, + dependencies=None, initial=True): + """Returns an OrderedSet of dependency targets that are linked + into this target. + + This function has a split personality, depending on the setting of + |initial|. Outside callers should always leave |initial| at its default + setting. + + When adding a target to the list of dependencies, this function will + recurse into itself with |initial| set to False, to collect dependencies + that are linked into the linkable target for which the list is being built. + + If |include_shared_libraries| is False, the resulting dependencies will not + include shared_library targets that are linked into this target. + """ + if dependencies is None: + # Using a list to get ordered output and a set to do fast "is it + # already added" checks. + dependencies = OrderedSet() + + # Check for None, corresponding to the root node. + if self.ref is None: + return dependencies + + # It's kind of sucky that |targets| has to be passed into this function, + # but that's presently the easiest way to access the target dicts so that + # this function can find target types. + + if 'target_name' not in targets[self.ref]: + raise GypError("Missing 'target_name' field in target.") + + if 'type' not in targets[self.ref]: + raise GypError("Missing 'type' field in target %s" % + targets[self.ref]['target_name']) + + target_type = targets[self.ref]['type'] + + is_linkable = target_type in linkable_types + + if initial and not is_linkable: + # If this is the first target being examined and it's not linkable, + # return an empty list of link dependencies, because the link + # dependencies are intended to apply to the target itself (initial is + # True) and this target won't be linked. + return dependencies + + # Don't traverse 'none' targets if explicitly excluded. + if (target_type == 'none' and + not targets[self.ref].get('dependencies_traverse', True)): + dependencies.add(self.ref) + return dependencies + + # Executables, mac kernel extensions, windows drivers and loadable modules + # are already fully and finally linked. Nothing else can be a link + # dependency of them, there can only be dependencies in the sense that a + # dependent target might run an executable or load the loadable_module. + if not initial and target_type in ('executable', 'loadable_module', + 'mac_kernel_extension', + 'windows_driver'): + return dependencies + + # Shared libraries are already fully linked. They should only be included + # in |dependencies| when adjusting static library dependencies (in order to + # link against the shared_library's import lib), but should not be included + # in |dependencies| when propagating link_settings. + # The |include_shared_libraries| flag controls which of these two cases we + # are handling. + if (not initial and target_type == 'shared_library' and + not include_shared_libraries): + return dependencies + + # The target is linkable, add it to the list of link dependencies. + if self.ref not in dependencies: + dependencies.add(self.ref) + if initial or not is_linkable: + # If this is a subsequent target and it's linkable, don't look any + # further for linkable dependencies, as they'll already be linked into + # this target linkable. 
Always look at dependencies of the initial + # target, and always look at dependencies of non-linkables. + for dependency in self.dependencies: + dependency._LinkDependenciesInternal(targets, + include_shared_libraries, + dependencies, False) + + return dependencies + + def DependenciesForLinkSettings(self, targets): + """ + Returns a list of dependency targets whose link_settings should be merged + into this target. + """ + + # TODO(sbaig) Currently, chrome depends on the bug that shared libraries' + # link_settings are propagated. So for now, we will allow it, unless the + # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to + # False. Once chrome is fixed, we can remove this flag. + include_shared_libraries = \ + targets[self.ref].get('allow_sharedlib_linksettings_propagation', True) + return self._LinkDependenciesInternal(targets, include_shared_libraries) + + def DependenciesToLinkAgainst(self, targets): + """ + Returns a list of dependency targets that are linked into this target. + """ + return self._LinkDependenciesInternal(targets, True) + + +def BuildDependencyList(targets): + # Create a DependencyGraphNode for each target. Put it into a dict for easy + # access. + dependency_nodes = {} + for target, spec in targets.iteritems(): + if target not in dependency_nodes: + dependency_nodes[target] = DependencyGraphNode(target) + + # Set up the dependency links. Targets that have no dependencies are treated + # as dependent on root_node. + root_node = DependencyGraphNode(None) + for target, spec in targets.iteritems(): + target_node = dependency_nodes[target] + target_build_file = gyp.common.BuildFile(target) + dependencies = spec.get('dependencies') + if not dependencies: + target_node.dependencies = [root_node] + root_node.dependents.append(target_node) + else: + for dependency in dependencies: + dependency_node = dependency_nodes.get(dependency) + if not dependency_node: + raise GypError("Dependency '%s' not found while " + "trying to load target %s" % (dependency, target)) + target_node.dependencies.append(dependency_node) + dependency_node.dependents.append(target_node) + + flat_list = root_node.FlattenToList() + + # If there's anything left unvisited, there must be a circular dependency + # (cycle). + if len(flat_list) != len(targets): + if not root_node.dependents: + # If all targets have dependencies, add the first target as a dependent + # of root_node so that the cycle can be discovered from root_node. + target = targets.keys()[0] + target_node = dependency_nodes[target] + target_node.dependencies.append(root_node) + root_node.dependents.append(target_node) + + cycles = [] + for cycle in root_node.FindCycles(): + paths = [node.ref for node in cycle] + cycles.append('Cycle: %s' % ' -> '.join(paths)) + raise DependencyGraphNode.CircularException( + 'Cycles in dependency graph detected:\n' + '\n'.join(cycles)) + + return [dependency_nodes, flat_list] + + +def VerifyNoGYPFileCircularDependencies(targets): + # Create a DependencyGraphNode for each gyp file containing a target. Put + # it into a dict for easy access. + dependency_nodes = {} + for target in targets.iterkeys(): + build_file = gyp.common.BuildFile(target) + if not build_file in dependency_nodes: + dependency_nodes[build_file] = DependencyGraphNode(build_file) + + # Set up the dependency links. 
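A minimal sketch of how the dependency-list builder above reports a cycle, assuming gyp is importable and run under Python 2 (which this vendored copy targets); the target names and .gyp path are invented, and the qualified names follow the 'path:target#toolset' form used throughout this module.

targets = {
  'a.gyp:A#target': {'target_name': 'A', 'toolset': 'target', 'type': 'none',
                     'dependencies': ['a.gyp:B#target']},
  'a.gyp:B#target': {'target_name': 'B', 'toolset': 'target', 'type': 'none',
                     'dependencies': ['a.gyp:A#target']},
}
try:
  BuildDependencyList(targets)
except DependencyGraphNode.CircularException as err:
  print(err)   # reports the a.gyp:A -> a.gyp:B -> a.gyp:A cycle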
+ for target, spec in targets.iteritems(): + build_file = gyp.common.BuildFile(target) + build_file_node = dependency_nodes[build_file] + target_dependencies = spec.get('dependencies', []) + for dependency in target_dependencies: + try: + dependency_build_file = gyp.common.BuildFile(dependency) + except GypError, e: + gyp.common.ExceptionAppend( + e, 'while computing dependencies of .gyp file %s' % build_file) + raise + + if dependency_build_file == build_file: + # A .gyp file is allowed to refer back to itself. + continue + dependency_node = dependency_nodes.get(dependency_build_file) + if not dependency_node: + raise GypError("Dependancy '%s' not found" % dependency_build_file) + if dependency_node not in build_file_node.dependencies: + build_file_node.dependencies.append(dependency_node) + dependency_node.dependents.append(build_file_node) + + + # Files that have no dependencies are treated as dependent on root_node. + root_node = DependencyGraphNode(None) + for build_file_node in dependency_nodes.itervalues(): + if len(build_file_node.dependencies) == 0: + build_file_node.dependencies.append(root_node) + root_node.dependents.append(build_file_node) + + flat_list = root_node.FlattenToList() + + # If there's anything left unvisited, there must be a circular dependency + # (cycle). + if len(flat_list) != len(dependency_nodes): + if not root_node.dependents: + # If all files have dependencies, add the first file as a dependent + # of root_node so that the cycle can be discovered from root_node. + file_node = dependency_nodes.values()[0] + file_node.dependencies.append(root_node) + root_node.dependents.append(file_node) + cycles = [] + for cycle in root_node.FindCycles(): + paths = [node.ref for node in cycle] + cycles.append('Cycle: %s' % ' -> '.join(paths)) + raise DependencyGraphNode.CircularException( + 'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles)) + + +def DoDependentSettings(key, flat_list, targets, dependency_nodes): + # key should be one of all_dependent_settings, direct_dependent_settings, + # or link_settings. + + for target in flat_list: + target_dict = targets[target] + build_file = gyp.common.BuildFile(target) + + if key == 'all_dependent_settings': + dependencies = dependency_nodes[target].DeepDependencies() + elif key == 'direct_dependent_settings': + dependencies = \ + dependency_nodes[target].DirectAndImportedDependencies(targets) + elif key == 'link_settings': + dependencies = \ + dependency_nodes[target].DependenciesForLinkSettings(targets) + else: + raise GypError("DoDependentSettings doesn't know how to determine " + 'dependencies for ' + key) + + for dependency in dependencies: + dependency_dict = targets[dependency] + if not key in dependency_dict: + continue + dependency_build_file = gyp.common.BuildFile(dependency) + MergeDicts(target_dict, dependency_dict[key], + build_file, dependency_build_file) + + +def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes, + sort_dependencies): + # Recompute target "dependencies" properties. For each static library + # target, remove "dependencies" entries referring to other static libraries, + # unless the dependency has the "hard_dependency" attribute set. For each + # linkable target, add a "dependencies" entry referring to all of the + # target's computed list of link dependencies (including static libraries + # if no such entry is already present. 
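To make the attributes this pass inspects concrete, an editor's sketch of a static-library target (all names and flags invented) that marks itself as a hard dependency and exports settings to its dependents.

example_target = {
  'target_name': 'zlib',
  'type': 'static_library',
  'hard_dependency': 1,   # survives the static-library pruning described above
  'direct_dependent_settings': {'include_dirs': ['include']},
  'all_dependent_settings': {'defines': ['HAVE_ZLIB']},
  'link_settings': {'libraries': ['-lz']},
}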
+ for target in flat_list: + target_dict = targets[target] + target_type = target_dict['type'] + + if target_type == 'static_library': + if not 'dependencies' in target_dict: + continue + + target_dict['dependencies_original'] = target_dict.get( + 'dependencies', [])[:] + + # A static library should not depend on another static library unless + # the dependency relationship is "hard," which should only be done when + # a dependent relies on some side effect other than just the build + # product, like a rule or action output. Further, if a target has a + # non-hard dependency, but that dependency exports a hard dependency, + # the non-hard dependency can safely be removed, but the exported hard + # dependency must be added to the target to keep the same dependency + # ordering. + dependencies = \ + dependency_nodes[target].DirectAndImportedDependencies(targets) + index = 0 + while index < len(dependencies): + dependency = dependencies[index] + dependency_dict = targets[dependency] + + # Remove every non-hard static library dependency and remove every + # non-static library dependency that isn't a direct dependency. + if (dependency_dict['type'] == 'static_library' and \ + not dependency_dict.get('hard_dependency', False)) or \ + (dependency_dict['type'] != 'static_library' and \ + not dependency in target_dict['dependencies']): + # Take the dependency out of the list, and don't increment index + # because the next dependency to analyze will shift into the index + # formerly occupied by the one being removed. + del dependencies[index] + else: + index = index + 1 + + # Update the dependencies. If the dependencies list is empty, it's not + # needed, so unhook it. + if len(dependencies) > 0: + target_dict['dependencies'] = dependencies + else: + del target_dict['dependencies'] + + elif target_type in linkable_types: + # Get a list of dependency targets that should be linked into this + # target. Add them to the dependencies list if they're not already + # present. + + link_dependencies = \ + dependency_nodes[target].DependenciesToLinkAgainst(targets) + for dependency in link_dependencies: + if dependency == target: + continue + if not 'dependencies' in target_dict: + target_dict['dependencies'] = [] + if not dependency in target_dict['dependencies']: + target_dict['dependencies'].append(dependency) + # Sort the dependencies list in the order from dependents to dependencies. + # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D. + # Note: flat_list is already sorted in the order from dependencies to + # dependents. + if sort_dependencies and 'dependencies' in target_dict: + target_dict['dependencies'] = [dep for dep in reversed(flat_list) + if dep in target_dict['dependencies']] + + +# Initialize this here to speed up MakePathRelative. +exception_re = re.compile(r'''["']?[-/$<>^]''') + + +def MakePathRelative(to_file, fro_file, item): + # If item is a relative path, it's relative to the build file dict that it's + # coming from. Fix it up to make it relative to the build file dict that + # it's going into. + # Exception: any |item| that begins with these special characters is + # returned without modification. 
+ # / Used when a path is already absolute (shortcut optimization; + # such paths would be returned as absolute anyway) + # $ Used for build environment variables + # - Used for some build environment flags (such as -lapr-1 in a + # "libraries" section) + # < Used for our own variable and command expansions (see ExpandVariables) + # > Used for our own variable and command expansions (see ExpandVariables) + # ^ Used for our own variable and command expansions (see ExpandVariables) + # + # "/' Used when a value is quoted. If these are present, then we + # check the second character instead. + # + if to_file == fro_file or exception_re.match(item): + return item + else: + # TODO(dglazkov) The backslash/forward-slash replacement at the end is a + # temporary measure. This should really be addressed by keeping all paths + # in POSIX until actual project generation. + ret = os.path.normpath(os.path.join( + gyp.common.RelativePath(os.path.dirname(fro_file), + os.path.dirname(to_file)), + item)).replace('\\', '/') + if item[-1] == '/': + ret += '/' + return ret + +def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True): + # Python documentation recommends objects which do not support hash + # set this value to None. Python library objects follow this rule. + is_hashable = lambda val: val.__hash__ + + # If x is hashable, returns whether x is in s. Else returns whether x is in l. + def is_in_set_or_list(x, s, l): + if is_hashable(x): + return x in s + return x in l + + prepend_index = 0 + + # Make membership testing of hashables in |to| (in particular, strings) + # faster. + hashable_to_set = set(x for x in to if is_hashable(x)) + for item in fro: + singleton = False + if type(item) in (str, int): + # The cheap and easy case. + if is_paths: + to_item = MakePathRelative(to_file, fro_file, item) + else: + to_item = item + + if not (type(item) is str and item.startswith('-')): + # Any string that doesn't begin with a "-" is a singleton - it can + # only appear once in a list, to be enforced by the list merge append + # or prepend. + singleton = True + elif type(item) is dict: + # Make a copy of the dictionary, continuing to look for paths to fix. + # The other intelligent aspects of merge processing won't apply because + # item is being merged into an empty dict. + to_item = {} + MergeDicts(to_item, item, to_file, fro_file) + elif type(item) is list: + # Recurse, making a copy of the list. If the list contains any + # descendant dicts, path fixing will occur. Note that here, custom + # values for is_paths and append are dropped; those are only to be + # applied to |to| and |fro|, not sublists of |fro|. append shouldn't + # matter anyway because the new |to_item| list is empty. + to_item = [] + MergeLists(to_item, item, to_file, fro_file) + else: + raise TypeError( + 'Attempt to merge list item of unsupported type ' + \ + item.__class__.__name__) + + if append: + # If appending a singleton that's already in the list, don't append. + # This ensures that the earliest occurrence of the item will stay put. + if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to): + to.append(to_item) + if is_hashable(to_item): + hashable_to_set.add(to_item) + else: + # If prepending a singleton that's already in the list, remove the + # existing instance and proceed with the prepend. This ensures that the + # item appears at the earliest possible position in the list. + while singleton and to_item in to: + to.remove(to_item) + + # Don't just insert everything at index 0. 
That would prepend the new + # items to the list in reverse order, which would be an unwelcome + # surprise. + to.insert(prepend_index, to_item) + if is_hashable(to_item): + hashable_to_set.add(to_item) + prepend_index = prepend_index + 1 + + +def MergeDicts(to, fro, to_file, fro_file): + # I wanted to name the parameter "from" but it's a Python keyword... + for k, v in fro.iteritems(): + # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give + # copy semantics. Something else may want to merge from the |fro| dict + # later, and having the same dict ref pointed to twice in the tree isn't + # what anyone wants considering that the dicts may subsequently be + # modified. + if k in to: + bad_merge = False + if type(v) in (str, int): + if type(to[k]) not in (str, int): + bad_merge = True + elif type(v) is not type(to[k]): + bad_merge = True + + if bad_merge: + raise TypeError( + 'Attempt to merge dict value of type ' + v.__class__.__name__ + \ + ' into incompatible type ' + to[k].__class__.__name__ + \ + ' for key ' + k) + if type(v) in (str, int): + # Overwrite the existing value, if any. Cheap and easy. + is_path = IsPathSection(k) + if is_path: + to[k] = MakePathRelative(to_file, fro_file, v) + else: + to[k] = v + elif type(v) is dict: + # Recurse, guaranteeing copies will be made of objects that require it. + if not k in to: + to[k] = {} + MergeDicts(to[k], v, to_file, fro_file) + elif type(v) is list: + # Lists in dicts can be merged with different policies, depending on + # how the key in the "from" dict (k, the from-key) is written. + # + # If the from-key has ...the to-list will have this action + # this character appended:... applied when receiving the from-list: + # = replace + # + prepend + # ? set, only if to-list does not yet exist + # (none) append + # + # This logic is list-specific, but since it relies on the associated + # dict key, it's checked in this dict-oriented function. + ext = k[-1] + append = True + if ext == '=': + list_base = k[:-1] + lists_incompatible = [list_base, list_base + '?'] + to[list_base] = [] + elif ext == '+': + list_base = k[:-1] + lists_incompatible = [list_base + '=', list_base + '?'] + append = False + elif ext == '?': + list_base = k[:-1] + lists_incompatible = [list_base, list_base + '=', list_base + '+'] + else: + list_base = k + lists_incompatible = [list_base + '=', list_base + '?'] + + # Some combinations of merge policies appearing together are meaningless. + # It's stupid to replace and append simultaneously, for example. Append + # and prepend are the only policies that can coexist. + for list_incompatible in lists_incompatible: + if list_incompatible in fro: + raise GypError('Incompatible list policies ' + k + ' and ' + + list_incompatible) + + if list_base in to: + if ext == '?': + # If the key ends in "?", the list will only be merged if it doesn't + # already exist. + continue + elif type(to[list_base]) is not list: + # This may not have been checked above if merging in a list with an + # extension character. + raise TypeError( + 'Attempt to merge dict value of type ' + v.__class__.__name__ + \ + ' into incompatible type ' + to[list_base].__class__.__name__ + \ + ' for key ' + list_base + '(' + k + ')') + else: + to[list_base] = [] + + # Call MergeLists, which will make copies of objects that require it. 
+ # MergeLists can recurse back into MergeDicts, although this will be + # to make copies of dicts (with paths fixed), there will be no + # subsequent dict "merging" once entering a list because lists are + # always replaced, appended to, or prepended to. + is_paths = IsPathSection(list_base) + MergeLists(to[list_base], v, to_file, fro_file, is_paths, append) + else: + raise TypeError( + 'Attempt to merge dict value of unsupported type ' + \ + v.__class__.__name__ + ' for key ' + k) + + +def MergeConfigWithInheritance(new_configuration_dict, build_file, + target_dict, configuration, visited): + # Skip if previously visted. + if configuration in visited: + return + + # Look at this configuration. + configuration_dict = target_dict['configurations'][configuration] + + # Merge in parents. + for parent in configuration_dict.get('inherit_from', []): + MergeConfigWithInheritance(new_configuration_dict, build_file, + target_dict, parent, visited + [configuration]) + + # Merge it into the new config. + MergeDicts(new_configuration_dict, configuration_dict, + build_file, build_file) + + # Drop abstract. + if 'abstract' in new_configuration_dict: + del new_configuration_dict['abstract'] + + +def SetUpConfigurations(target, target_dict): + # key_suffixes is a list of key suffixes that might appear on key names. + # These suffixes are handled in conditional evaluations (for =, +, and ?) + # and rules/exclude processing (for ! and /). Keys with these suffixes + # should be treated the same as keys without. + key_suffixes = ['=', '+', '?', '!', '/'] + + build_file = gyp.common.BuildFile(target) + + # Provide a single configuration by default if none exists. + # TODO(mark): Signal an error if default_configurations exists but + # configurations does not. + if not 'configurations' in target_dict: + target_dict['configurations'] = {'Default': {}} + if not 'default_configuration' in target_dict: + concrete = [i for (i, config) in target_dict['configurations'].iteritems() + if not config.get('abstract')] + target_dict['default_configuration'] = sorted(concrete)[0] + + merged_configurations = {} + configs = target_dict['configurations'] + for (configuration, old_configuration_dict) in configs.iteritems(): + # Skip abstract configurations (saves work only). + if old_configuration_dict.get('abstract'): + continue + # Configurations inherit (most) settings from the enclosing target scope. + # Get the inheritance relationship right by making a copy of the target + # dict. + new_configuration_dict = {} + for (key, target_val) in target_dict.iteritems(): + key_ext = key[-1:] + if key_ext in key_suffixes: + key_base = key[:-1] + else: + key_base = key + if not key_base in non_configuration_keys: + new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val) + + # Merge in configuration (with all its parents first). + MergeConfigWithInheritance(new_configuration_dict, build_file, + target_dict, configuration, []) + + merged_configurations[configuration] = new_configuration_dict + + # Put the new configurations back into the target dict as a configuration. + for configuration in merged_configurations.keys(): + target_dict['configurations'][configuration] = ( + merged_configurations[configuration]) + + # Now drop all the abstract ones. 
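
To make the inheritance handling above concrete: an abstract configuration holds shared settings, concrete configurations pull them in through 'inherit_from', and the abstract entry never survives into the final target dict. The sketch below resolves inheritance with a simplified merge rather than gyp's MergeDicts, so it only demonstrates the shape of the result; the configuration names and defines are made up.

configurations = {
    'Common': {'abstract': 1, 'defines': ['COMMON']},
    'Debug': {'inherit_from': ['Common'], 'defines': ['DEBUG']},
    'Release': {'inherit_from': ['Common'], 'defines': ['NDEBUG']},
}

def resolve(name, configs):
    # Simplified stand-in for MergeConfigWithInheritance: merge parents
    # first, then the configuration itself, concatenating list values.
    merged = {}
    for parent in configs[name].get('inherit_from', []):
        merged.update(resolve(parent, configs))
    for key, value in configs[name].items():
        if key in ('abstract', 'inherit_from'):
            continue
        if isinstance(value, list):
            merged[key] = merged.get(key, []) + value
        else:
            merged[key] = value
    return merged

final = {name: resolve(name, configurations)
         for name, cfg in configurations.items() if not cfg.get('abstract')}
print(final)
# {'Debug': {'defines': ['COMMON', 'DEBUG']},
#  'Release': {'defines': ['COMMON', 'NDEBUG']}}
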
+ for configuration in target_dict['configurations'].keys(): + old_configuration_dict = target_dict['configurations'][configuration] + if old_configuration_dict.get('abstract'): + del target_dict['configurations'][configuration] + + # Now that all of the target's configurations have been built, go through + # the target dict's keys and remove everything that's been moved into a + # "configurations" section. + delete_keys = [] + for key in target_dict: + key_ext = key[-1:] + if key_ext in key_suffixes: + key_base = key[:-1] + else: + key_base = key + if not key_base in non_configuration_keys: + delete_keys.append(key) + for key in delete_keys: + del target_dict[key] + + # Check the configurations to see if they contain invalid keys. + for configuration in target_dict['configurations'].keys(): + configuration_dict = target_dict['configurations'][configuration] + for key in configuration_dict.keys(): + if key in invalid_configuration_keys: + raise GypError('%s not allowed in the %s configuration, found in ' + 'target %s' % (key, configuration, target)) + + + +def ProcessListFiltersInDict(name, the_dict): + """Process regular expression and exclusion-based filters on lists. + + An exclusion list is in a dict key named with a trailing "!", like + "sources!". Every item in such a list is removed from the associated + main list, which in this example, would be "sources". Removed items are + placed into a "sources_excluded" list in the dict. + + Regular expression (regex) filters are contained in dict keys named with a + trailing "/", such as "sources/" to operate on the "sources" list. Regex + filters in a dict take the form: + 'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'], + ['include', '_mac\\.cc$'] ], + The first filter says to exclude all files ending in _linux.cc, _mac.cc, and + _win.cc. The second filter then includes all files ending in _mac.cc that + are now or were once in the "sources" list. Items matching an "exclude" + filter are subject to the same processing as would occur if they were listed + by name in an exclusion list (ending in "!"). Items matching an "include" + filter are brought back into the main list if previously excluded by an + exclusion list or exclusion regex filter. Subsequent matching "exclude" + patterns can still cause items to be excluded after matching an "include". + """ + + # Look through the dictionary for any lists whose keys end in "!" or "/". + # These are lists that will be treated as exclude lists and regular + # expression-based exclude/include lists. Collect the lists that are + # needed first, looking for the lists that they operate on, and assemble + # then into |lists|. This is done in a separate loop up front, because + # the _included and _excluded keys need to be added to the_dict, and that + # can't be done while iterating through it. + + lists = [] + del_lists = [] + for key, value in the_dict.iteritems(): + operation = key[-1] + if operation != '!' and operation != '/': + continue + + if type(value) is not list: + raise ValueError(name + ' key ' + key + ' must be list, not ' + \ + value.__class__.__name__) + + list_key = key[:-1] + if list_key not in the_dict: + # This happens when there's a list like "sources!" but no corresponding + # "sources" list. Since there's nothing for it to operate on, queue up + # the "sources!" list for deletion now. 
+ del_lists.append(key) + continue + + if type(the_dict[list_key]) is not list: + value = the_dict[list_key] + raise ValueError(name + ' key ' + list_key + \ + ' must be list, not ' + \ + value.__class__.__name__ + ' when applying ' + \ + {'!': 'exclusion', '/': 'regex'}[operation]) + + if not list_key in lists: + lists.append(list_key) + + # Delete the lists that are known to be unneeded at this point. + for del_list in del_lists: + del the_dict[del_list] + + for list_key in lists: + the_list = the_dict[list_key] + + # Initialize the list_actions list, which is parallel to the_list. Each + # item in list_actions identifies whether the corresponding item in + # the_list should be excluded, unconditionally preserved (included), or + # whether no exclusion or inclusion has been applied. Items for which + # no exclusion or inclusion has been applied (yet) have value -1, items + # excluded have value 0, and items included have value 1. Includes and + # excludes override previous actions. All items in list_actions are + # initialized to -1 because no excludes or includes have been processed + # yet. + list_actions = list((-1,) * len(the_list)) + + exclude_key = list_key + '!' + if exclude_key in the_dict: + for exclude_item in the_dict[exclude_key]: + for index in xrange(0, len(the_list)): + if exclude_item == the_list[index]: + # This item matches the exclude_item, so set its action to 0 + # (exclude). + list_actions[index] = 0 + + # The "whatever!" list is no longer needed, dump it. + del the_dict[exclude_key] + + regex_key = list_key + '/' + if regex_key in the_dict: + for regex_item in the_dict[regex_key]: + [action, pattern] = regex_item + pattern_re = re.compile(pattern) + + if action == 'exclude': + # This item matches an exclude regex, so set its value to 0 (exclude). + action_value = 0 + elif action == 'include': + # This item matches an include regex, so set its value to 1 (include). + action_value = 1 + else: + # This is an action that doesn't make any sense. + raise ValueError('Unrecognized action ' + action + ' in ' + name + \ + ' key ' + regex_key) + + for index in xrange(0, len(the_list)): + list_item = the_list[index] + if list_actions[index] == action_value: + # Even if the regex matches, nothing will change so continue (regex + # searches are expensive). + continue + if pattern_re.search(list_item): + # Regular expression match. + list_actions[index] = action_value + + # The "whatever/" list is no longer needed, dump it. + del the_dict[regex_key] + + # Add excluded items to the excluded list. + # + # Note that exclude_key ("sources!") is different from excluded_key + # ("sources_excluded"). The exclude_key list is input and it was already + # processed and deleted; the excluded_key list is output and it's about + # to be created. + excluded_key = list_key + '_excluded' + if excluded_key in the_dict: + raise GypError(name + ' key ' + excluded_key + + ' must not be present prior ' + ' to applying exclusion/regex filters for ' + list_key) + + excluded_list = [] + + # Go backwards through the list_actions list so that as items are deleted, + # the indices of items that haven't been seen yet don't shift. That means + # that things need to be prepended to excluded_list to maintain them in the + # same order that they existed in the_list. + for index in xrange(len(list_actions) - 1, -1, -1): + if list_actions[index] == 0: + # Dump anything with action 0 (exclude). Keep anything with action 1 + # (include) or -1 (no include or exclude seen for the item). 
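
Tracing the docstring's 'sources!'/'sources/' example above end to end, the snippet below reimplements the -1/0/1 action bookkeeping in miniature (it does not call ProcessListFiltersInDict, and the file names are invented) to show which entries stay and which land in 'sources_excluded'.

import re

sources = ['main.cc', 'util_linux.cc', 'util_mac.cc', 'util_win.cc']
exclude_list = ['main.cc']                                 # 'sources!'
regex_filters = [['exclude', r'_(linux|mac|win)\.cc$'],    # 'sources/'
                 ['include', r'_mac\.cc$']]

actions = [-1] * len(sources)   # -1 untouched, 0 excluded, 1 included
for item in exclude_list:
    for i, source in enumerate(sources):
        if source == item:
            actions[i] = 0
for action, pattern in regex_filters:
    value = 0 if action == 'exclude' else 1
    for i, source in enumerate(sources):
        if re.search(pattern, source):
            actions[i] = value

kept = [s for s, a in zip(sources, actions) if a != 0]
excluded = [s for s, a in zip(sources, actions) if a == 0]
print(kept)      # ['util_mac.cc']
print(excluded)  # ['main.cc', 'util_linux.cc', 'util_win.cc']
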
+ excluded_list.insert(0, the_list[index]) + del the_list[index] + + # If anything was excluded, put the excluded list into the_dict at + # excluded_key. + if len(excluded_list) > 0: + the_dict[excluded_key] = excluded_list + + # Now recurse into subdicts and lists that may contain dicts. + for key, value in the_dict.iteritems(): + if type(value) is dict: + ProcessListFiltersInDict(key, value) + elif type(value) is list: + ProcessListFiltersInList(key, value) + + +def ProcessListFiltersInList(name, the_list): + for item in the_list: + if type(item) is dict: + ProcessListFiltersInDict(name, item) + elif type(item) is list: + ProcessListFiltersInList(name, item) + + +def ValidateTargetType(target, target_dict): + """Ensures the 'type' field on the target is one of the known types. + + Arguments: + target: string, name of target. + target_dict: dict, target spec. + + Raises an exception on error. + """ + VALID_TARGET_TYPES = ('executable', 'loadable_module', + 'static_library', 'shared_library', + 'mac_kernel_extension', 'none', 'windows_driver') + target_type = target_dict.get('type', None) + if target_type not in VALID_TARGET_TYPES: + raise GypError("Target %s has an invalid target type '%s'. " + "Must be one of %s." % + (target, target_type, '/'.join(VALID_TARGET_TYPES))) + if (target_dict.get('standalone_static_library', 0) and + not target_type == 'static_library'): + raise GypError('Target %s has type %s but standalone_static_library flag is' + ' only valid for static_library type.' % (target, + target_type)) + + +def ValidateSourcesInTarget(target, target_dict, build_file, + duplicate_basename_check): + if not duplicate_basename_check: + return + if target_dict.get('type', None) != 'static_library': + return + sources = target_dict.get('sources', []) + basenames = {} + for source in sources: + name, ext = os.path.splitext(source) + is_compiled_file = ext in [ + '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'] + if not is_compiled_file: + continue + basename = os.path.basename(name) # Don't include extension. + basenames.setdefault(basename, []).append(source) + + error = '' + for basename, files in basenames.iteritems(): + if len(files) > 1: + error += ' %s: %s\n' % (basename, ' '.join(files)) + + if error: + print('static library %s has several files with the same basename:\n' % + target + error + 'libtool on Mac cannot handle that. Use ' + '--no-duplicate-basename-check to disable this validation.') + raise GypError('Duplicate basenames in sources section, see list above') + + +def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules): + """Ensures that the rules sections in target_dict are valid and consistent, + and determines which sources they apply to. + + Arguments: + target: string, name of target. + target_dict: dict, target spec containing "rules" and "sources" lists. + extra_sources_for_rules: a list of keys to scan for rule matches in + addition to 'sources'. + """ + + # Dicts to map between values found in rules' 'rule_name' and 'extension' + # keys and the rule dicts themselves. + rule_names = {} + rule_extensions = {} + + rules = target_dict.get('rules', []) + for rule in rules: + # Make sure that there's no conflict among rule names and extensions. 
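
For the source matching that ValidateRulesInTarget performs (the rule_sources loop appears a little further on), a rule's 'extension' simply selects the entries of 'sources' whose extension matches once the leading dot is stripped. A minimal illustration with invented file names and a hypothetical rule:

import os

rule = {'rule_name': 'protoc', 'extension': 'proto'}
sources = ['src/messages.proto', 'src/main.cc', 'src/types.proto']

rule_sources = []
for source in sources:
    _, ext = os.path.splitext(source)
    if ext.lstrip('.') == rule['extension']:
        rule_sources.append(source)
print(rule_sources)   # ['src/messages.proto', 'src/types.proto']
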
+ rule_name = rule['rule_name'] + if rule_name in rule_names: + raise GypError('rule %s exists in duplicate, target %s' % + (rule_name, target)) + rule_names[rule_name] = rule + + rule_extension = rule['extension'] + if rule_extension.startswith('.'): + rule_extension = rule_extension[1:] + if rule_extension in rule_extensions: + raise GypError(('extension %s associated with multiple rules, ' + + 'target %s rules %s and %s') % + (rule_extension, target, + rule_extensions[rule_extension]['rule_name'], + rule_name)) + rule_extensions[rule_extension] = rule + + # Make sure rule_sources isn't already there. It's going to be + # created below if needed. + if 'rule_sources' in rule: + raise GypError( + 'rule_sources must not exist in input, target %s rule %s' % + (target, rule_name)) + + rule_sources = [] + source_keys = ['sources'] + source_keys.extend(extra_sources_for_rules) + for source_key in source_keys: + for source in target_dict.get(source_key, []): + (source_root, source_extension) = os.path.splitext(source) + if source_extension.startswith('.'): + source_extension = source_extension[1:] + if source_extension == rule_extension: + rule_sources.append(source) + + if len(rule_sources) > 0: + rule['rule_sources'] = rule_sources + + +def ValidateRunAsInTarget(target, target_dict, build_file): + target_name = target_dict.get('target_name') + run_as = target_dict.get('run_as') + if not run_as: + return + if type(run_as) is not dict: + raise GypError("The 'run_as' in target %s from file %s should be a " + "dictionary." % + (target_name, build_file)) + action = run_as.get('action') + if not action: + raise GypError("The 'run_as' in target %s from file %s must have an " + "'action' section." % + (target_name, build_file)) + if type(action) is not list: + raise GypError("The 'action' for 'run_as' in target %s from file %s " + "must be a list." % + (target_name, build_file)) + working_directory = run_as.get('working_directory') + if working_directory and type(working_directory) is not str: + raise GypError("The 'working_directory' for 'run_as' in target %s " + "in file %s should be a string." % + (target_name, build_file)) + environment = run_as.get('environment') + if environment and type(environment) is not dict: + raise GypError("The 'environment' for 'run_as' in target %s " + "in file %s should be a dictionary." % + (target_name, build_file)) + + +def ValidateActionsInTarget(target, target_dict, build_file): + '''Validates the inputs to the actions in a target.''' + target_name = target_dict.get('target_name') + actions = target_dict.get('actions', []) + for action in actions: + action_name = action.get('action_name') + if not action_name: + raise GypError("Anonymous action in target %s. " + "An action must have an 'action_name' field." % + target_name) + inputs = action.get('inputs', None) + if inputs is None: + raise GypError('Action in target %s has no inputs.' % target_name) + action_command = action.get('action') + if action_command and not action_command[0]: + raise GypError("Empty action as command in target %s." % target_name) + + +def TurnIntIntoStrInDict(the_dict): + """Given dict the_dict, recursively converts all integers into strings. + """ + # Use items instead of iteritems because there's no need to try to look at + # reinserted keys and their associated values. 
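
Assuming deps/libuv/build/gyp/pylib is on sys.path and a Python 2 interpreter (which these vendored tools target), the conversion implemented just below behaves like this; the dict contents are made up:

import gyp.input

d = {'defines': ['DEPTH=2', 7], 3: {'level': 1}}
gyp.input.TurnIntIntoStrInDict(d)
print(d)   # {'defines': ['DEPTH=2', '7'], '3': {'level': '1'}}
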
+ for k, v in the_dict.items(): + if type(v) is int: + v = str(v) + the_dict[k] = v + elif type(v) is dict: + TurnIntIntoStrInDict(v) + elif type(v) is list: + TurnIntIntoStrInList(v) + + if type(k) is int: + del the_dict[k] + the_dict[str(k)] = v + + +def TurnIntIntoStrInList(the_list): + """Given list the_list, recursively converts all integers into strings. + """ + for index in xrange(0, len(the_list)): + item = the_list[index] + if type(item) is int: + the_list[index] = str(item) + elif type(item) is dict: + TurnIntIntoStrInDict(item) + elif type(item) is list: + TurnIntIntoStrInList(item) + + +def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets, + data): + """Return only the targets that are deep dependencies of |root_targets|.""" + qualified_root_targets = [] + for target in root_targets: + target = target.strip() + qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list) + if not qualified_targets: + raise GypError("Could not find target %s" % target) + qualified_root_targets.extend(qualified_targets) + + wanted_targets = {} + for target in qualified_root_targets: + wanted_targets[target] = targets[target] + for dependency in dependency_nodes[target].DeepDependencies(): + wanted_targets[dependency] = targets[dependency] + + wanted_flat_list = [t for t in flat_list if t in wanted_targets] + + # Prune unwanted targets from each build_file's data dict. + for build_file in data['target_build_files']: + if not 'targets' in data[build_file]: + continue + new_targets = [] + for target in data[build_file]['targets']: + qualified_name = gyp.common.QualifiedTarget(build_file, + target['target_name'], + target['toolset']) + if qualified_name in wanted_targets: + new_targets.append(target) + data[build_file]['targets'] = new_targets + + return wanted_targets, wanted_flat_list + + +def VerifyNoCollidingTargets(targets): + """Verify that no two targets in the same directory share the same name. + + Arguments: + targets: A list of targets in the form 'path/to/file.gyp:target_name'. + """ + # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'. + used = {} + for target in targets: + # Separate out 'path/to/file.gyp, 'target_name' from + # 'path/to/file.gyp:target_name'. + path, name = target.rsplit(':', 1) + # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'. + subdir, gyp = os.path.split(path) + # Use '.' for the current directory '', so that the error messages make + # more sense. + if not subdir: + subdir = '.' + # Prepare a key like 'path/to:target_name'. + key = subdir + ':' + name + if key in used: + # Complain if this target is already used. + raise GypError('Duplicate target name "%s" in directory "%s" used both ' + 'in "%s" and "%s".' % (name, subdir, gyp, used[key])) + used[key] = gyp + + +def SetGeneratorGlobals(generator_input_info): + # Set up path_sections and non_configuration_keys with the default data plus + # the generator-specific data. 
+ global path_sections + path_sections = set(base_path_sections) + path_sections.update(generator_input_info['path_sections']) + + global non_configuration_keys + non_configuration_keys = base_non_configuration_keys[:] + non_configuration_keys.extend(generator_input_info['non_configuration_keys']) + + global multiple_toolsets + multiple_toolsets = generator_input_info[ + 'generator_supports_multiple_toolsets'] + + global generator_filelist_paths + generator_filelist_paths = generator_input_info['generator_filelist_paths'] + + +def Load(build_files, variables, includes, depth, generator_input_info, check, + circular_check, duplicate_basename_check, parallel, root_targets): + SetGeneratorGlobals(generator_input_info) + # A generator can have other lists (in addition to sources) be processed + # for rules. + extra_sources_for_rules = generator_input_info['extra_sources_for_rules'] + + # Load build files. This loads every target-containing build file into + # the |data| dictionary such that the keys to |data| are build file names, + # and the values are the entire build file contents after "early" or "pre" + # processing has been done and includes have been resolved. + # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as + # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps + # track of the keys corresponding to "target" files. + data = {'target_build_files': set()} + # Normalize paths everywhere. This is important because paths will be + # used as keys to the data dict and for references between input files. + build_files = set(map(os.path.normpath, build_files)) + if parallel: + LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth, + check, generator_input_info) + else: + aux_data = {} + for build_file in build_files: + try: + LoadTargetBuildFile(build_file, data, aux_data, + variables, includes, depth, check, True) + except Exception, e: + gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file) + raise + + # Build a dict to access each target's subdict by qualified name. + targets = BuildTargetsDict(data) + + # Fully qualify all dependency links. + QualifyDependencies(targets) + + # Remove self-dependencies from targets that have 'prune_self_dependencies' + # set to 1. + RemoveSelfDependencies(targets) + + # Expand dependencies specified as build_file:*. + ExpandWildcardDependencies(targets, data) + + # Remove all dependencies marked as 'link_dependency' from the targets of + # type 'none'. + RemoveLinkDependenciesFromNoneTargets(targets) + + # Apply exclude (!) and regex (/) list filters only for dependency_sections. + for target_name, target_dict in targets.iteritems(): + tmp_dict = {} + for key_base in dependency_sections: + for op in ('', '!', '/'): + key = key_base + op + if key in target_dict: + tmp_dict[key] = target_dict[key] + del target_dict[key] + ProcessListFiltersInDict(target_name, tmp_dict) + # Write the results back to |target_dict|. + for key in tmp_dict: + target_dict[key] = tmp_dict[key] + + # Make sure every dependency appears at most once. + RemoveDuplicateDependencies(targets) + + if circular_check: + # Make sure that any targets in a.gyp don't contain dependencies in other + # .gyp files that further depend on a.gyp. + VerifyNoGYPFileCircularDependencies(targets) + + [dependency_nodes, flat_list] = BuildDependencyList(targets) + + if root_targets: + # Remove, from |targets| and |flat_list|, the targets that are not deep + # dependencies of the targets specified in |root_targets|. 
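
SetGeneratorGlobals above, and Load as it continues below, read a handful of keys from the generator_input_info dict that each generator supplies. A minimal, purely illustrative dict (the values are placeholders, not any particular generator's real settings) would look like:

generator_input_info = {
    # Read by SetGeneratorGlobals.
    'path_sections': ['msvs_props'],
    'non_configuration_keys': ['msvs_external_builder'],
    'generator_supports_multiple_toolsets': False,
    'generator_filelist_paths': None,
    # Read by Load.
    'extra_sources_for_rules': [],
    'generator_wants_static_library_dependencies_adjusted': True,
    'generator_wants_sorted_dependencies': False,
}
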
+ targets, flat_list = PruneUnwantedTargets( + targets, flat_list, dependency_nodes, root_targets, data) + + # Check that no two targets in the same directory have the same name. + VerifyNoCollidingTargets(flat_list) + + # Handle dependent settings of various types. + for settings_type in ['all_dependent_settings', + 'direct_dependent_settings', + 'link_settings']: + DoDependentSettings(settings_type, flat_list, targets, dependency_nodes) + + # Take out the dependent settings now that they've been published to all + # of the targets that require them. + for target in flat_list: + if settings_type in targets[target]: + del targets[target][settings_type] + + # Make sure static libraries don't declare dependencies on other static + # libraries, but that linkables depend on all unlinked static libraries + # that they need so that their link steps will be correct. + gii = generator_input_info + if gii['generator_wants_static_library_dependencies_adjusted']: + AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes, + gii['generator_wants_sorted_dependencies']) + + # Apply "post"/"late"/"target" variable expansions and condition evaluations. + for target in flat_list: + target_dict = targets[target] + build_file = gyp.common.BuildFile(target) + ProcessVariablesAndConditionsInDict( + target_dict, PHASE_LATE, variables, build_file) + + # Move everything that can go into a "configurations" section into one. + for target in flat_list: + target_dict = targets[target] + SetUpConfigurations(target, target_dict) + + # Apply exclude (!) and regex (/) list filters. + for target in flat_list: + target_dict = targets[target] + ProcessListFiltersInDict(target, target_dict) + + # Apply "latelate" variable expansions and condition evaluations. + for target in flat_list: + target_dict = targets[target] + build_file = gyp.common.BuildFile(target) + ProcessVariablesAndConditionsInDict( + target_dict, PHASE_LATELATE, variables, build_file) + + # Make sure that the rules make sense, and build up rule_sources lists as + # needed. Not all generators will need to use the rule_sources lists, but + # some may, and it seems best to build the list in a common spot. + # Also validate actions and run_as elements in targets. + for target in flat_list: + target_dict = targets[target] + build_file = gyp.common.BuildFile(target) + ValidateTargetType(target, target_dict) + ValidateSourcesInTarget(target, target_dict, build_file, + duplicate_basename_check) + ValidateRulesInTarget(target, target_dict, extra_sources_for_rules) + ValidateRunAsInTarget(target, target_dict, build_file) + ValidateActionsInTarget(target, target_dict, build_file) + + # Generators might not expect ints. Turn them into strs. + TurnIntIntoStrInDict(data) + + # TODO(mark): Return |data| for now because the generator needs a list of + # build files that came in. In the future, maybe it should just accept + # a list, and not the whole data dict. + return [flat_list, targets, data] diff --git a/deps/libuv/build/gyp/pylib/gyp/input_test.py b/deps/libuv/build/gyp/pylib/gyp/input_test.py new file mode 100755 index 00000000..4234fbb8 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/input_test.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python + +# Copyright 2013 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. 
+ +"""Unit tests for the input.py file.""" + +import gyp.input +import unittest +import sys + + +class TestFindCycles(unittest.TestCase): + def setUp(self): + self.nodes = {} + for x in ('a', 'b', 'c', 'd', 'e'): + self.nodes[x] = gyp.input.DependencyGraphNode(x) + + def _create_dependency(self, dependent, dependency): + dependent.dependencies.append(dependency) + dependency.dependents.append(dependent) + + def test_no_cycle_empty_graph(self): + for label, node in self.nodes.iteritems(): + self.assertEquals([], node.FindCycles()) + + def test_no_cycle_line(self): + self._create_dependency(self.nodes['a'], self.nodes['b']) + self._create_dependency(self.nodes['b'], self.nodes['c']) + self._create_dependency(self.nodes['c'], self.nodes['d']) + + for label, node in self.nodes.iteritems(): + self.assertEquals([], node.FindCycles()) + + def test_no_cycle_dag(self): + self._create_dependency(self.nodes['a'], self.nodes['b']) + self._create_dependency(self.nodes['a'], self.nodes['c']) + self._create_dependency(self.nodes['b'], self.nodes['c']) + + for label, node in self.nodes.iteritems(): + self.assertEquals([], node.FindCycles()) + + def test_cycle_self_reference(self): + self._create_dependency(self.nodes['a'], self.nodes['a']) + + self.assertEquals([[self.nodes['a'], self.nodes['a']]], + self.nodes['a'].FindCycles()) + + def test_cycle_two_nodes(self): + self._create_dependency(self.nodes['a'], self.nodes['b']) + self._create_dependency(self.nodes['b'], self.nodes['a']) + + self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]], + self.nodes['a'].FindCycles()) + self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]], + self.nodes['b'].FindCycles()) + + def test_two_cycles(self): + self._create_dependency(self.nodes['a'], self.nodes['b']) + self._create_dependency(self.nodes['b'], self.nodes['a']) + + self._create_dependency(self.nodes['b'], self.nodes['c']) + self._create_dependency(self.nodes['c'], self.nodes['b']) + + cycles = self.nodes['a'].FindCycles() + self.assertTrue( + [self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles) + self.assertTrue( + [self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles) + self.assertEquals(2, len(cycles)) + + def test_big_cycle(self): + self._create_dependency(self.nodes['a'], self.nodes['b']) + self._create_dependency(self.nodes['b'], self.nodes['c']) + self._create_dependency(self.nodes['c'], self.nodes['d']) + self._create_dependency(self.nodes['d'], self.nodes['e']) + self._create_dependency(self.nodes['e'], self.nodes['a']) + + self.assertEquals([[self.nodes['a'], + self.nodes['b'], + self.nodes['c'], + self.nodes['d'], + self.nodes['e'], + self.nodes['a']]], + self.nodes['a'].FindCycles()) + + +if __name__ == '__main__': + unittest.main() diff --git a/deps/libuv/build/gyp/pylib/gyp/mac_tool.py b/deps/libuv/build/gyp/pylib/gyp/mac_tool.py new file mode 100755 index 00000000..b0363cc3 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/mac_tool.py @@ -0,0 +1,712 @@ +#!/usr/bin/env python +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Utility functions to perform Xcode-style build steps. + +These functions are executed via gyp-mac-tool when using the Makefile generator. 
+""" + +import fcntl +import fnmatch +import glob +import json +import os +import plistlib +import re +import shutil +import string +import struct +import subprocess +import sys +import tempfile + + +def main(args): + executor = MacTool() + exit_code = executor.Dispatch(args) + if exit_code is not None: + sys.exit(exit_code) + + +class MacTool(object): + """This class performs all the Mac tooling steps. The methods can either be + executed directly, or dispatched from an argument list.""" + + def Dispatch(self, args): + """Dispatches a string command to a method.""" + if len(args) < 1: + raise Exception("Not enough arguments") + + method = "Exec%s" % self._CommandifyName(args[0]) + return getattr(self, method)(*args[1:]) + + def _CommandifyName(self, name_string): + """Transforms a tool name like copy-info-plist to CopyInfoPlist""" + return name_string.title().replace('-', '') + + def ExecCopyBundleResource(self, source, dest, convert_to_binary): + """Copies a resource file to the bundle/Resources directory, performing any + necessary compilation on each resource.""" + convert_to_binary = convert_to_binary == 'True' + extension = os.path.splitext(source)[1].lower() + if os.path.isdir(source): + # Copy tree. + # TODO(thakis): This copies file attributes like mtime, while the + # single-file branch below doesn't. This should probably be changed to + # be consistent with the single-file branch. + if os.path.exists(dest): + shutil.rmtree(dest) + shutil.copytree(source, dest) + elif extension == '.xib': + return self._CopyXIBFile(source, dest) + elif extension == '.storyboard': + return self._CopyXIBFile(source, dest) + elif extension == '.strings' and not convert_to_binary: + self._CopyStringsFile(source, dest) + else: + if os.path.exists(dest): + os.unlink(dest) + shutil.copy(source, dest) + + if convert_to_binary and extension in ('.plist', '.strings'): + self._ConvertToBinary(dest) + + def _CopyXIBFile(self, source, dest): + """Compiles a XIB file with ibtool into a binary plist in the bundle.""" + + # ibtool sometimes crashes with relative paths. See crbug.com/314728. 
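
Dispatch above is purely name based: the first command-line argument is title-cased, its dashes are dropped, and the result is prefixed with 'Exec' to pick a method. A quick illustration of that transformation (the command strings are only examples of the scheme):

def commandify(name_string):
    # Same transformation as MacTool._CommandifyName above.
    return 'Exec' + name_string.title().replace('-', '')

for cmd in ('copy-bundle-resource', 'copy-info-plist', 'filter-libtool'):
    print('%s -> %s' % (cmd, commandify(cmd)))
# copy-bundle-resource -> ExecCopyBundleResource
# copy-info-plist -> ExecCopyInfoPlist
# filter-libtool -> ExecFilterLibtool
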
+ base = os.path.dirname(os.path.realpath(__file__)) + if os.path.relpath(source): + source = os.path.join(base, source) + if os.path.relpath(dest): + dest = os.path.join(base, dest) + + args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices'] + + if os.environ['XCODE_VERSION_ACTUAL'] > '0700': + args.extend(['--auto-activate-custom-fonts']) + if 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ: + args.extend([ + '--target-device', 'iphone', '--target-device', 'ipad', + '--minimum-deployment-target', + os.environ['IPHONEOS_DEPLOYMENT_TARGET'], + ]) + else: + args.extend([ + '--target-device', 'mac', + '--minimum-deployment-target', + os.environ['MACOSX_DEPLOYMENT_TARGET'], + ]) + + args.extend(['--output-format', 'human-readable-text', '--compile', dest, + source]) + + ibtool_section_re = re.compile(r'/\*.*\*/') + ibtool_re = re.compile(r'.*note:.*is clipping its content') + ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE) + current_section_header = None + for line in ibtoolout.stdout: + if ibtool_section_re.match(line): + current_section_header = line + elif not ibtool_re.match(line): + if current_section_header: + sys.stdout.write(current_section_header) + current_section_header = None + sys.stdout.write(line) + return ibtoolout.returncode + + def _ConvertToBinary(self, dest): + subprocess.check_call([ + 'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest]) + + def _CopyStringsFile(self, source, dest): + """Copies a .strings file using iconv to reconvert the input into UTF-16.""" + input_code = self._DetectInputEncoding(source) or "UTF-8" + + # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call + # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints + # CFPropertyListCreateFromXMLData(): Old-style plist parser: missing + # semicolon in dictionary. + # on invalid files. Do the same kind of validation. + import CoreFoundation + s = open(source, 'rb').read() + d = CoreFoundation.CFDataCreate(None, s, len(s)) + _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None) + if error: + return + + fp = open(dest, 'wb') + fp.write(s.decode(input_code).encode('UTF-16')) + fp.close() + + def _DetectInputEncoding(self, file_name): + """Reads the first few bytes from file_name and tries to guess the text + encoding. Returns None as a guess if it can't detect it.""" + fp = open(file_name, 'rb') + try: + header = fp.read(3) + except: + fp.close() + return None + fp.close() + if header.startswith("\xFE\xFF"): + return "UTF-16" + elif header.startswith("\xFF\xFE"): + return "UTF-16" + elif header.startswith("\xEF\xBB\xBF"): + return "UTF-8" + else: + return None + + def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys): + """Copies the |source| Info.plist to the destination directory |dest|.""" + # Read the source Info.plist into memory. + fd = open(source, 'r') + lines = fd.read() + fd.close() + + # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild). + plist = plistlib.readPlistFromString(lines) + if keys: + plist = dict(plist.items() + json.loads(keys[0]).items()) + lines = plistlib.writePlistToString(plist) + + # Go through all the environment variables and replace them as variables in + # the file. + IDENT_RE = re.compile(r'[_/\s]') + for key in os.environ: + if key.startswith('_'): + continue + evar = '${%s}' % key + evalue = os.environ[key] + lines = string.replace(lines, evar, evalue) + + # Xcode supports various suffices on environment variables, which are + # all undocumented. 
:rfc1034identifier is used in the standard project + # template these days, and :identifier was used earlier. They are used to + # convert non-url characters into things that look like valid urls -- + # except that the replacement character for :identifier, '_' isn't valid + # in a URL either -- oops, hence :rfc1034identifier was born. + evar = '${%s:identifier}' % key + evalue = IDENT_RE.sub('_', os.environ[key]) + lines = string.replace(lines, evar, evalue) + + evar = '${%s:rfc1034identifier}' % key + evalue = IDENT_RE.sub('-', os.environ[key]) + lines = string.replace(lines, evar, evalue) + + # Remove any keys with values that haven't been replaced. + lines = lines.split('\n') + for i in range(len(lines)): + if lines[i].strip().startswith("${"): + lines[i] = None + lines[i - 1] = None + lines = '\n'.join(filter(lambda x: x is not None, lines)) + + # Write out the file with variables replaced. + fd = open(dest, 'w') + fd.write(lines) + fd.close() + + # Now write out PkgInfo file now that the Info.plist file has been + # "compiled". + self._WritePkgInfo(dest) + + if convert_to_binary == 'True': + self._ConvertToBinary(dest) + + def _WritePkgInfo(self, info_plist): + """This writes the PkgInfo file from the data stored in Info.plist.""" + plist = plistlib.readPlist(info_plist) + if not plist: + return + + # Only create PkgInfo for executable types. + package_type = plist['CFBundlePackageType'] + if package_type != 'APPL': + return + + # The format of PkgInfo is eight characters, representing the bundle type + # and bundle signature, each four characters. If that is missing, four + # '?' characters are used instead. + signature_code = plist.get('CFBundleSignature', '????') + if len(signature_code) != 4: # Wrong length resets everything, too. + signature_code = '?' * 4 + + dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo') + fp = open(dest, 'w') + fp.write('%s%s' % (package_type, signature_code)) + fp.close() + + def ExecFlock(self, lockfile, *cmd_list): + """Emulates the most basic behavior of Linux's flock(1).""" + # Rely on exception handling to report errors. + fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666) + fcntl.flock(fd, fcntl.LOCK_EX) + return subprocess.call(cmd_list) + + def ExecFilterLibtool(self, *cmd_list): + """Calls libtool and filters out '/path/to/libtool: file: foo.o has no + symbols'.""" + libtool_re = re.compile(r'^.*libtool: (?:for architecture: \S* )?' + r'file: .* has no symbols$') + libtool_re5 = re.compile( + r'^.*libtool: warning for library: ' + + r'.* the table of contents is empty ' + + r'\(no object file members in the library define global symbols\)$') + env = os.environ.copy() + # Ref: + # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c + # The problem with this flag is that it resets the file mtime on the file to + # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone. + env['ZERO_AR_DATE'] = '1' + libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env) + _, err = libtoolout.communicate() + for line in err.splitlines(): + if not libtool_re.match(line) and not libtool_re5.match(line): + print >>sys.stderr, line + # Unconditionally touch the output .a file on the command line if present + # and the command succeeded. A bit hacky. 
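
To make the ${VAR}, ${VAR:identifier} and ${VAR:rfc1034identifier} substitutions in ExecCopyInfoPlist above concrete, here is the same replacement scheme applied to one line, with an invented environment value:

import re

IDENT_RE = re.compile(r'[_/\s]')
env = {'PRODUCT_NAME': 'My App_1'}
line = 'id: ${PRODUCT_NAME:rfc1034identifier}, name: ${PRODUCT_NAME}'
for key, value in env.items():
    line = line.replace('${%s}' % key, value)
    line = line.replace('${%s:identifier}' % key, IDENT_RE.sub('_', value))
    line = line.replace('${%s:rfc1034identifier}' % key, IDENT_RE.sub('-', value))
print(line)   # id: My-App-1, name: My App_1
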
+ if not libtoolout.returncode: + for i in range(len(cmd_list) - 1): + if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'): + os.utime(cmd_list[i+1], None) + break + return libtoolout.returncode + + def ExecPackageIosFramework(self, framework): + # Find the name of the binary based on the part before the ".framework". + binary = os.path.basename(framework).split('.')[0] + module_path = os.path.join(framework, 'Modules'); + if not os.path.exists(module_path): + os.mkdir(module_path) + module_template = 'framework module %s {\n' \ + ' umbrella header "%s.h"\n' \ + '\n' \ + ' export *\n' \ + ' module * { export * }\n' \ + '}\n' % (binary, binary) + + module_file = open(os.path.join(module_path, 'module.modulemap'), "w") + module_file.write(module_template) + module_file.close() + + def ExecPackageFramework(self, framework, version): + """Takes a path to Something.framework and the Current version of that and + sets up all the symlinks.""" + # Find the name of the binary based on the part before the ".framework". + binary = os.path.basename(framework).split('.')[0] + + CURRENT = 'Current' + RESOURCES = 'Resources' + VERSIONS = 'Versions' + + if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)): + # Binary-less frameworks don't seem to contain symlinks (see e.g. + # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle). + return + + # Move into the framework directory to set the symlinks correctly. + pwd = os.getcwd() + os.chdir(framework) + + # Set up the Current version. + self._Relink(version, os.path.join(VERSIONS, CURRENT)) + + # Set up the root symlinks. + self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary) + self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES) + + # Back to where we were before! + os.chdir(pwd) + + def _Relink(self, dest, link): + """Creates a symlink to |dest| named |link|. If |link| already exists, + it is overwritten.""" + if os.path.lexists(link): + os.remove(link) + os.symlink(dest, link) + + def ExecCompileIosFrameworkHeaderMap(self, out, framework, *all_headers): + framework_name = os.path.basename(framework).split('.')[0] + all_headers = map(os.path.abspath, all_headers) + filelist = {} + for header in all_headers: + filename = os.path.basename(header) + filelist[filename] = header + filelist[os.path.join(framework_name, filename)] = header + WriteHmap(out, filelist) + + def ExecCopyIosFrameworkHeaders(self, framework, *copy_headers): + header_path = os.path.join(framework, 'Headers'); + if not os.path.exists(header_path): + os.makedirs(header_path) + for header in copy_headers: + shutil.copy(header, os.path.join(header_path, os.path.basename(header))) + + def ExecCompileXcassets(self, keys, *inputs): + """Compiles multiple .xcassets files into a single .car file. + + This invokes 'actool' to compile all the inputs .xcassets files. The + |keys| arguments is a json-encoded dictionary of extra arguments to + pass to 'actool' when the asset catalogs contains an application icon + or a launch image. + + Note that 'actool' does not create the Assets.car file if the asset + catalogs does not contains imageset. 
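
The |keys| JSON mentioned in the docstring above is unpacked into extra actool flags a little further below: booleans contribute a bare flag, lists repeat the flag per element, and everything else becomes a flag/value pair. A standalone sketch of that conversion, with made-up key names:

import json

def keys_to_actool_args(keys_json):
    args = []
    for key, value in sorted(json.loads(keys_json).items()):
        flag = '--' + key
        if isinstance(value, bool):
            if value:
                args.append(flag)
        elif isinstance(value, list):
            for v in value:
                args.extend([flag, str(v)])
        else:
            args.extend([flag, str(value)])
    return args

print(keys_to_actool_args('{"app-icon": "AppIcon", "include-all-app-icons": true}'))
# ['--app-icon', 'AppIcon', '--include-all-app-icons']
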
+ """ + command_line = [ + 'xcrun', 'actool', '--output-format', 'human-readable-text', + '--compress-pngs', '--notices', '--warnings', '--errors', + ] + is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ + if is_iphone_target: + platform = os.environ['CONFIGURATION'].split('-')[-1] + if platform not in ('iphoneos', 'iphonesimulator'): + platform = 'iphonesimulator' + command_line.extend([ + '--platform', platform, '--target-device', 'iphone', + '--target-device', 'ipad', '--minimum-deployment-target', + os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile', + os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']), + ]) + else: + command_line.extend([ + '--platform', 'macosx', '--target-device', 'mac', + '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'], + '--compile', + os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']), + ]) + if keys: + keys = json.loads(keys) + for key, value in keys.iteritems(): + arg_name = '--' + key + if isinstance(value, bool): + if value: + command_line.append(arg_name) + elif isinstance(value, list): + for v in value: + command_line.append(arg_name) + command_line.append(str(v)) + else: + command_line.append(arg_name) + command_line.append(str(value)) + # Note: actool crashes if inputs path are relative, so use os.path.abspath + # to get absolute path name for inputs. + command_line.extend(map(os.path.abspath, inputs)) + subprocess.check_call(command_line) + + def ExecMergeInfoPlist(self, output, *inputs): + """Merge multiple .plist files into a single .plist file.""" + merged_plist = {} + for path in inputs: + plist = self._LoadPlistMaybeBinary(path) + self._MergePlist(merged_plist, plist) + plistlib.writePlist(merged_plist, output) + + def ExecCodeSignBundle(self, key, entitlements, provisioning, path, preserve): + """Code sign a bundle. + + This function tries to code sign an iOS bundle, following the same + algorithm as Xcode: + 1. pick the provisioning profile that best match the bundle identifier, + and copy it into the bundle as embedded.mobileprovision, + 2. copy Entitlements.plist from user or SDK next to the bundle, + 3. code sign the bundle. + """ + substitutions, overrides = self._InstallProvisioningProfile( + provisioning, self._GetCFBundleIdentifier()) + entitlements_path = self._InstallEntitlements( + entitlements, substitutions, overrides) + + args = ['codesign', '--force', '--sign', key] + if preserve == 'True': + args.extend(['--deep', '--preserve-metadata=identifier,entitlements']) + else: + args.extend(['--entitlements', entitlements_path]) + args.extend(['--timestamp=none', path]) + subprocess.check_call(args) + + def _InstallProvisioningProfile(self, profile, bundle_identifier): + """Installs embedded.mobileprovision into the bundle. + + Args: + profile: string, optional, short name of the .mobileprovision file + to use, if empty or the file is missing, the best file installed + will be used + bundle_identifier: string, value of CFBundleIdentifier from Info.plist + + Returns: + A tuple containing two dictionary: variables substitutions and values + to overrides when generating the entitlements file. 
+ """ + source_path, provisioning_data, team_id = self._FindProvisioningProfile( + profile, bundle_identifier) + target_path = os.path.join( + os.environ['BUILT_PRODUCTS_DIR'], + os.environ['CONTENTS_FOLDER_PATH'], + 'embedded.mobileprovision') + shutil.copy2(source_path, target_path) + substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.') + return substitutions, provisioning_data['Entitlements'] + + def _FindProvisioningProfile(self, profile, bundle_identifier): + """Finds the .mobileprovision file to use for signing the bundle. + + Checks all the installed provisioning profiles (or if the user specified + the PROVISIONING_PROFILE variable, only consult it) and select the most + specific that correspond to the bundle identifier. + + Args: + profile: string, optional, short name of the .mobileprovision file + to use, if empty or the file is missing, the best file installed + will be used + bundle_identifier: string, value of CFBundleIdentifier from Info.plist + + Returns: + A tuple of the path to the selected provisioning profile, the data of + the embedded plist in the provisioning profile and the team identifier + to use for code signing. + + Raises: + SystemExit: if no .mobileprovision can be used to sign the bundle. + """ + profiles_dir = os.path.join( + os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles') + if not os.path.isdir(profiles_dir): + print >>sys.stderr, ( + 'cannot find mobile provisioning for %s' % bundle_identifier) + sys.exit(1) + provisioning_profiles = None + if profile: + profile_path = os.path.join(profiles_dir, profile + '.mobileprovision') + if os.path.exists(profile_path): + provisioning_profiles = [profile_path] + if not provisioning_profiles: + provisioning_profiles = glob.glob( + os.path.join(profiles_dir, '*.mobileprovision')) + valid_provisioning_profiles = {} + for profile_path in provisioning_profiles: + profile_data = self._LoadProvisioningProfile(profile_path) + app_id_pattern = profile_data.get( + 'Entitlements', {}).get('application-identifier', '') + for team_identifier in profile_data.get('TeamIdentifier', []): + app_id = '%s.%s' % (team_identifier, bundle_identifier) + if fnmatch.fnmatch(app_id, app_id_pattern): + valid_provisioning_profiles[app_id_pattern] = ( + profile_path, profile_data, team_identifier) + if not valid_provisioning_profiles: + print >>sys.stderr, ( + 'cannot find mobile provisioning for %s' % bundle_identifier) + sys.exit(1) + # If the user has multiple provisioning profiles installed that can be + # used for ${bundle_identifier}, pick the most specific one (ie. the + # provisioning profile whose pattern is the longest). + selected_key = max(valid_provisioning_profiles, key=lambda v: len(v)) + return valid_provisioning_profiles[selected_key] + + def _LoadProvisioningProfile(self, profile_path): + """Extracts the plist embedded in a provisioning profile. + + Args: + profile_path: string, path to the .mobileprovision file + + Returns: + Content of the plist embedded in the provisioning profile as a dictionary. 
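
_FindProvisioningProfile above filters the installed profiles with fnmatch against each profile's application-identifier pattern, then keeps the longest, most specific, match. The selection step in isolation, with fabricated team and profile data:

import fnmatch

bundle_id = 'com.example.app'
profiles = {  # application-identifier pattern -> profile file (illustrative)
    'TEAM1.*': 'wildcard.mobileprovision',
    'TEAM1.com.example.*': 'team-apps.mobileprovision',
    'TEAM1.com.example.app': 'exact.mobileprovision',
}
app_id = 'TEAM1.' + bundle_id
matching = {p: f for p, f in profiles.items() if fnmatch.fnmatch(app_id, p)}
best = max(matching, key=len)   # longest pattern wins
print('%s -> %s' % (best, matching[best]))
# TEAM1.com.example.app -> exact.mobileprovision
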
+ """ + with tempfile.NamedTemporaryFile() as temp: + subprocess.check_call([ + 'security', 'cms', '-D', '-i', profile_path, '-o', temp.name]) + return self._LoadPlistMaybeBinary(temp.name) + + def _MergePlist(self, merged_plist, plist): + """Merge |plist| into |merged_plist|.""" + for key, value in plist.iteritems(): + if isinstance(value, dict): + merged_value = merged_plist.get(key, {}) + if isinstance(merged_value, dict): + self._MergePlist(merged_value, value) + merged_plist[key] = merged_value + else: + merged_plist[key] = value + else: + merged_plist[key] = value + + def _LoadPlistMaybeBinary(self, plist_path): + """Loads into a memory a plist possibly encoded in binary format. + + This is a wrapper around plistlib.readPlist that tries to convert the + plist to the XML format if it can't be parsed (assuming that it is in + the binary format). + + Args: + plist_path: string, path to a plist file, in XML or binary format + + Returns: + Content of the plist as a dictionary. + """ + try: + # First, try to read the file using plistlib that only supports XML, + # and if an exception is raised, convert a temporary copy to XML and + # load that copy. + return plistlib.readPlist(plist_path) + except: + pass + with tempfile.NamedTemporaryFile() as temp: + shutil.copy2(plist_path, temp.name) + subprocess.check_call(['plutil', '-convert', 'xml1', temp.name]) + return plistlib.readPlist(temp.name) + + def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix): + """Constructs a dictionary of variable substitutions for Entitlements.plist. + + Args: + bundle_identifier: string, value of CFBundleIdentifier from Info.plist + app_identifier_prefix: string, value for AppIdentifierPrefix + + Returns: + Dictionary of substitutions to apply when generating Entitlements.plist. + """ + return { + 'CFBundleIdentifier': bundle_identifier, + 'AppIdentifierPrefix': app_identifier_prefix, + } + + def _GetCFBundleIdentifier(self): + """Extracts CFBundleIdentifier value from Info.plist in the bundle. + + Returns: + Value of CFBundleIdentifier in the Info.plist located in the bundle. + """ + info_plist_path = os.path.join( + os.environ['TARGET_BUILD_DIR'], + os.environ['INFOPLIST_PATH']) + info_plist_data = self._LoadPlistMaybeBinary(info_plist_path) + return info_plist_data['CFBundleIdentifier'] + + def _InstallEntitlements(self, entitlements, substitutions, overrides): + """Generates and install the ${BundleName}.xcent entitlements file. + + Expands variables "$(variable)" pattern in the source entitlements file, + add extra entitlements defined in the .mobileprovision file and the copy + the generated plist to "${BundlePath}.xcent". + + Args: + entitlements: string, optional, path to the Entitlements.plist template + to use, defaults to "${SDKROOT}/Entitlements.plist" + substitutions: dictionary, variable substitutions + overrides: dictionary, values to add to the entitlements + + Returns: + Path to the generated entitlements file. 
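
_InstallEntitlements leans on _GetSubstitutions and the _ExpandVariables helper defined below; for a plain string value the "$(variable)" expansion amounts to repeated replace calls, as in this small sketch with invented values:

substitutions = {'CFBundleIdentifier': 'com.example.app',
                 'AppIdentifierPrefix': 'TEAM1.'}
value = '$(AppIdentifierPrefix)$(CFBundleIdentifier)'
for key, replacement in substitutions.items():
    value = value.replace('$(%s)' % key, replacement)
print(value)   # TEAM1.com.example.app
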
+ """ + source_path = entitlements + target_path = os.path.join( + os.environ['BUILT_PRODUCTS_DIR'], + os.environ['PRODUCT_NAME'] + '.xcent') + if not source_path: + source_path = os.path.join( + os.environ['SDKROOT'], + 'Entitlements.plist') + shutil.copy2(source_path, target_path) + data = self._LoadPlistMaybeBinary(target_path) + data = self._ExpandVariables(data, substitutions) + if overrides: + for key in overrides: + if key not in data: + data[key] = overrides[key] + plistlib.writePlist(data, target_path) + return target_path + + def _ExpandVariables(self, data, substitutions): + """Expands variables "$(variable)" in data. + + Args: + data: object, can be either string, list or dictionary + substitutions: dictionary, variable substitutions to perform + + Returns: + Copy of data where each references to "$(variable)" has been replaced + by the corresponding value found in substitutions, or left intact if + the key was not found. + """ + if isinstance(data, str): + for key, value in substitutions.iteritems(): + data = data.replace('$(%s)' % key, value) + return data + if isinstance(data, list): + return [self._ExpandVariables(v, substitutions) for v in data] + if isinstance(data, dict): + return {k: self._ExpandVariables(data[k], substitutions) for k in data} + return data + +def NextGreaterPowerOf2(x): + return 2**(x).bit_length() + +def WriteHmap(output_name, filelist): + """Generates a header map based on |filelist|. + + Per Mark Mentovai: + A header map is structured essentially as a hash table, keyed by names used + in #includes, and providing pathnames to the actual files. + + The implementation below and the comment above comes from inspecting: + http://www.opensource.apple.com/source/distcc/distcc-2503/distcc_dist/include_server/headermap.py?txt + while also looking at the implementation in clang in: + https://llvm.org/svn/llvm-project/cfe/trunk/lib/Lex/HeaderMap.cpp + """ + magic = 1751998832 + version = 1 + _reserved = 0 + count = len(filelist) + capacity = NextGreaterPowerOf2(count) + strings_offset = 24 + (12 * capacity) + max_value_length = len(max(filelist.items(), key=lambda (k,v):len(v))[1]) + + out = open(output_name, "wb") + out.write(struct.pack(' 1: + raise Exception("Multiple .def files") + return None + + def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path): + """.def files get implicitly converted to a ModuleDefinitionFile for the + linker in the VS generator. 
Emulate that behaviour here.""" + def_file = self.GetDefFile(gyp_to_build_path) + if def_file: + ldflags.append('/DEF:"%s"' % def_file) + + def GetPGDName(self, config, expand_special): + """Gets the explicitly overridden pgd name for a target or returns None + if it's not overridden.""" + config = self._TargetConfig(config) + output_file = self._Setting( + ('VCLinkerTool', 'ProfileGuidedDatabase'), config) + if output_file: + output_file = expand_special(self.ConvertVSMacros( + output_file, config=config)) + return output_file + + def GetLdflags(self, config, gyp_to_build_path, expand_special, + manifest_base_name, output_name, is_executable, build_dir): + """Returns the flags that need to be added to link commands, and the + manifest files.""" + config = self._TargetConfig(config) + ldflags = [] + ld = self._GetWrapper(self, self.msvs_settings[config], + 'VCLinkerTool', append=ldflags) + self._GetDefFileAsLdflags(ldflags, gyp_to_build_path) + ld('GenerateDebugInformation', map={'true': '/DEBUG'}) + ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'}, + prefix='/MACHINE:') + ldflags.extend(self._GetAdditionalLibraryDirectories( + 'VCLinkerTool', config, gyp_to_build_path)) + ld('DelayLoadDLLs', prefix='/DELAYLOAD:') + ld('TreatLinkerWarningAsErrors', prefix='/WX', + map={'true': '', 'false': ':NO'}) + out = self.GetOutputName(config, expand_special) + if out: + ldflags.append('/OUT:' + out) + pdb = self.GetPDBName(config, expand_special, output_name + '.pdb') + if pdb: + ldflags.append('/PDB:' + pdb) + pgd = self.GetPGDName(config, expand_special) + if pgd: + ldflags.append('/PGD:' + pgd) + map_file = self.GetMapFileName(config, expand_special) + ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file + else '/MAP'}) + ld('MapExports', map={'true': '/MAPINFO:EXPORTS'}) + ld('AdditionalOptions', prefix='') + + minimum_required_version = self._Setting( + ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='') + if minimum_required_version: + minimum_required_version = ',' + minimum_required_version + ld('SubSystem', + map={'1': 'CONSOLE%s' % minimum_required_version, + '2': 'WINDOWS%s' % minimum_required_version}, + prefix='/SUBSYSTEM:') + + stack_reserve_size = self._Setting( + ('VCLinkerTool', 'StackReserveSize'), config, default='') + if stack_reserve_size: + stack_commit_size = self._Setting( + ('VCLinkerTool', 'StackCommitSize'), config, default='') + if stack_commit_size: + stack_commit_size = ',' + stack_commit_size + ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size)) + + ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE') + ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL') + ld('BaseAddress', prefix='/BASE:') + ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED') + ld('RandomizedBaseAddress', + map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE') + ld('DataExecutionPrevention', + map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT') + ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:') + ld('ForceSymbolReferences', prefix='/INCLUDE:') + ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:') + ld('LinkTimeCodeGeneration', + map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE', + '4': ':PGUPDATE'}, + prefix='/LTCG') + ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:') + ld('ResourceOnlyDLL', map={'true': '/NOENTRY'}) + ld('EntryPointSymbol', prefix='/ENTRY:') + ld('Profile', map={'true': '/PROFILE'}) + ld('LargeAddressAware', + map={'1': ':NO', 
'2': ''}, prefix='/LARGEADDRESSAWARE') + # TODO(scottmg): This should sort of be somewhere else (not really a flag). + ld('AdditionalDependencies', prefix='') + + if self.GetArch(config) == 'x86': + safeseh_default = 'true' + else: + safeseh_default = None + ld('ImageHasSafeExceptionHandlers', + map={'false': ':NO', 'true': ''}, prefix='/SAFESEH', + default=safeseh_default) + + # If the base address is not specifically controlled, DYNAMICBASE should + # be on by default. + base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED', + ldflags) + if not base_flags: + ldflags.append('/DYNAMICBASE') + + # If the NXCOMPAT flag has not been specified, default to on. Despite the + # documentation that says this only defaults to on when the subsystem is + # Vista or greater (which applies to the linker), the IDE defaults it on + # unless it's explicitly off. + if not filter(lambda x: 'NXCOMPAT' in x, ldflags): + ldflags.append('/NXCOMPAT') + + have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags) + manifest_flags, intermediate_manifest, manifest_files = \ + self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path, + is_executable and not have_def_file, build_dir) + ldflags.extend(manifest_flags) + return ldflags, intermediate_manifest, manifest_files + + def _GetLdManifestFlags(self, config, name, gyp_to_build_path, + allow_isolation, build_dir): + """Returns a 3-tuple: + - the set of flags that need to be added to the link to generate + a default manifest + - the intermediate manifest that the linker will generate that should be + used to assert it doesn't add anything to the merged one. + - the list of all the manifest files to be merged by the manifest tool and + included into the link.""" + generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'), + config, + default='true') + if generate_manifest != 'true': + # This means not only that the linker should not generate the intermediate + # manifest but also that the manifest tool should do nothing even when + # additional manifests are specified. + return ['/MANIFEST:NO'], [], [] + + output_name = name + '.intermediate.manifest' + flags = [ + '/MANIFEST', + '/ManifestFile:' + output_name, + ] + + # Instead of using the MANIFESTUAC flags, we generate a .manifest to + # include into the list of manifests. This allows us to avoid the need to + # do two passes during linking. The /MANIFEST flag and /ManifestFile are + # still used, and the intermediate manifest is used to assert that the + # final manifest we get from merging all the additional manifest files + # (plus the one we generate here) isn't modified by merging the + # intermediate into it. + + # Always NO, because we generate a manifest file that has what we want. 
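+ # With a manifest base name of e.g. "foo" (illustrative only), the linker
+ # therefore ends up seeing /MANIFEST /ManifestFile:foo.intermediate.manifest
+ # plus the /MANIFESTUAC:NO appended just below.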
+ flags.append('/MANIFESTUAC:NO')
+
+ config = self._TargetConfig(config)
+ enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
+ default='true')
+ manifest_files = []
+ generated_manifest_outer = \
+"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
+"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
+"</assembly>"
+ if enable_uac == 'true':
+ execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
+ config, default='0')
+ execution_level_map = {
+ '0': 'asInvoker',
+ '1': 'highestAvailable',
+ '2': 'requireAdministrator'
+ }
+
+ ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
+ default='false')
+
+ inner = '''
+<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+  <security>
+    <requestedPrivileges>
+      <requestedExecutionLevel level='%s' uiAccess='%s' />
+    </requestedPrivileges>
+  </security>
+</trustInfo>''' % (execution_level_map[execution_level], ui_access)
+ else:
+ inner = ''
+
+ generated_manifest_contents = generated_manifest_outer % inner
+ generated_name = name + '.generated.manifest'
+ # Need to join with the build_dir here as we're writing it during
+ # generation time, but we return the un-joined version because the build
+ # will occur in that directory. We only write the file if the contents
+ # have changed so that simply regenerating the project files doesn't
+ # cause a relink.
+ build_dir_generated_name = os.path.join(build_dir, generated_name)
+ gyp.common.EnsureDirExists(build_dir_generated_name)
+ f = gyp.common.WriteOnDiff(build_dir_generated_name)
+ f.write(generated_manifest_contents)
+ f.close()
+ manifest_files = [generated_name]
+
+ if allow_isolation:
+ flags.append('/ALLOWISOLATION')
+
+ manifest_files += self._GetAdditionalManifestFiles(config,
+ gyp_to_build_path)
+ return flags, output_name, manifest_files
+
+ def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
+ """Gets additional manifest files that are added to the default one
+ generated by the linker."""
+ files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
+ default=[])
+ if isinstance(files, str):
+ files = files.split(';')
+ return [os.path.normpath(
+ gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
+ for f in files]
+
+ def IsUseLibraryDependencyInputs(self, config):
+ """Returns whether the target should be linked via Use Library Dependency
+ Inputs (using component .objs of a given .lib)."""
+ config = self._TargetConfig(config)
+ uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
+ return uldi == 'true'
+
+ def IsEmbedManifest(self, config):
+ """Returns whether manifest should be linked into binary."""
+ config = self._TargetConfig(config)
+ embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
+ default='true')
+ return embed == 'true'
+
+ def IsLinkIncremental(self, config):
+ """Returns whether the target should be linked incrementally."""
+ config = self._TargetConfig(config)
+ link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
+ return link_inc != '1'
+
+ def GetRcflags(self, config, gyp_to_ninja_path):
+ """Returns the flags that need to be added to invocations of the resource
+ compiler."""
+ config = self._TargetConfig(config)
+ rcflags = []
+ rc = self._GetWrapper(self, self.msvs_settings[config],
+ 'VCResourceCompilerTool', append=rcflags)
+ rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
+ rcflags.append('/I' + gyp_to_ninja_path('.'))
+ rc('PreprocessorDefinitions', prefix='/d')
+ # /l arg must be in hex without leading '0x'
+ rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
+ return rcflags
+
+ def BuildCygwinBashCommandLine(self, args, path_to_base):
+ """Build a command line that runs args via cygwin bash.
We assume that all + incoming paths are in Windows normpath'd form, so they need to be + converted to posix style for the part of the command line that's passed to + bash. We also have to do some Visual Studio macro emulation here because + various rules use magic VS names for things. Also note that rules that + contain ninja variables cannot be fixed here (for example ${source}), so + the outer generator needs to make sure that the paths that are written out + are in posix style, if the command line will be used here.""" + cygwin_dir = os.path.normpath( + os.path.join(path_to_base, self.msvs_cygwin_dirs[0])) + cd = ('cd %s' % path_to_base).replace('\\', '/') + args = [a.replace('\\', '/').replace('"', '\\"') for a in args] + args = ["'%s'" % a.replace("'", "'\\''") for a in args] + bash_cmd = ' '.join(args) + cmd = ( + 'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir + + 'bash -c "%s ; %s"' % (cd, bash_cmd)) + return cmd + + def IsRuleRunUnderCygwin(self, rule): + """Determine if an action should be run under cygwin. If the variable is + unset, or set to 1 we use cygwin.""" + return int(rule.get('msvs_cygwin_shell', + self.spec.get('msvs_cygwin_shell', 1))) != 0 + + def _HasExplicitRuleForExtension(self, spec, extension): + """Determine if there's an explicit rule for a particular extension.""" + for rule in spec.get('rules', []): + if rule['extension'] == extension: + return True + return False + + def _HasExplicitIdlActions(self, spec): + """Determine if an action should not run midl for .idl files.""" + return any([action.get('explicit_idl_action', 0) + for action in spec.get('actions', [])]) + + def HasExplicitIdlRulesOrActions(self, spec): + """Determine if there's an explicit rule or action for idl files. When + there isn't we need to generate implicit rules to build MIDL .idl files.""" + return (self._HasExplicitRuleForExtension(spec, 'idl') or + self._HasExplicitIdlActions(spec)) + + def HasExplicitAsmRules(self, spec): + """Determine if there's an explicit rule for asm files. When there isn't we + need to generate implicit rules to assemble .asm files.""" + return self._HasExplicitRuleForExtension(spec, 'asm') + + def GetIdlBuildData(self, source, config): + """Determine the implicit outputs for an idl file. Returns output + directory, outputs, and variables and flags that are required.""" + config = self._TargetConfig(config) + midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool') + def midl(name, default=None): + return self.ConvertVSMacros(midl_get(name, default=default), + config=config) + tlb = midl('TypeLibraryName', default='${root}.tlb') + header = midl('HeaderFileName', default='${root}.h') + dlldata = midl('DLLDataFileName', default='dlldata.c') + iid = midl('InterfaceIdentifierFileName', default='${root}_i.c') + proxy = midl('ProxyFileName', default='${root}_p.c') + # Note that .tlb is not included in the outputs as it is not always + # generated depending on the content of the input idl file. + outdir = midl('OutputDirectory', default='') + output = [header, dlldata, iid, proxy] + variables = [('tlb', tlb), + ('h', header), + ('dlldata', dlldata), + ('iid', iid), + ('proxy', proxy)] + # TODO(scottmg): Are there configuration settings to set these flags? 
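+ # As an illustration only: with the defaults above, an x86 build of foo.idl
+ # is expected to produce foo.h, dlldata.c, foo_i.c and foo_p.c, and midl is
+ # invoked with /char signed /env win32 /Oicf.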
+ target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64' + flags = ['/char', 'signed', '/env', target_platform, '/Oicf'] + return outdir, output, variables, flags + + +def _LanguageMatchesForPch(source_ext, pch_source_ext): + c_exts = ('.c',) + cc_exts = ('.cc', '.cxx', '.cpp') + return ((source_ext in c_exts and pch_source_ext in c_exts) or + (source_ext in cc_exts and pch_source_ext in cc_exts)) + + +class PrecompiledHeader(object): + """Helper to generate dependencies and build rules to handle generation of + precompiled headers. Interface matches the GCH handler in xcode_emulation.py. + """ + def __init__( + self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext): + self.settings = settings + self.config = config + pch_source = self.settings.msvs_precompiled_source[self.config] + self.pch_source = gyp_to_build_path(pch_source) + filename, _ = os.path.splitext(pch_source) + self.output_obj = gyp_to_unique_output(filename + obj_ext).lower() + + def _PchHeader(self): + """Get the header that will appear in an #include line for all source + files.""" + return self.settings.msvs_precompiled_header[self.config] + + def GetObjDependencies(self, sources, objs, arch): + """Given a list of sources files and the corresponding object files, + returns a list of the pch files that should be depended upon. The + additional wrapping in the return value is for interface compatibility + with make.py on Mac, and xcode_emulation.py.""" + assert arch is None + if not self._PchHeader(): + return [] + pch_ext = os.path.splitext(self.pch_source)[1] + for source in sources: + if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext): + return [(None, None, self.output_obj)] + return [] + + def GetPchBuildCommands(self, arch): + """Not used on Windows as there are no additional build steps required + (instead, existing steps are modified in GetFlagsModifications below).""" + return [] + + def GetFlagsModifications(self, input, output, implicit, command, + cflags_c, cflags_cc, expand_special): + """Get the modified cflags and implicit dependencies that should be used + for the pch compilation step.""" + if input == self.pch_source: + pch_output = ['/Yc' + self._PchHeader()] + if command == 'cxx': + return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))], + self.output_obj, []) + elif command == 'cc': + return ([('cflags_c', map(expand_special, cflags_c + pch_output))], + self.output_obj, []) + return [], output, implicit + + +vs_version = None +def GetVSVersion(generator_flags): + global vs_version + if not vs_version: + vs_version = gyp.MSVSVersion.SelectVisualStudioVersion( + generator_flags.get('msvs_version', 'auto'), + allow_fallback=False) + return vs_version + +def _GetVsvarsSetupArgs(generator_flags, arch): + vs = GetVSVersion(generator_flags) + return vs.SetupScript() + +def ExpandMacros(string, expansions): + """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv + for the canonical way to retrieve a suitable dict.""" + if '$' in string: + for old, new in expansions.iteritems(): + assert '$(' not in new, new + string = string.replace(old, new) + return string + +def _ExtractImportantEnvironment(output_of_set): + """Extracts environment variables required for the toolchain to run from + a textual dump output by the cmd.exe 'set' command.""" + envvars_to_save = ( + 'goma_.*', # TODO(scottmg): This is ugly, but needed for goma. 
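+ # These entries are treated as case-insensitive regex prefixes against the
+ # variable names in the 'set' output (see re.match below), not as literal
+ # names.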
+ 'include', + 'lib', + 'libpath', + 'path', + 'pathext', + 'systemroot', + 'temp', + 'tmp', + ) + env = {} + # This occasionally happens and leads to misleading SYSTEMROOT error messages + # if not caught here. + if output_of_set.count('=') == 0: + raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set) + for line in output_of_set.splitlines(): + for envvar in envvars_to_save: + if re.match(envvar + '=', line.lower()): + var, setting = line.split('=', 1) + if envvar == 'path': + # Our own rules (for running gyp-win-tool) and other actions in + # Chromium rely on python being in the path. Add the path to this + # python here so that if it's not in the path when ninja is run + # later, python will still be found. + setting = os.path.dirname(sys.executable) + os.pathsep + setting + env[var.upper()] = setting + break + for required in ('SYSTEMROOT', 'TEMP', 'TMP'): + if required not in env: + raise Exception('Environment variable "%s" ' + 'required to be set to valid path' % required) + return env + +def _FormatAsEnvironmentBlock(envvar_dict): + """Format as an 'environment block' directly suitable for CreateProcess. + Briefly this is a list of key=value\0, terminated by an additional \0. See + CreateProcess documentation for more details.""" + block = '' + nul = '\0' + for key, value in envvar_dict.iteritems(): + block += key + '=' + value + nul + block += nul + return block + +def _ExtractCLPath(output_of_where): + """Gets the path to cl.exe based on the output of calling the environment + setup batch file, followed by the equivalent of `where`.""" + # Take the first line, as that's the first found in the PATH. + for line in output_of_where.strip().splitlines(): + if line.startswith('LOC:'): + return line[len('LOC:'):].strip() + +def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags, + system_includes, open_out): + """It's not sufficient to have the absolute path to the compiler, linker, + etc. on Windows, as those tools rely on .dlls being in the PATH. We also + need to support both x86 and x64 compilers within the same build (to support + msvs_target_platform hackery). Different architectures require a different + compiler binary, and different supporting environment variables (INCLUDE, + LIB, LIBPATH). So, we extract the environment here, wrap all invocations + of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which + sets up the environment, and then we do not prefix the compiler with + an absolute path, instead preferring something like "cl.exe" in the rule + which will then run whichever the environment setup has put in the path. + When the following procedure to generate environment files does not + meet your requirement (e.g. for custom toolchains), you can pass + "-G ninja_use_custom_environment_files" to the gyp to suppress file + generation and use custom environment files prepared by yourself.""" + archs = ('x86', 'x64') + if generator_flags.get('ninja_use_custom_environment_files', 0): + cl_paths = {} + for arch in archs: + cl_paths[arch] = 'cl.exe' + return cl_paths + vs = GetVSVersion(generator_flags) + cl_paths = {} + for arch in archs: + # Extract environment variables for subprocesses. 
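+ # The approach: run the VS setup script for |arch| followed by 'set', keep
+ # only the variables that matter, and write them to environment.x86 /
+ # environment.x64 files that gyp-win-tool reads back at build time.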
+ args = vs.SetupScript(arch) + args.extend(('&&', 'set')) + popen = subprocess.Popen( + args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + variables, _ = popen.communicate() + if popen.returncode != 0: + raise Exception('"%s" failed with error %d' % (args, popen.returncode)) + env = _ExtractImportantEnvironment(variables) + + # Inject system includes from gyp files into INCLUDE. + if system_includes: + system_includes = system_includes | OrderedSet( + env.get('INCLUDE', '').split(';')) + env['INCLUDE'] = ';'.join(system_includes) + + env_block = _FormatAsEnvironmentBlock(env) + f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb') + f.write(env_block) + f.close() + + # Find cl.exe location for this architecture. + args = vs.SetupScript(arch) + args.extend(('&&', + 'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i')) + popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE) + output, _ = popen.communicate() + cl_paths[arch] = _ExtractCLPath(output) + return cl_paths + +def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja): + """Emulate behavior of msvs_error_on_missing_sources present in the msvs + generator: Check that all regular source files, i.e. not created at run time, + exist on disk. Missing files cause needless recompilation when building via + VS, and we want this check to match for people/bots that build using ninja, + so they're not surprised when the VS build fails.""" + if int(generator_flags.get('msvs_error_on_missing_sources', 0)): + no_specials = filter(lambda x: '$' not in x, sources) + relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials] + missing = filter(lambda x: not os.path.exists(x), relative) + if missing: + # They'll look like out\Release\..\..\stuff\things.cc, so normalize the + # path for a slightly less crazy looking output. + cleaned_up = [os.path.normpath(x) for x in missing] + raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up)) + +# Sets some values in default_variables, which are required for many +# generators, run on Windows. +def CalculateCommonVariables(default_variables, params): + generator_flags = params.get('generator_flags', {}) + + # Set a variable so conditions can be based on msvs_version. + msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags) + default_variables['MSVS_VERSION'] = msvs_version.ShortName() + + # To determine processor word size on Windows, in addition to checking + # PROCESSOR_ARCHITECTURE (which reflects the word size of the current + # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which + # contains the actual word size of the system when running thru WOW64). + if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or + '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')): + default_variables['MSVS_OS_BITS'] = 64 + else: + default_variables['MSVS_OS_BITS'] = 32 diff --git a/deps/libuv/build/gyp/pylib/gyp/ninja_syntax.py b/deps/libuv/build/gyp/pylib/gyp/ninja_syntax.py new file mode 100644 index 00000000..d2948f06 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/ninja_syntax.py @@ -0,0 +1,160 @@ +# This file comes from +# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py +# Do not edit! Edit the upstream one instead. + +"""Python module for generating .ninja files. + +Note that this is emphatically not a required piece of Ninja; it's +just a helpful utility for build-file-generation systems that already +use Python. 
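+
+For example (illustrative only; names are not part of the upstream file):
+
+  writer = Writer(open('build.ninja', 'w'))
+  writer.rule('cc', command='gcc -c $in -o $out')
+  writer.build('foo.o', 'cc', 'foo.c')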
+""" + +import textwrap +import re + +def escape_path(word): + return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:') + +class Writer(object): + def __init__(self, output, width=78): + self.output = output + self.width = width + + def newline(self): + self.output.write('\n') + + def comment(self, text): + for line in textwrap.wrap(text, self.width - 2): + self.output.write('# ' + line + '\n') + + def variable(self, key, value, indent=0): + if value is None: + return + if isinstance(value, list): + value = ' '.join(filter(None, value)) # Filter out empty strings. + self._line('%s = %s' % (key, value), indent) + + def pool(self, name, depth): + self._line('pool %s' % name) + self.variable('depth', depth, indent=1) + + def rule(self, name, command, description=None, depfile=None, + generator=False, pool=None, restat=False, rspfile=None, + rspfile_content=None, deps=None): + self._line('rule %s' % name) + self.variable('command', command, indent=1) + if description: + self.variable('description', description, indent=1) + if depfile: + self.variable('depfile', depfile, indent=1) + if generator: + self.variable('generator', '1', indent=1) + if pool: + self.variable('pool', pool, indent=1) + if restat: + self.variable('restat', '1', indent=1) + if rspfile: + self.variable('rspfile', rspfile, indent=1) + if rspfile_content: + self.variable('rspfile_content', rspfile_content, indent=1) + if deps: + self.variable('deps', deps, indent=1) + + def build(self, outputs, rule, inputs=None, implicit=None, order_only=None, + variables=None): + outputs = self._as_list(outputs) + all_inputs = self._as_list(inputs)[:] + out_outputs = list(map(escape_path, outputs)) + all_inputs = list(map(escape_path, all_inputs)) + + if implicit: + implicit = map(escape_path, self._as_list(implicit)) + all_inputs.append('|') + all_inputs.extend(implicit) + if order_only: + order_only = map(escape_path, self._as_list(order_only)) + all_inputs.append('||') + all_inputs.extend(order_only) + + self._line('build %s: %s' % (' '.join(out_outputs), + ' '.join([rule] + all_inputs))) + + if variables: + if isinstance(variables, dict): + iterator = iter(variables.items()) + else: + iterator = iter(variables) + + for key, val in iterator: + self.variable(key, val, indent=1) + + return outputs + + def include(self, path): + self._line('include %s' % path) + + def subninja(self, path): + self._line('subninja %s' % path) + + def default(self, paths): + self._line('default %s' % ' '.join(self._as_list(paths))) + + def _count_dollars_before_index(self, s, i): + """Returns the number of '$' characters right in front of s[i].""" + dollar_count = 0 + dollar_index = i - 1 + while dollar_index > 0 and s[dollar_index] == '$': + dollar_count += 1 + dollar_index -= 1 + return dollar_count + + def _line(self, text, indent=0): + """Write 'text' word-wrapped at self.width characters.""" + leading_space = ' ' * indent + while len(leading_space) + len(text) > self.width: + # The text is too wide; wrap if possible. + + # Find the rightmost space that would obey our width constraint and + # that's not an escaped space. + available_space = self.width - len(leading_space) - len(' $') + space = available_space + while True: + space = text.rfind(' ', 0, space) + if space < 0 or \ + self._count_dollars_before_index(text, space) % 2 == 0: + break + + if space < 0: + # No such space; just use the first unescaped space we can find. 
+ space = available_space - 1 + while True: + space = text.find(' ', space + 1) + if space < 0 or \ + self._count_dollars_before_index(text, space) % 2 == 0: + break + if space < 0: + # Give up on breaking. + break + + self.output.write(leading_space + text[0:space] + ' $\n') + text = text[space+1:] + + # Subsequent lines are continuations, so indent them. + leading_space = ' ' * (indent+2) + + self.output.write(leading_space + text + '\n') + + def _as_list(self, input): + if input is None: + return [] + if isinstance(input, list): + return input + return [input] + + +def escape(string): + """Escape a string such that it can be embedded into a Ninja file without + further interpretation.""" + assert '\n' not in string, 'Ninja syntax does not allow newlines' + # We only have one special metacharacter: '$'. + return string.replace('$', '$$') diff --git a/deps/libuv/build/gyp/pylib/gyp/ordered_dict.py b/deps/libuv/build/gyp/pylib/gyp/ordered_dict.py new file mode 100644 index 00000000..a1e89f91 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/ordered_dict.py @@ -0,0 +1,289 @@ +# Unmodified from http://code.activestate.com/recipes/576693/ +# other than to add MIT license header (as specified on page, but not in code). +# Linked from Python documentation here: +# http://docs.python.org/2/library/collections.html#collections.OrderedDict +# +# This should be deleted once Py2.7 is available on all bots, see +# http://crbug.com/241769. +# +# Copyright (c) 2009 Raymond Hettinger. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. + +# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. +# Passes Python2.7's test suite and incorporates all the latest updates. + +try: + from thread import get_ident as _get_ident +except ImportError: + from dummy_thread import get_ident as _get_ident + +try: + from _abcoll import KeysView, ValuesView, ItemsView +except ImportError: + pass + + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as for regular dictionaries. + + # The internal self.__map dictionary maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). 
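+ # For example, after od['a'] = 1 and od['b'] = 2, traversal follows
+ # root -> 'a' link -> 'b' link -> root, so keys come back in insertion order.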
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(self, *args, **kwds): + '''Initialize an ordered dictionary. Signature is the same as for + regular dictionaries, but keyword arguments are not recommended + because their insertion order is arbitrary. + + ''' + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link which goes at the end of the linked + # list, and the inherited dictionary is updated with the new key/value pair. + if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which is + # then removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, key = self.__map.pop(key) + link_prev[1] = link_next + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + root = self.__root + curr = root[1] + while curr is not root: + yield curr[2] + curr = curr[1] + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + root = self.__root + curr = root[0] + while curr is not root: + yield curr[2] + curr = curr[0] + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + try: + for node in self.__map.itervalues(): + del node[:] + root = self.__root + root[:] = [root, root, None] + self.__map.clear() + except AttributeError: + pass + dict.clear(self) + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root[0] + link_prev = link[0] + link_prev[1] = root + root[0] = link_prev + else: + link = root[1] + link_next = link[1] + root[1] = link_next + link_next[0] = root + key = link[2] + del self.__map[key] + value = dict.pop(self, key) + return key, value + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) items in od' + for k in self: + yield (k, self[k]) + + # Suppress 'OrderedDict.update: Method has no argument': + # pylint: disable=E0211 + def update(*args, **kwds): + '''od.update(E, **F) -> None. Update od from dict/iterable E and F. 
+ + If E is a dict instance, does: for k in E: od[k] = E[k] + If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] + Or if E is an iterable of items, does: for k, v in E: od[k] = v + In either case, this is followed by: for k, v in F.items(): od[k] = v + + ''' + if len(args) > 2: + raise TypeError('update() takes at most 2 positional ' + 'arguments (%d given)' % (len(args),)) + elif not args: + raise TypeError('update() takes at least 1 argument (0 given)') + self = args[0] + # Make progressively weaker assumptions about "other" + other = () + if len(args) == 2: + other = args[1] + if isinstance(other, dict): + for key in other: + self[key] = other[key] + elif hasattr(other, 'keys'): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def __repr__(self, _repr_running={}): + 'od.__repr__() <==> repr(od)' + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' + _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S + and values equal to v (which defaults to None). + + ''' + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return len(self)==len(other) and self.items() == other.items() + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + + # -- the following methods are only used in Python 2.7 -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) + diff --git a/deps/libuv/build/gyp/pylib/gyp/simple_copy.py b/deps/libuv/build/gyp/pylib/gyp/simple_copy.py new file mode 100644 index 00000000..74c98c5a --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/simple_copy.py @@ -0,0 +1,46 @@ +# Copyright 2014 Google Inc. 
All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A clone of the default copy.deepcopy that doesn't handle cyclic
+structures or complex types except for dicts and lists. This is
+because gyp copies so large structure that small copy overhead ends up
+taking seconds in a project the size of Chromium."""
+
+class Error(Exception):
+ pass
+
+__all__ = ["Error", "deepcopy"]
+
+def deepcopy(x):
+ """Deep copy operation on gyp objects such as strings, ints, dicts
+ and lists. More than twice as fast as copy.deepcopy but much less
+ generic."""
+
+ try:
+ return _deepcopy_dispatch[type(x)](x)
+ except KeyError:
+ raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy ' +
+ 'or expand simple_copy support.' % type(x))
+
+_deepcopy_dispatch = d = {}
+
+def _deepcopy_atomic(x):
+ return x
+
+for x in (type(None), int, long, float,
+ bool, str, unicode, type):
+ d[x] = _deepcopy_atomic
+
+def _deepcopy_list(x):
+ return [deepcopy(a) for a in x]
+d[list] = _deepcopy_list
+
+def _deepcopy_dict(x):
+ y = {}
+ for key, value in x.iteritems():
+ y[deepcopy(key)] = deepcopy(value)
+ return y
+d[dict] = _deepcopy_dict
+
+del d diff --git a/deps/libuv/build/gyp/pylib/gyp/win_tool.py b/deps/libuv/build/gyp/pylib/gyp/win_tool.py new file mode 100755 index 00000000..1c843a0b --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/win_tool.py @@ -0,0 +1,322 @@ +#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility functions for Windows builds.
+
+These functions are executed via gyp-win-tool when using the ninja generator.
+"""
+
+import os
+import re
+import shutil
+import subprocess
+import stat
+import string
+import sys
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+
+# A regex matching an argument corresponding to the output filename passed to
+# link.exe.
+_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
+
+def main(args):
+ executor = WinTool()
+ exit_code = executor.Dispatch(args)
+ if exit_code is not None:
+ sys.exit(exit_code)
+
+
+class WinTool(object):
+ """This class performs all the Windows tooling steps. The methods can either
+ be executed directly, or dispatched from an argument list."""
+
+ def _UseSeparateMspdbsrv(self, env, args):
+ """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
+ shared one."""
+ if len(args) < 1:
+ raise Exception("Not enough arguments")
+
+ if args[0] != 'link.exe':
+ return
+
+ # Use the output filename passed to the linker to generate an endpoint name
+ # for mspdbsrv.exe.
+ endpoint_name = None
+ for arg in args:
+ m = _LINK_EXE_OUT_ARG.match(arg)
+ if m:
+ endpoint_name = re.sub(r'\W+', '',
+ '%s_%d' % (m.group('out'), os.getpid()))
+ break
+
+ if endpoint_name is None:
+ return
+
+ # Adds the appropriate environment variable. This will be read by link.exe
+ # to know which instance of mspdbsrv.exe it should connect to (if it's
+ # not set then the default endpoint is used).
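+ # For example, linking with /OUT:foo.dll in process 1234 produces the
+ # endpoint name 'foodll_1234' (re.sub above strips the non-word characters).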
+ env['_MSPDBSRV_ENDPOINT_'] = endpoint_name + + def Dispatch(self, args): + """Dispatches a string command to a method.""" + if len(args) < 1: + raise Exception("Not enough arguments") + + method = "Exec%s" % self._CommandifyName(args[0]) + return getattr(self, method)(*args[1:]) + + def _CommandifyName(self, name_string): + """Transforms a tool name like recursive-mirror to RecursiveMirror.""" + return name_string.title().replace('-', '') + + def _GetEnv(self, arch): + """Gets the saved environment from a file for a given architecture.""" + # The environment is saved as an "environment block" (see CreateProcess + # and msvs_emulation for details). We convert to a dict here. + # Drop last 2 NULs, one for list terminator, one for trailing vs. separator. + pairs = open(arch).read()[:-2].split('\0') + kvs = [item.split('=', 1) for item in pairs] + return dict(kvs) + + def ExecStamp(self, path): + """Simple stamp command.""" + open(path, 'w').close() + + def ExecRecursiveMirror(self, source, dest): + """Emulation of rm -rf out && cp -af in out.""" + if os.path.exists(dest): + if os.path.isdir(dest): + def _on_error(fn, path, excinfo): + # The operation failed, possibly because the file is set to + # read-only. If that's why, make it writable and try the op again. + if not os.access(path, os.W_OK): + os.chmod(path, stat.S_IWRITE) + fn(path) + shutil.rmtree(dest, onerror=_on_error) + else: + if not os.access(dest, os.W_OK): + # Attempt to make the file writable before deleting it. + os.chmod(dest, stat.S_IWRITE) + os.unlink(dest) + + if os.path.isdir(source): + shutil.copytree(source, dest) + else: + shutil.copy2(source, dest) + + def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args): + """Filter diagnostic output from link that looks like: + ' Creating library ui.dll.lib and object ui.dll.exp' + This happens when there are exports from the dll or exe. + """ + env = self._GetEnv(arch) + if use_separate_mspdbsrv == 'True': + self._UseSeparateMspdbsrv(env, args) + if sys.platform == 'win32': + args = list(args) # *args is a tuple by default, which is read-only. + args[0] = args[0].replace('/', '\\') + # https://docs.python.org/2/library/subprocess.html: + # "On Unix with shell=True [...] if args is a sequence, the first item + # specifies the command string, and any additional items will be treated as + # additional arguments to the shell itself. That is to say, Popen does the + # equivalent of: + # Popen(['/bin/sh', '-c', args[0], args[1], ...])" + # For that reason, since going through the shell doesn't seem necessary on + # non-Windows don't do that there. + link = subprocess.Popen(args, shell=sys.platform == 'win32', env=env, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + out, _ = link.communicate() + for line in out.splitlines(): + if (not line.startswith(' Creating library ') and + not line.startswith('Generating code') and + not line.startswith('Finished generating code')): + print line + return link.returncode + + def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname, + mt, rc, intermediate_manifest, *manifests): + """A wrapper for handling creating a manifest resource and then executing + a link command.""" + # The 'normal' way to do manifests is to have link generate a manifest + # based on gathering dependencies from the object files, then merge that + # manifest with other manifests supplied as sources, convert the merged + # manifest to a resource, and then *relink*, including the compiled + # version of the manifest resource. 
This breaks incremental linking, and + # is generally overly complicated. Instead, we merge all the manifests + # provided (along with one that includes what would normally be in the + # linker-generated one, see msvs_emulation.py), and include that into the + # first and only link. We still tell link to generate a manifest, but we + # only use that to assert that our simpler process did not miss anything. + variables = { + 'python': sys.executable, + 'arch': arch, + 'out': out, + 'ldcmd': ldcmd, + 'resname': resname, + 'mt': mt, + 'rc': rc, + 'intermediate_manifest': intermediate_manifest, + 'manifests': ' '.join(manifests), + } + add_to_ld = '' + if manifests: + subprocess.check_call( + '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' + '-manifest %(manifests)s -out:%(out)s.manifest' % variables) + if embed_manifest == 'True': + subprocess.check_call( + '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest' + ' %(out)s.manifest.rc %(resname)s' % variables) + subprocess.check_call( + '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s ' + '%(out)s.manifest.rc' % variables) + add_to_ld = ' %(out)s.manifest.res' % variables + subprocess.check_call(ldcmd + add_to_ld) + + # Run mt.exe on the theoretically complete manifest we generated, merging + # it with the one the linker generated to confirm that the linker + # generated one does not add anything. This is strictly unnecessary for + # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not + # used in a #pragma comment. + if manifests: + # Merge the intermediate one with ours to .assert.manifest, then check + # that .assert.manifest is identical to ours. + subprocess.check_call( + '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' + '-manifest %(out)s.manifest %(intermediate_manifest)s ' + '-out:%(out)s.assert.manifest' % variables) + assert_manifest = '%(out)s.assert.manifest' % variables + our_manifest = '%(out)s.manifest' % variables + # Load and normalize the manifests. mt.exe sometimes removes whitespace, + # and sometimes doesn't unfortunately. + with open(our_manifest, 'rb') as our_f: + with open(assert_manifest, 'rb') as assert_f: + our_data = our_f.read().translate(None, string.whitespace) + assert_data = assert_f.read().translate(None, string.whitespace) + if our_data != assert_data: + os.unlink(out) + def dump(filename): + sys.stderr.write('%s\n-----\n' % filename) + with open(filename, 'rb') as f: + sys.stderr.write(f.read() + '\n-----\n') + dump(intermediate_manifest) + dump(our_manifest) + dump(assert_manifest) + sys.stderr.write( + 'Linker generated manifest "%s" added to final manifest "%s" ' + '(result in "%s"). ' + 'Were /MANIFEST switches used in #pragma statements? ' % ( + intermediate_manifest, our_manifest, assert_manifest)) + return 1 + + def ExecManifestWrapper(self, arch, *args): + """Run manifest tool with environment set. Strip out undesirable warning + (some XML blocks are recognized by the OS loader, but not the manifest + tool).""" + env = self._GetEnv(arch) + popen = subprocess.Popen(args, shell=True, env=env, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + out, _ = popen.communicate() + for line in out.splitlines(): + if line and 'manifest authoring warning 81010002' not in line: + print line + return popen.returncode + + def ExecManifestToRc(self, arch, *args): + """Creates a resource file pointing a SxS assembly manifest. 
+ |args| is tuple containing path to manifest file, path to resource file
+ and resource name which can be "1" (for executables) or "2" (for DLLs)."""
+ manifest_path, resource_path, resource_name = args
+ with open(resource_path, 'wb') as output:
+ output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
+ resource_name,
+ os.path.abspath(manifest_path).replace('\\', '/')))
+
+ def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
+ *flags):
+ """Filter noisy filenames output from MIDL compile step that isn't
+ quietable via command line flags.
+ """
+ args = ['midl', '/nologo'] + list(flags) + [
+ '/out', outdir,
+ '/tlb', tlb,
+ '/h', h,
+ '/dlldata', dlldata,
+ '/iid', iid,
+ '/proxy', proxy,
+ idl]
+ env = self._GetEnv(arch)
+ popen = subprocess.Popen(args, shell=True, env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ out, _ = popen.communicate()
+ # Filter junk out of stdout, and write filtered versions. Output we want
+ # to filter is pairs of lines that look like this:
+ # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
+ # objidl.idl
+ lines = out.splitlines()
+ prefixes = ('Processing ', '64 bit Processing ')
+ processing = set(os.path.basename(x)
+ for x in lines if x.startswith(prefixes))
+ for line in lines:
+ if not line.startswith(prefixes) and line not in processing:
+ print line
+ return popen.returncode
+
+ def ExecAsmWrapper(self, arch, *args):
+ """Filter logo banner from invocations of asm.exe."""
+ env = self._GetEnv(arch)
+ popen = subprocess.Popen(args, shell=True, env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ out, _ = popen.communicate()
+ for line in out.splitlines():
+ if (not line.startswith('Copyright (C) Microsoft Corporation') and
+ not line.startswith('Microsoft (R) Macro Assembler') and
+ not line.startswith(' Assembling: ') and
+ line):
+ print line
+ return popen.returncode
+
+ def ExecRcWrapper(self, arch, *args):
+ """Filter logo banner from invocations of rc.exe. Older versions of RC
+ don't support the /nologo flag."""
+ env = self._GetEnv(arch)
+ popen = subprocess.Popen(args, shell=True, env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ out, _ = popen.communicate()
+ for line in out.splitlines():
+ if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
+ not line.startswith('Copyright (C) Microsoft Corporation') and
+ line):
+ print line
+ return popen.returncode
+
+ def ExecActionWrapper(self, arch, rspfile, *dir):
+ """Runs an action command line from a response file using the environment
+ for |arch|. If |dir| is supplied, use that as the working directory."""
+ env = self._GetEnv(arch)
+ # TODO(scottmg): This is a temporary hack to get some specific variables
+ # through to actions that are set after gyp-time. http://crbug.com/333738.
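+ # Values from the saved environment block take precedence; os.environ
+ # entries are only passed through when the block does not already set them.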
+ for k, v in os.environ.iteritems(): + if k not in env: + env[k] = v + args = open(rspfile).read() + dir = dir[0] if dir else None + return subprocess.call(args, shell=True, env=env, cwd=dir) + + def ExecClCompile(self, project_dir, selected_files): + """Executed by msvs-ninja projects when the 'ClCompile' target is used to + build selected C/C++ files.""" + project_dir = os.path.relpath(project_dir, BASE_DIR) + selected_files = selected_files.split(';') + ninja_targets = [os.path.join(project_dir, filename) + '^^' + for filename in selected_files] + cmd = ['ninja.exe'] + cmd.extend(ninja_targets) + return subprocess.call(cmd, shell=True, cwd=BASE_DIR) + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/deps/libuv/build/gyp/pylib/gyp/xcode_emulation.py b/deps/libuv/build/gyp/pylib/gyp/xcode_emulation.py new file mode 100644 index 00000000..dba8e769 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/xcode_emulation.py @@ -0,0 +1,1794 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +""" +This module contains classes that help to emulate xcodebuild behavior on top of +other build systems, such as make and ninja. +""" + +import copy +import gyp.common +import os +import os.path +import re +import shlex +import subprocess +import sys +import tempfile +from gyp.common import GypError + +# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when +# "xcodebuild" is called too quickly (it has been found to return incorrect +# version number). +XCODE_VERSION_CACHE = None + +# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance +# corresponding to the installed version of Xcode. +XCODE_ARCHS_DEFAULT_CACHE = None + + +def XcodeArchsVariableMapping(archs, archs_including_64_bit=None): + """Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable, + and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT).""" + mapping = {'$(ARCHS_STANDARD)': archs} + if archs_including_64_bit: + mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit + return mapping + +class XcodeArchsDefault(object): + """A class to resolve ARCHS variable from xcode_settings, resolving Xcode + macros and implementing filtering by VALID_ARCHS. The expansion of macros + depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and + on the version of Xcode. + """ + + # Match variable like $(ARCHS_STANDARD). 
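+ # e.g. '$(ARCHS_STANDARD)' matches, while a concrete arch such as 'x86_64'
+ # does not and is passed through unchanged by _ExpandArchs below.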
+ variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$') + + def __init__(self, default, mac, iphonesimulator, iphoneos): + self._default = (default,) + self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator} + + def _VariableMapping(self, sdkroot): + """Returns the dictionary of variable mapping depending on the SDKROOT.""" + sdkroot = sdkroot.lower() + if 'iphoneos' in sdkroot: + return self._archs['ios'] + elif 'iphonesimulator' in sdkroot: + return self._archs['iossim'] + else: + return self._archs['mac'] + + def _ExpandArchs(self, archs, sdkroot): + """Expands variables references in ARCHS, and remove duplicates.""" + variable_mapping = self._VariableMapping(sdkroot) + expanded_archs = [] + for arch in archs: + if self.variable_pattern.match(arch): + variable = arch + try: + variable_expansion = variable_mapping[variable] + for arch in variable_expansion: + if arch not in expanded_archs: + expanded_archs.append(arch) + except KeyError as e: + print 'Warning: Ignoring unsupported variable "%s".' % variable + elif arch not in expanded_archs: + expanded_archs.append(arch) + return expanded_archs + + def ActiveArchs(self, archs, valid_archs, sdkroot): + """Expands variables references in ARCHS, and filter by VALID_ARCHS if it + is defined (if not set, Xcode accept any value in ARCHS, otherwise, only + values present in VALID_ARCHS are kept).""" + expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '') + if valid_archs: + filtered_archs = [] + for arch in expanded_archs: + if arch in valid_archs: + filtered_archs.append(arch) + expanded_archs = filtered_archs + return expanded_archs + + +def GetXcodeArchsDefault(): + """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the + installed version of Xcode. The default values used by Xcode for ARCHS + and the expansion of the variables depends on the version of Xcode used. + + For all version anterior to Xcode 5.0 or posterior to Xcode 5.1 included + uses $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 uses + $(ARCHS_STANDARD_INCLUDING_64_BIT). This variable was added to Xcode 5.0 + and deprecated with Xcode 5.1. + + For "macosx" SDKROOT, all version starting with Xcode 5.0 includes 64-bit + architecture as part of $(ARCHS_STANDARD) and default to only building it. + + For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part + of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they + are also part of $(ARCHS_STANDARD). + + All thoses rules are coded in the construction of the |XcodeArchsDefault| + object to use depending on the version of Xcode detected. 
The object is + for performance reason.""" + global XCODE_ARCHS_DEFAULT_CACHE + if XCODE_ARCHS_DEFAULT_CACHE: + return XCODE_ARCHS_DEFAULT_CACHE + xcode_version, _ = XcodeVersion() + if xcode_version < '0500': + XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( + '$(ARCHS_STANDARD)', + XcodeArchsVariableMapping(['i386']), + XcodeArchsVariableMapping(['i386']), + XcodeArchsVariableMapping(['armv7'])) + elif xcode_version < '0510': + XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( + '$(ARCHS_STANDARD_INCLUDING_64_BIT)', + XcodeArchsVariableMapping(['x86_64'], ['x86_64']), + XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']), + XcodeArchsVariableMapping( + ['armv7', 'armv7s'], + ['armv7', 'armv7s', 'arm64'])) + else: + XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault( + '$(ARCHS_STANDARD)', + XcodeArchsVariableMapping(['x86_64'], ['x86_64']), + XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']), + XcodeArchsVariableMapping( + ['armv7', 'armv7s', 'arm64'], + ['armv7', 'armv7s', 'arm64'])) + return XCODE_ARCHS_DEFAULT_CACHE + + +class XcodeSettings(object): + """A class that understands the gyp 'xcode_settings' object.""" + + # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached + # at class-level for efficiency. + _sdk_path_cache = {} + _platform_path_cache = {} + _sdk_root_cache = {} + + # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so + # cached at class-level for efficiency. + _plist_cache = {} + + # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so + # cached at class-level for efficiency. + _codesigning_key_cache = {} + + def __init__(self, spec): + self.spec = spec + + self.isIOS = False + self.mac_toolchain_dir = None + self.header_map_path = None + + # Per-target 'xcode_settings' are pushed down into configs earlier by gyp. + # This means self.xcode_settings[config] always contains all settings + # for that config -- the per-target settings as well. Settings that are + # the same for all configs are implicitly per-target settings. + self.xcode_settings = {} + configs = spec['configurations'] + for configname, config in configs.iteritems(): + self.xcode_settings[configname] = config.get('xcode_settings', {}) + self._ConvertConditionalKeys(configname) + if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET', + None): + self.isIOS = True + + # This is only non-None temporarily during the execution of some methods. + self.configname = None + + # Used by _AdjustLibrary to match .a and .dylib entries in libraries. + self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$') + + def _ConvertConditionalKeys(self, configname): + """Converts or warns on conditional keys. Xcode supports conditional keys, + such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. 
This is a partial implementation + with some keys converted while the rest force a warning.""" + settings = self.xcode_settings[configname] + conditional_keys = [key for key in settings if key.endswith(']')] + for key in conditional_keys: + # If you need more, speak up at http://crbug.com/122592 + if key.endswith("[sdk=iphoneos*]"): + if configname.endswith("iphoneos"): + new_key = key.split("[")[0] + settings[new_key] = settings[key] + else: + print 'Warning: Conditional keys not implemented, ignoring:', \ + ' '.join(conditional_keys) + del settings[key] + + def _Settings(self): + assert self.configname + return self.xcode_settings[self.configname] + + def _Test(self, test_key, cond_key, default): + return self._Settings().get(test_key, default) == cond_key + + def _Appendf(self, lst, test_key, format_str, default=None): + if test_key in self._Settings(): + lst.append(format_str % str(self._Settings()[test_key])) + elif default: + lst.append(format_str % str(default)) + + def _WarnUnimplemented(self, test_key): + if test_key in self._Settings(): + print 'Warning: Ignoring not yet implemented key "%s".' % test_key + + def IsBinaryOutputFormat(self, configname): + default = "binary" if self.isIOS else "xml" + format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT', + default) + return format == "binary" + + def IsIosFramework(self): + return self.spec['type'] == 'shared_library' and self._IsBundle() and \ + self.isIOS + + def _IsBundle(self): + return int(self.spec.get('mac_bundle', 0)) != 0 or self._IsXCTest() or \ + self._IsXCUiTest() + + def _IsXCTest(self): + return int(self.spec.get('mac_xctest_bundle', 0)) != 0 + + def _IsXCUiTest(self): + return int(self.spec.get('mac_xcuitest_bundle', 0)) != 0 + + def _IsIosAppExtension(self): + return int(self.spec.get('ios_app_extension', 0)) != 0 + + def _IsIosWatchKitExtension(self): + return int(self.spec.get('ios_watchkit_extension', 0)) != 0 + + def _IsIosWatchApp(self): + return int(self.spec.get('ios_watch_app', 0)) != 0 + + def GetFrameworkVersion(self): + """Returns the framework version of the current target. Only valid for + bundles.""" + assert self._IsBundle() + return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A') + + def GetWrapperExtension(self): + """Returns the bundle extension (.app, .framework, .plugin, etc). Only + valid for bundles.""" + assert self._IsBundle() + if self.spec['type'] in ('loadable_module', 'shared_library'): + default_wrapper_extension = { + 'loadable_module': 'bundle', + 'shared_library': 'framework', + }[self.spec['type']] + wrapper_extension = self.GetPerTargetSetting( + 'WRAPPER_EXTENSION', default=default_wrapper_extension) + return '.' + self.spec.get('product_extension', wrapper_extension) + elif self.spec['type'] == 'executable': + if self._IsIosAppExtension() or self._IsIosWatchKitExtension(): + return '.' + self.spec.get('product_extension', 'appex') + else: + return '.' + self.spec.get('product_extension', 'app') + else: + assert False, "Don't know extension for '%s', target '%s'" % ( + self.spec['type'], self.spec['target_name']) + + def GetProductName(self): + """Returns PRODUCT_NAME.""" + return self.spec.get('product_name', self.spec['target_name']) + + def GetFullProductName(self): + """Returns FULL_PRODUCT_NAME.""" + if self._IsBundle(): + return self.GetWrapperName() + else: + return self._GetStandaloneBinaryPath() + + def GetWrapperName(self): + """Returns the directory name of the bundle represented by this target. 
+ Only valid for bundles.""" + assert self._IsBundle() + return self.GetProductName() + self.GetWrapperExtension() + + def GetBundleContentsFolderPath(self): + """Returns the qualified path to the bundle's contents folder. E.g. + Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles.""" + if self.isIOS: + return self.GetWrapperName() + assert self._IsBundle() + if self.spec['type'] == 'shared_library': + return os.path.join( + self.GetWrapperName(), 'Versions', self.GetFrameworkVersion()) + else: + # loadable_modules have a 'Contents' folder like executables. + return os.path.join(self.GetWrapperName(), 'Contents') + + def GetBundleResourceFolder(self): + """Returns the qualified path to the bundle's resource folder. E.g. + Chromium.app/Contents/Resources. Only valid for bundles.""" + assert self._IsBundle() + if self.isIOS: + return self.GetBundleContentsFolderPath() + return os.path.join(self.GetBundleContentsFolderPath(), 'Resources') + + def GetBundleExecutableFolderPath(self): + """Returns the qualified path to the bundle's executables folder. E.g. + Chromium.app/Contents/MacOS. Only valid for bundles.""" + assert self._IsBundle() + if self.spec['type'] in ('shared_library') or self.isIOS: + return self.GetBundleContentsFolderPath() + elif self.spec['type'] in ('executable', 'loadable_module'): + return os.path.join(self.GetBundleContentsFolderPath(), 'MacOS') + + def GetBundleJavaFolderPath(self): + """Returns the qualified path to the bundle's Java resource folder. + E.g. Chromium.app/Contents/Resources/Java. Only valid for bundles.""" + assert self._IsBundle() + return os.path.join(self.GetBundleResourceFolder(), 'Java') + + def GetBundleFrameworksFolderPath(self): + """Returns the qualified path to the bundle's frameworks folder. E.g, + Chromium.app/Contents/Frameworks. Only valid for bundles.""" + assert self._IsBundle() + return os.path.join(self.GetBundleContentsFolderPath(), 'Frameworks') + + def GetBundleSharedFrameworksFolderPath(self): + """Returns the qualified path to the bundle's frameworks folder. E.g, + Chromium.app/Contents/SharedFrameworks. Only valid for bundles.""" + assert self._IsBundle() + return os.path.join(self.GetBundleContentsFolderPath(), + 'SharedFrameworks') + + def GetBundleSharedSupportFolderPath(self): + """Returns the qualified path to the bundle's shared support folder. E.g, + Chromium.app/Contents/SharedSupport. Only valid for bundles.""" + assert self._IsBundle() + if self.spec['type'] == 'shared_library': + return self.GetBundleResourceFolder() + else: + return os.path.join(self.GetBundleContentsFolderPath(), + 'SharedSupport') + + def GetBundlePlugInsFolderPath(self): + """Returns the qualified path to the bundle's plugins folder. E.g, + Chromium.app/Contents/PlugIns. Only valid for bundles.""" + assert self._IsBundle() + return os.path.join(self.GetBundleContentsFolderPath(), 'PlugIns') + + def GetBundleXPCServicesFolderPath(self): + """Returns the qualified path to the bundle's XPC services folder. E.g, + Chromium.app/Contents/XPCServices. Only valid for bundles.""" + assert self._IsBundle() + return os.path.join(self.GetBundleContentsFolderPath(), 'XPCServices') + + def GetBundlePlistPath(self): + """Returns the qualified path to the bundle's plist file. E.g. + Chromium.app/Contents/Info.plist. 
Only valid for bundles.""" + assert self._IsBundle() + if self.spec['type'] in ('executable', 'loadable_module') or \ + self.IsIosFramework(): + return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist') + else: + return os.path.join(self.GetBundleContentsFolderPath(), + 'Resources', 'Info.plist') + + def GetProductType(self): + """Returns the PRODUCT_TYPE of this target.""" + if self._IsIosAppExtension(): + assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle ' + '(target %s)' % self.spec['target_name']) + return 'com.apple.product-type.app-extension' + if self._IsIosWatchKitExtension(): + assert self._IsBundle(), ('ios_watchkit_extension flag requires ' + 'mac_bundle (target %s)' % self.spec['target_name']) + return 'com.apple.product-type.watchkit-extension' + if self._IsIosWatchApp(): + assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle ' + '(target %s)' % self.spec['target_name']) + return 'com.apple.product-type.application.watchapp' + if self._IsXCUiTest(): + assert self._IsBundle(), ('mac_xcuitest_bundle flag requires mac_bundle ' + '(target %s)' % self.spec['target_name']) + return 'com.apple.product-type.bundle.ui-testing' + if self._IsBundle(): + return { + 'executable': 'com.apple.product-type.application', + 'loadable_module': 'com.apple.product-type.bundle', + 'shared_library': 'com.apple.product-type.framework', + }[self.spec['type']] + else: + return { + 'executable': 'com.apple.product-type.tool', + 'loadable_module': 'com.apple.product-type.library.dynamic', + 'shared_library': 'com.apple.product-type.library.dynamic', + 'static_library': 'com.apple.product-type.library.static', + }[self.spec['type']] + + def GetMachOType(self): + """Returns the MACH_O_TYPE of this target.""" + # Weird, but matches Xcode. + if not self._IsBundle() and self.spec['type'] == 'executable': + return '' + return { + 'executable': 'mh_execute', + 'static_library': 'staticlib', + 'shared_library': 'mh_dylib', + 'loadable_module': 'mh_bundle', + }[self.spec['type']] + + def _GetBundleBinaryPath(self): + """Returns the name of the bundle binary of by this target. + E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles.""" + assert self._IsBundle() + return os.path.join(self.GetBundleExecutableFolderPath(), \ + self.GetExecutableName()) + + def _GetStandaloneExecutableSuffix(self): + if 'product_extension' in self.spec: + return '.' + self.spec['product_extension'] + return { + 'executable': '', + 'static_library': '.a', + 'shared_library': '.dylib', + 'loadable_module': '.so', + }[self.spec['type']] + + def _GetStandaloneExecutablePrefix(self): + return self.spec.get('product_prefix', { + 'executable': '', + 'static_library': 'lib', + 'shared_library': 'lib', + # Non-bundled loadable_modules are called foo.so for some reason + # (that is, .so and no prefix) with the xcode build -- match that. + 'loadable_module': '', + }[self.spec['type']]) + + def _GetStandaloneBinaryPath(self): + """Returns the name of the non-bundle binary represented by this target. + E.g. hello_world. 
Only valid for non-bundles.""" + assert not self._IsBundle() + assert self.spec['type'] in ( + 'executable', 'shared_library', 'static_library', 'loadable_module'), ( + 'Unexpected type %s' % self.spec['type']) + target = self.spec['target_name'] + if self.spec['type'] == 'static_library': + if target[:3] == 'lib': + target = target[3:] + elif self.spec['type'] in ('loadable_module', 'shared_library'): + if target[:3] == 'lib': + target = target[3:] + + target_prefix = self._GetStandaloneExecutablePrefix() + target = self.spec.get('product_name', target) + target_ext = self._GetStandaloneExecutableSuffix() + return target_prefix + target + target_ext + + def GetExecutableName(self): + """Returns the executable name of the bundle represented by this target. + E.g. Chromium.""" + if self._IsBundle(): + return self.spec.get('product_name', self.spec['target_name']) + else: + return self._GetStandaloneBinaryPath() + + def GetExecutablePath(self): + """Returns the qualified path to the primary executable of the bundle + represented by this target. E.g. Chromium.app/Contents/MacOS/Chromium.""" + if self._IsBundle(): + return self._GetBundleBinaryPath() + else: + return self._GetStandaloneBinaryPath() + + def GetActiveArchs(self, configname): + """Returns the architectures this target should be built for.""" + config_settings = self.xcode_settings[configname] + xcode_archs_default = GetXcodeArchsDefault() + return xcode_archs_default.ActiveArchs( + config_settings.get('ARCHS'), + config_settings.get('VALID_ARCHS'), + config_settings.get('SDKROOT')) + + def _GetSdkVersionInfoItem(self, sdk, infoitem): + # xcodebuild requires Xcode and can't run on Command Line Tools-only + # systems from 10.7 onward. + # Since the CLT has no SDK paths anyway, returning None is the + # most sensible route and should still do the right thing. + try: + return GetStdout(['xcrun', '--sdk', sdk, infoitem]) + except: + pass + + def _SdkRoot(self, configname): + if configname is None: + configname = self.configname + return self.GetPerConfigSetting('SDKROOT', configname, default='') + + def _XcodePlatformPath(self, configname=None): + sdk_root = self._SdkRoot(configname) + if sdk_root not in XcodeSettings._platform_path_cache: + platform_path = self._GetSdkVersionInfoItem(sdk_root, + '--show-sdk-platform-path') + XcodeSettings._platform_path_cache[sdk_root] = platform_path + return XcodeSettings._platform_path_cache[sdk_root] + + def _SdkPath(self, configname=None): + sdk_root = self._SdkRoot(configname) + if sdk_root.startswith('/'): + return sdk_root + return self._XcodeSdkPath(sdk_root) + + def _XcodeSdkPath(self, sdk_root): + if sdk_root not in XcodeSettings._sdk_path_cache: + sdk_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-path') + XcodeSettings._sdk_path_cache[sdk_root] = sdk_path + if sdk_root: + XcodeSettings._sdk_root_cache[sdk_path] = sdk_root + return XcodeSettings._sdk_path_cache[sdk_root] + + def _AppendPlatformVersionMinFlags(self, lst): + self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s') + if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings(): + # TODO: Implement this better? 
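+      # For example (hypothetical SDK paths): a basename like
+      # iPhoneSimulator10.3.sdk lowercases to start with 'iphonesimulator'
+      # and selects -mios-simulator-version-min below, while a device SDK
+      # such as iPhoneOS10.3.sdk falls through to -miphoneos-version-min.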
+ sdk_path_basename = os.path.basename(self._SdkPath()) + if sdk_path_basename.lower().startswith('iphonesimulator'): + self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET', + '-mios-simulator-version-min=%s') + else: + self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET', + '-miphoneos-version-min=%s') + + def GetCflags(self, configname, arch=None): + """Returns flags that need to be added to .c, .cc, .m, and .mm + compilations.""" + # This functions (and the similar ones below) do not offer complete + # emulation of all xcode_settings keys. They're implemented on demand. + + self.configname = configname + cflags = [] + + sdk_root = self._SdkPath() + if 'SDKROOT' in self._Settings() and sdk_root: + cflags.append('-isysroot %s' % sdk_root) + + if self.header_map_path: + cflags.append('-I%s' % self.header_map_path) + + if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'): + cflags.append('-Wconstant-conversion') + + if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'): + cflags.append('-funsigned-char') + + if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'): + cflags.append('-fasm-blocks') + + if 'GCC_DYNAMIC_NO_PIC' in self._Settings(): + if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES': + cflags.append('-mdynamic-no-pic') + else: + pass + # TODO: In this case, it depends on the target. xcode passes + # mdynamic-no-pic by default for executable and possibly static lib + # according to mento + + if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'): + cflags.append('-mpascal-strings') + + self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s') + + if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'): + dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf') + if dbg_format == 'dwarf': + cflags.append('-gdwarf-2') + elif dbg_format == 'stabs': + raise NotImplementedError('stabs debug format is not supported yet.') + elif dbg_format == 'dwarf-with-dsym': + cflags.append('-gdwarf-2') + else: + raise NotImplementedError('Unknown debug format %s' % dbg_format) + + if self._Settings().get('GCC_STRICT_ALIASING') == 'YES': + cflags.append('-fstrict-aliasing') + elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO': + cflags.append('-fno-strict-aliasing') + + if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'): + cflags.append('-fvisibility=hidden') + + if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'): + cflags.append('-Werror') + + if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'): + cflags.append('-Wnewline-eof') + + # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or + # llvm-gcc. It also requires a fairly recent libtool, and + # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the + # path to the libLTO.dylib that matches the used clang. + if self._Test('LLVM_LTO', 'YES', default='NO'): + cflags.append('-flto') + + self._AppendPlatformVersionMinFlags(cflags) + + # TODO: + if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'): + self._WarnUnimplemented('COPY_PHASE_STRIP') + self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS') + self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS') + + # TODO: This is exported correctly, but assigning to it is not supported. 
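+    # An explicit MACH_O_TYPE or PRODUCT_TYPE in xcode_settings (e.g. a
+    # hypothetical MACH_O_TYPE = mh_bundle override) only triggers the
+    # warnings below; GetMachOType() and GetProductType() derive the values
+    # from the target type instead.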
+ self._WarnUnimplemented('MACH_O_TYPE') + self._WarnUnimplemented('PRODUCT_TYPE') + + if arch is not None: + archs = [arch] + else: + assert self.configname + archs = self.GetActiveArchs(self.configname) + if len(archs) != 1: + # TODO: Supporting fat binaries will be annoying. + self._WarnUnimplemented('ARCHS') + archs = ['i386'] + cflags.append('-arch ' + archs[0]) + + if archs[0] in ('i386', 'x86_64'): + if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'): + cflags.append('-msse3') + if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES', + default='NO'): + cflags.append('-mssse3') # Note 3rd 's'. + if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'): + cflags.append('-msse4.1') + if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'): + cflags.append('-msse4.2') + + cflags += self._Settings().get('WARNING_CFLAGS', []) + + platform_root = self._XcodePlatformPath(configname) + if platform_root and self._IsXCTest(): + cflags.append('-F' + platform_root + '/Developer/Library/Frameworks/') + + if sdk_root: + framework_root = sdk_root + else: + framework_root = '' + config = self.spec['configurations'][self.configname] + framework_dirs = config.get('mac_framework_dirs', []) + for directory in framework_dirs: + cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root)) + + self.configname = None + return cflags + + def GetCflagsC(self, configname): + """Returns flags that need to be added to .c, and .m compilations.""" + self.configname = configname + cflags_c = [] + if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi': + cflags_c.append('-ansi') + else: + self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s') + cflags_c += self._Settings().get('OTHER_CFLAGS', []) + self.configname = None + return cflags_c + + def GetCflagsCC(self, configname): + """Returns flags that need to be added to .cc, and .mm compilations.""" + self.configname = configname + cflags_cc = [] + + clang_cxx_language_standard = self._Settings().get( + 'CLANG_CXX_LANGUAGE_STANDARD') + # Note: Don't make c++0x to c++11 so that c++0x can be used with older + # clangs that don't understand c++11 yet (like Xcode 4.2's). + if clang_cxx_language_standard: + cflags_cc.append('-std=%s' % clang_cxx_language_standard) + + self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s') + + if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'): + cflags_cc.append('-fno-rtti') + if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'): + cflags_cc.append('-fno-exceptions') + if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'): + cflags_cc.append('-fvisibility-inlines-hidden') + if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'): + cflags_cc.append('-fno-threadsafe-statics') + # Note: This flag is a no-op for clang, it only has an effect for gcc. + if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'): + cflags_cc.append('-Wno-invalid-offsetof') + + other_ccflags = [] + + for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']): + # TODO: More general variable expansion. Missing in many other places too. 
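+      # Only the spellings handled below are expanded: e.g. a hypothetical
+      # OTHER_CPLUSPLUSFLAGS of ['$(inherited)', '-Wextra'] becomes the
+      # target's OTHER_CFLAGS plus '-Wextra', while any other $(VAR)
+      # reference is passed through untouched.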
+ if flag in ('$inherited', '$(inherited)', '${inherited}'): + flag = '$OTHER_CFLAGS' + if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'): + other_ccflags += self._Settings().get('OTHER_CFLAGS', []) + else: + other_ccflags.append(flag) + cflags_cc += other_ccflags + + self.configname = None + return cflags_cc + + def _AddObjectiveCGarbageCollectionFlags(self, flags): + gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported') + if gc_policy == 'supported': + flags.append('-fobjc-gc') + elif gc_policy == 'required': + flags.append('-fobjc-gc-only') + + def _AddObjectiveCARCFlags(self, flags): + if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'): + flags.append('-fobjc-arc') + + def _AddObjectiveCMissingPropertySynthesisFlags(self, flags): + if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS', + 'YES', default='NO'): + flags.append('-Wobjc-missing-property-synthesis') + + def GetCflagsObjC(self, configname): + """Returns flags that need to be added to .m compilations.""" + self.configname = configname + cflags_objc = [] + self._AddObjectiveCGarbageCollectionFlags(cflags_objc) + self._AddObjectiveCARCFlags(cflags_objc) + self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc) + self.configname = None + return cflags_objc + + def GetCflagsObjCC(self, configname): + """Returns flags that need to be added to .mm compilations.""" + self.configname = configname + cflags_objcc = [] + self._AddObjectiveCGarbageCollectionFlags(cflags_objcc) + self._AddObjectiveCARCFlags(cflags_objcc) + self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc) + if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'): + cflags_objcc.append('-fobjc-call-cxx-cdtors') + self.configname = None + return cflags_objcc + + def GetInstallNameBase(self): + """Return DYLIB_INSTALL_NAME_BASE for this target.""" + # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. + if (self.spec['type'] != 'shared_library' and + (self.spec['type'] != 'loadable_module' or self._IsBundle())): + return None + install_base = self.GetPerTargetSetting( + 'DYLIB_INSTALL_NAME_BASE', + default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib') + return install_base + + def _StandardizePath(self, path): + """Do :standardizepath processing for path.""" + # I'm not quite sure what :standardizepath does. Just call normpath(), + # but don't let @executable_path/../foo collapse to foo. + if '/' in path: + prefix, rest = '', path + if path.startswith('@'): + prefix, rest = path.split('/', 1) + rest = os.path.normpath(rest) # :standardizepath + path = os.path.join(prefix, rest) + return path + + def GetInstallName(self): + """Return LD_DYLIB_INSTALL_NAME for this target.""" + # Xcode sets this for shared_libraries, and for nonbundled loadable_modules. + if (self.spec['type'] != 'shared_library' and + (self.spec['type'] != 'loadable_module' or self._IsBundle())): + return None + + default_install_name = \ + '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)' + install_name = self.GetPerTargetSetting( + 'LD_DYLIB_INSTALL_NAME', default=default_install_name) + + # Hardcode support for the variables used in chromium for now, to + # unblock people using the make build. 
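+    # Only the two forms asserted below are accepted. For a hypothetical
+    # (non-iOS) framework bundle named Foo, the default would expand to
+    # something like /Library/Frameworks/Foo.framework/Versions/A/Foo.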
+ if '$' in install_name: + assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/' + '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), ( + 'Variables in LD_DYLIB_INSTALL_NAME are not generally supported ' + 'yet in target \'%s\' (got \'%s\')' % + (self.spec['target_name'], install_name)) + + install_name = install_name.replace( + '$(DYLIB_INSTALL_NAME_BASE:standardizepath)', + self._StandardizePath(self.GetInstallNameBase())) + if self._IsBundle(): + # These are only valid for bundles, hence the |if|. + install_name = install_name.replace( + '$(WRAPPER_NAME)', self.GetWrapperName()) + install_name = install_name.replace( + '$(PRODUCT_NAME)', self.GetProductName()) + else: + assert '$(WRAPPER_NAME)' not in install_name + assert '$(PRODUCT_NAME)' not in install_name + + install_name = install_name.replace( + '$(EXECUTABLE_PATH)', self.GetExecutablePath()) + return install_name + + def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path): + """Checks if ldflag contains a filename and if so remaps it from + gyp-directory-relative to build-directory-relative.""" + # This list is expanded on demand. + # They get matched as: + # -exported_symbols_list file + # -Wl,exported_symbols_list file + # -Wl,exported_symbols_list,file + LINKER_FILE = r'(\S+)' + WORD = r'\S+' + linker_flags = [ + ['-exported_symbols_list', LINKER_FILE], # Needed for NaCl. + ['-unexported_symbols_list', LINKER_FILE], + ['-reexported_symbols_list', LINKER_FILE], + ['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting. + ] + for flag_pattern in linker_flags: + regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern)) + m = regex.match(ldflag) + if m: + ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \ + ldflag[m.end(1):] + # Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS, + # TODO(thakis): Update ffmpeg.gyp): + if ldflag.startswith('-L'): + ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):]) + return ldflag + + def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None): + """Returns flags that need to be passed to the linker. + + Args: + configname: The name of the configuration to get ld flags for. + product_dir: The directory where products such static and dynamic + libraries are placed. This is added to the library search path. + gyp_to_build_path: A function that converts paths relative to the + current gyp file to paths relative to the build direcotry. + """ + self.configname = configname + ldflags = [] + + # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS + # can contain entries that depend on this. Explicitly absolutify these. 
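+    # For instance, a hypothetical '-Wl,-exported_symbols_list,foo.syms'
+    # entry has foo.syms rewritten from a gyp-relative path to a
+    # build-relative one by _MapLinkerFlagFilename().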
+ for ldflag in self._Settings().get('OTHER_LDFLAGS', []): + ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path)) + + if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'): + ldflags.append('-Wl,-dead_strip') + + if self._Test('PREBINDING', 'YES', default='NO'): + ldflags.append('-Wl,-prebind') + + self._Appendf( + ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s') + self._Appendf( + ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s') + + self._AppendPlatformVersionMinFlags(ldflags) + + if 'SDKROOT' in self._Settings() and self._SdkPath(): + ldflags.append('-isysroot ' + self._SdkPath()) + + for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []): + ldflags.append('-L' + gyp_to_build_path(library_path)) + + if 'ORDER_FILE' in self._Settings(): + ldflags.append('-Wl,-order_file ' + + '-Wl,' + gyp_to_build_path( + self._Settings()['ORDER_FILE'])) + + if arch is not None: + archs = [arch] + else: + assert self.configname + archs = self.GetActiveArchs(self.configname) + if len(archs) != 1: + # TODO: Supporting fat binaries will be annoying. + self._WarnUnimplemented('ARCHS') + archs = ['i386'] + ldflags.append('-arch ' + archs[0]) + + # Xcode adds the product directory by default. + # Rewrite -L. to -L./ to work around http://www.openradar.me/25313838 + ldflags.append('-L' + (product_dir if product_dir != '.' else './')) + + install_name = self.GetInstallName() + if install_name and self.spec['type'] != 'loadable_module': + ldflags.append('-install_name ' + install_name.replace(' ', r'\ ')) + + for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []): + ldflags.append('-Wl,-rpath,' + rpath) + + sdk_root = self._SdkPath() + if not sdk_root: + sdk_root = '' + config = self.spec['configurations'][self.configname] + framework_dirs = config.get('mac_framework_dirs', []) + for directory in framework_dirs: + ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root)) + + platform_root = self._XcodePlatformPath(configname) + if sdk_root and platform_root and self._IsXCTest(): + ldflags.append('-F' + platform_root + '/Developer/Library/Frameworks/') + ldflags.append('-framework XCTest') + + is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension() + if sdk_root and is_extension: + # Adds the link flags for extensions. These flags are common for all + # extensions and provide loader and main function. + # These flags reflect the compilation options used by xcode to compile + # extensions. + if XcodeVersion() < '0900': + ldflags.append('-lpkstart') + ldflags.append(sdk_root + + '/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit') + else: + ldflags.append('-e _NSExtensionMain') + ldflags.append('-fapplication-extension') + + self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s') + + self.configname = None + return ldflags + + def GetLibtoolflags(self, configname): + """Returns flags that need to be passed to the static linker. + + Args: + configname: The name of the configuration to get ld flags for. + """ + self.configname = configname + libtoolflags = [] + + for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []): + libtoolflags.append(libtoolflag) + # TODO(thakis): ARCHS? + + self.configname = None + return libtoolflags + + def GetPerTargetSettings(self): + """Gets a list of all the per-target settings. 
This will only fetch keys + whose values are the same across all configurations.""" + first_pass = True + result = {} + for configname in sorted(self.xcode_settings.keys()): + if first_pass: + result = dict(self.xcode_settings[configname]) + first_pass = False + else: + for key, value in self.xcode_settings[configname].iteritems(): + if key not in result: + continue + elif result[key] != value: + del result[key] + return result + + def GetPerConfigSetting(self, setting, configname, default=None): + if configname in self.xcode_settings: + return self.xcode_settings[configname].get(setting, default) + else: + return self.GetPerTargetSetting(setting, default) + + def GetPerTargetSetting(self, setting, default=None): + """Tries to get xcode_settings.setting from spec. Assumes that the setting + has the same value in all configurations and throws otherwise.""" + is_first_pass = True + result = None + for configname in sorted(self.xcode_settings.keys()): + if is_first_pass: + result = self.xcode_settings[configname].get(setting, None) + is_first_pass = False + else: + assert result == self.xcode_settings[configname].get(setting, None), ( + "Expected per-target setting for '%s', got per-config setting " + "(target %s)" % (setting, self.spec['target_name'])) + if result is None: + return default + return result + + def _GetStripPostbuilds(self, configname, output_binary, quiet): + """Returns a list of shell commands that contain the shell commands + neccessary to strip this target's binary. These should be run as postbuilds + before the actual postbuilds run.""" + self.configname = configname + + result = [] + if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and + self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')): + + default_strip_style = 'debugging' + if ((self.spec['type'] == 'loadable_module' or self._IsIosAppExtension()) + and self._IsBundle()): + default_strip_style = 'non-global' + elif self.spec['type'] == 'executable': + default_strip_style = 'all' + + strip_style = self._Settings().get('STRIP_STYLE', default_strip_style) + strip_flags = { + 'all': '', + 'non-global': '-x', + 'debugging': '-S', + }[strip_style] + + explicit_strip_flags = self._Settings().get('STRIPFLAGS', '') + if explicit_strip_flags: + strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags) + + if not quiet: + result.append('echo STRIP\\(%s\\)' % self.spec['target_name']) + result.append('strip %s %s' % (strip_flags, output_binary)) + + self.configname = None + return result + + def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet): + """Returns a list of shell commands that contain the shell commands + neccessary to massage this target's debug information. These should be run + as postbuilds before the actual postbuilds run.""" + self.configname = configname + + # For static libraries, no dSYMs are created. 
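+    # The emitted command has the form
+    #   dsymutil <output_binary> -o <output>.dSYM
+    # and, per the condition below, appears only for dwarf-with-dsym builds
+    # of non-static-library targets.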
+ result = [] + if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and + self._Test( + 'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and + self.spec['type'] != 'static_library'): + if not quiet: + result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name']) + result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM')) + + self.configname = None + return result + + def _GetTargetPostbuilds(self, configname, output, output_binary, + quiet=False): + """Returns a list of shell commands that contain the shell commands + to run as postbuilds for this target, before the actual postbuilds.""" + # dSYMs need to build before stripping happens. + return ( + self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) + + self._GetStripPostbuilds(configname, output_binary, quiet)) + + def _GetIOSPostbuilds(self, configname, output_binary): + """Return a shell command to codesign the iOS output binary so it can + be deployed to a device. This should be run as the very last step of the + build.""" + if not (self.isIOS and + (self.spec['type'] == 'executable' or self._IsXCTest()) or + self.IsIosFramework()): + return [] + + postbuilds = [] + product_name = self.GetFullProductName() + settings = self.xcode_settings[configname] + + # Xcode expects XCTests to be copied into the TEST_HOST dir. + if self._IsXCTest(): + source = os.path.join("${BUILT_PRODUCTS_DIR}", product_name) + test_host = os.path.dirname(settings.get('TEST_HOST')); + xctest_destination = os.path.join(test_host, 'PlugIns', product_name) + postbuilds.extend(['ditto %s %s' % (source, xctest_destination)]) + + key = self._GetIOSCodeSignIdentityKey(settings) + if not key: + return postbuilds + + # Warn for any unimplemented signing xcode keys. + unimpl = ['OTHER_CODE_SIGN_FLAGS'] + unimpl = set(unimpl) & set(self.xcode_settings[configname].keys()) + if unimpl: + print 'Warning: Some codesign keys not implemented, ignoring: %s' % ( + ', '.join(sorted(unimpl))) + + if self._IsXCTest(): + # For device xctests, Xcode copies two extra frameworks into $TEST_HOST. 
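+      # Concretely, IDEBundleInjection.framework and XCTest.framework are
+      # ditto'd from the platform's Developer/Library directories into
+      # <TEST_HOST dir>/Frameworks and re-signed further below.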
+ test_host = os.path.dirname(settings.get('TEST_HOST')); + frameworks_dir = os.path.join(test_host, 'Frameworks') + platform_root = self._XcodePlatformPath(configname) + frameworks = \ + ['Developer/Library/PrivateFrameworks/IDEBundleInjection.framework', + 'Developer/Library/Frameworks/XCTest.framework'] + for framework in frameworks: + source = os.path.join(platform_root, framework) + destination = os.path.join(frameworks_dir, os.path.basename(framework)) + postbuilds.extend(['ditto %s %s' % (source, destination)]) + + # Then re-sign everything with 'preserve=True' + postbuilds.extend(['%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % ( + os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key, + settings.get('CODE_SIGN_ENTITLEMENTS', ''), + settings.get('PROVISIONING_PROFILE', ''), destination, True) + ]) + plugin_dir = os.path.join(test_host, 'PlugIns') + targets = [os.path.join(plugin_dir, product_name), test_host] + for target in targets: + postbuilds.extend(['%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % ( + os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key, + settings.get('CODE_SIGN_ENTITLEMENTS', ''), + settings.get('PROVISIONING_PROFILE', ''), target, True) + ]) + + postbuilds.extend(['%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % ( + os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key, + settings.get('CODE_SIGN_ENTITLEMENTS', ''), + settings.get('PROVISIONING_PROFILE', ''), + os.path.join("${BUILT_PRODUCTS_DIR}", product_name), False) + ]) + return postbuilds + + def _GetIOSCodeSignIdentityKey(self, settings): + identity = settings.get('CODE_SIGN_IDENTITY') + if not identity: + return None + if identity not in XcodeSettings._codesigning_key_cache: + output = subprocess.check_output( + ['security', 'find-identity', '-p', 'codesigning', '-v']) + for line in output.splitlines(): + if identity in line: + fingerprint = line.split()[1] + cache = XcodeSettings._codesigning_key_cache + assert identity not in cache or fingerprint == cache[identity], ( + "Multiple codesigning fingerprints for identity: %s" % identity) + XcodeSettings._codesigning_key_cache[identity] = fingerprint + return XcodeSettings._codesigning_key_cache.get(identity, '') + + def AddImplicitPostbuilds(self, configname, output, output_binary, + postbuilds=[], quiet=False): + """Returns a list of shell commands that should run before and after + |postbuilds|.""" + assert output_binary is not None + pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet) + post = self._GetIOSPostbuilds(configname, output_binary) + return pre + postbuilds + post + + def _AdjustLibrary(self, library, config_name=None): + if library.endswith('.framework'): + l = '-framework ' + os.path.splitext(os.path.basename(library))[0] + else: + m = self.library_re.match(library) + if m: + l = '-l' + m.group(1) + else: + l = library + + sdk_root = self._SdkPath(config_name) + if not sdk_root: + sdk_root = '' + # Xcode 7 started shipping with ".tbd" (text based stubs) files instead of + # ".dylib" without providing a real support for them. What it does, for + # "/usr/lib" libraries, is do "-L/usr/lib -lname" which is dependent on the + # library order and cause collision when building Chrome. + # + # Instead substitude ".tbd" to ".dylib" in the generated project when the + # following conditions are both true: + # - library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib", + # - the ".dylib" file does not exists but a ".tbd" file do. 
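+    # E.g. a hypothetical '$(SDKROOT)/usr/lib/libz.dylib' entry is rewritten
+    # to '<sdk_root>/usr/lib/libz.tbd' when only the .tbd stub exists on
+    # disk.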
+ library = l.replace('$(SDKROOT)', sdk_root) + if l.startswith('$(SDKROOT)'): + basename, ext = os.path.splitext(library) + if ext == '.dylib' and not os.path.exists(library): + tbd_library = basename + '.tbd' + if os.path.exists(tbd_library): + library = tbd_library + return library + + def AdjustLibraries(self, libraries, config_name=None): + """Transforms entries like 'Cocoa.framework' in libraries into entries like + '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc. + """ + libraries = [self._AdjustLibrary(library, config_name) + for library in libraries] + return libraries + + def _BuildMachineOSBuild(self): + return GetStdout(['sw_vers', '-buildVersion']) + + def _XcodeIOSDeviceFamily(self, configname): + family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1') + return [int(x) for x in family.split(',')] + + def GetExtraPlistItems(self, configname=None): + """Returns a dictionary with extra items to insert into Info.plist.""" + if configname not in XcodeSettings._plist_cache: + cache = {} + cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild() + + xcode, xcode_build = XcodeVersion() + cache['DTXcode'] = xcode + cache['DTXcodeBuild'] = xcode_build + compiler = self.xcode_settings[configname].get('GCC_VERSION') + if compiler is not None: + cache['DTCompiler'] = compiler + + sdk_root = self._SdkRoot(configname) + if not sdk_root: + sdk_root = self._DefaultSdkRoot() + sdk_version = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-version') + cache['DTSDKName'] = sdk_root + (sdk_version or '') + if xcode >= '0720': + cache['DTSDKBuild'] = self._GetSdkVersionInfoItem( + sdk_root, '--show-sdk-build-version') + elif xcode >= '0430': + cache['DTSDKBuild'] = sdk_version + else: + cache['DTSDKBuild'] = cache['BuildMachineOSBuild'] + + if self.isIOS: + cache['MinimumOSVersion'] = self.xcode_settings[configname].get( + 'IPHONEOS_DEPLOYMENT_TARGET') + cache['DTPlatformName'] = sdk_root + cache['DTPlatformVersion'] = sdk_version + + if configname.endswith("iphoneos"): + cache['CFBundleSupportedPlatforms'] = ['iPhoneOS'] + cache['DTPlatformBuild'] = cache['DTSDKBuild'] + else: + cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator'] + # This is weird, but Xcode sets DTPlatformBuild to an empty field + # for simulator builds. + cache['DTPlatformBuild'] = "" + XcodeSettings._plist_cache[configname] = cache + + # Include extra plist items that are per-target, not per global + # XcodeSettings. + items = dict(XcodeSettings._plist_cache[configname]) + if self.isIOS: + items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname) + return items + + def _DefaultSdkRoot(self): + """Returns the default SDKROOT to use. + + Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode + project, then the environment variable was empty. Starting with this + version, Xcode uses the name of the newest SDK installed. 
+ """ + xcode_version, xcode_build = XcodeVersion() + if xcode_version < '0500': + return '' + default_sdk_path = self._XcodeSdkPath('') + default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path) + if default_sdk_root: + return default_sdk_root + try: + all_sdks = GetStdout(['xcodebuild', '-showsdks']) + except: + # If xcodebuild fails, there will be no valid SDKs + return '' + for line in all_sdks.splitlines(): + items = line.split() + if len(items) >= 3 and items[-2] == '-sdk': + sdk_root = items[-1] + sdk_path = self._XcodeSdkPath(sdk_root) + if sdk_path == default_sdk_path: + return sdk_root + return '' + + +class MacPrefixHeader(object): + """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature. + + This feature consists of several pieces: + * If GCC_PREFIX_HEADER is present, all compilations in that project get an + additional |-include path_to_prefix_header| cflag. + * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is + instead compiled, and all other compilations in the project get an + additional |-include path_to_compiled_header| instead. + + Compiled prefix headers have the extension gch. There is one gch file for + every language used in the project (c, cc, m, mm), since gch files for + different languages aren't compatible. + + gch files themselves are built with the target's normal cflags, but they + obviously don't get the |-include| flag. Instead, they need a -x flag that + describes their language. + + All o files in the target need to depend on the gch file, to make sure + it's built before any o file is built. + + This class helps with some of these tasks, but it needs help from the build + system for writing dependencies to the gch files, for writing build commands + for the gch files, and for figuring out the location of the gch files. + """ + def __init__(self, xcode_settings, + gyp_path_to_build_path, gyp_path_to_build_output): + """If xcode_settings is None, all methods on this class are no-ops. + + Args: + gyp_path_to_build_path: A function that takes a gyp-relative path, + and returns a path relative to the build directory. + gyp_path_to_build_output: A function that takes a gyp-relative path and + a language code ('c', 'cc', 'm', or 'mm'), and that returns a path + to where the output of precompiling that path for that language + should be placed (without the trailing '.gch'). + """ + # This doesn't support per-configuration prefix headers. Good enough + # for now. + self.header = None + self.compile_headers = False + if xcode_settings: + self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER') + self.compile_headers = xcode_settings.GetPerTargetSetting( + 'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO' + self.compiled_headers = {} + if self.header: + if self.compile_headers: + for lang in ['c', 'cc', 'm', 'mm']: + self.compiled_headers[lang] = gyp_path_to_build_output( + self.header, lang) + self.header = gyp_path_to_build_path(self.header) + + def _CompiledHeader(self, lang, arch): + assert self.compile_headers + h = self.compiled_headers[lang] + if arch: + h += '.' 
+ arch + return h + + def GetInclude(self, lang, arch=None): + """Gets the cflags to include the prefix header for language |lang|.""" + if self.compile_headers and lang in self.compiled_headers: + return '-include %s' % self._CompiledHeader(lang, arch) + elif self.header: + return '-include %s' % self.header + else: + return '' + + def _Gch(self, lang, arch): + """Returns the actual file name of the prefix header for language |lang|.""" + assert self.compile_headers + return self._CompiledHeader(lang, arch) + '.gch' + + def GetObjDependencies(self, sources, objs, arch=None): + """Given a list of source files and the corresponding object files, returns + a list of (source, object, gch) tuples, where |gch| is the build-directory + relative path to the gch file each object file depends on. |compilable[i]| + has to be the source file belonging to |objs[i]|.""" + if not self.header or not self.compile_headers: + return [] + + result = [] + for source, obj in zip(sources, objs): + ext = os.path.splitext(source)[1] + lang = { + '.c': 'c', + '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc', + '.m': 'm', + '.mm': 'mm', + }.get(ext, None) + if lang: + result.append((source, obj, self._Gch(lang, arch))) + return result + + def GetPchBuildCommands(self, arch=None): + """Returns [(path_to_gch, language_flag, language, header)]. + |path_to_gch| and |header| are relative to the build directory. + """ + if not self.header or not self.compile_headers: + return [] + return [ + (self._Gch('c', arch), '-x c-header', 'c', self.header), + (self._Gch('cc', arch), '-x c++-header', 'cc', self.header), + (self._Gch('m', arch), '-x objective-c-header', 'm', self.header), + (self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header), + ] + + +def XcodeVersion(): + """Returns a tuple of version and build version of installed Xcode.""" + # `xcodebuild -version` output looks like + # Xcode 4.6.3 + # Build version 4H1503 + # or like + # Xcode 3.2.6 + # Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0 + # BuildVersion: 10M2518 + # Convert that to '0463', '4H1503'. + global XCODE_VERSION_CACHE + if XCODE_VERSION_CACHE: + return XCODE_VERSION_CACHE + try: + version_list = GetStdout(['xcodebuild', '-version']).splitlines() + # In some circumstances xcodebuild exits 0 but doesn't return + # the right results; for example, a user on 10.7 or 10.8 with + # a bogus path set via xcode-select + # In that case this may be a CLT-only install so fall back to + # checking that version. + if len(version_list) < 2: + raise GypError("xcodebuild returned unexpected results") + except: + version = CLTVersion() + if version: + version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0] + else: + raise GypError("No Xcode or CLT version detected!") + # The CLT has no build information, so we return an empty string. 
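+    # E.g. a hypothetical CLT-only install reporting '8.2.1' yields
+    #   version_list == ['8.2.1', '']
+    # which the normalization below turns into ('0821', '').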
+ version_list = [version, ''] + version = version_list[0] + build = version_list[-1] + # Be careful to convert "4.2" to "0420": + version = version.split()[-1].replace('.', '') + version = (version + '0' * (3 - len(version))).zfill(4) + if build: + build = build.split()[-1] + XCODE_VERSION_CACHE = (version, build) + return XCODE_VERSION_CACHE + + +# This function ported from the logic in Homebrew's CLT version check +def CLTVersion(): + """Returns the version of command-line tools from pkgutil.""" + # pkgutil output looks like + # package-id: com.apple.pkg.CLTools_Executables + # version: 5.0.1.0.1.1382131676 + # volume: / + # location: / + # install-time: 1382544035 + # groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group + STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo" + FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI" + MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables" + + regex = re.compile('version: (?P.+)') + for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]: + try: + output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key]) + return re.search(regex, output).groupdict()['version'] + except: + continue + + +def GetStdout(cmdlist): + """Returns the content of standard output returned by invoking |cmdlist|. + Raises |GypError| if the command return with a non-zero return code.""" + job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE) + out = job.communicate()[0] + if job.returncode != 0: + sys.stderr.write(out + '\n') + raise GypError('Error %d running %s' % (job.returncode, cmdlist[0])) + return out.rstrip('\n') + + +def MergeGlobalXcodeSettingsToSpec(global_dict, spec): + """Merges the global xcode_settings dictionary into each configuration of the + target represented by spec. For keys that are both in the global and the local + xcode_settings dict, the local key gets precendence. + """ + # The xcode generator special-cases global xcode_settings and does something + # that amounts to merging in the global xcode_settings into each local + # xcode_settings dict. + global_xcode_settings = global_dict.get('xcode_settings', {}) + for config in spec['configurations'].values(): + if 'xcode_settings' in config: + new_settings = global_xcode_settings.copy() + new_settings.update(config['xcode_settings']) + config['xcode_settings'] = new_settings + + +def IsMacBundle(flavor, spec): + """Returns if |spec| should be treated as a bundle. + + Bundles are directories with a certain subdirectory structure, instead of + just a single file. Bundle rules do not produce a binary but also package + resources into that directory.""" + is_mac_bundle = int(spec.get('mac_xctest_bundle', 0)) != 0 or \ + int(spec.get('mac_xcuitest_bundle', 0)) != 0 or \ + (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac') + + if is_mac_bundle: + assert spec['type'] != 'none', ( + 'mac_bundle targets cannot have type none (target "%s")' % + spec['target_name']) + return is_mac_bundle + + +def GetMacBundleResources(product_dir, xcode_settings, resources): + """Yields (output, resource) pairs for every resource in |resources|. + Only call this for mac bundle targets. + + Args: + product_dir: Path to the directory containing the output bundle, + relative to the build directory. + xcode_settings: The XcodeSettings of the current target. + resources: A list of bundle resources, relative to the build directory. 
+ """ + dest = os.path.join(product_dir, + xcode_settings.GetBundleResourceFolder()) + for res in resources: + output = dest + + # The make generator doesn't support it, so forbid it everywhere + # to keep the generators more interchangable. + assert ' ' not in res, ( + "Spaces in resource filenames not supported (%s)" % res) + + # Split into (path,file). + res_parts = os.path.split(res) + + # Now split the path into (prefix,maybe.lproj). + lproj_parts = os.path.split(res_parts[0]) + # If the resource lives in a .lproj bundle, add that to the destination. + if lproj_parts[1].endswith('.lproj'): + output = os.path.join(output, lproj_parts[1]) + + output = os.path.join(output, res_parts[1]) + # Compiled XIB files are referred to by .nib. + if output.endswith('.xib'): + output = os.path.splitext(output)[0] + '.nib' + # Compiled storyboard files are referred to by .storyboardc. + if output.endswith('.storyboard'): + output = os.path.splitext(output)[0] + '.storyboardc' + + yield output, res + + +def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path): + """Returns (info_plist, dest_plist, defines, extra_env), where: + * |info_plist| is the source plist path, relative to the + build directory, + * |dest_plist| is the destination plist path, relative to the + build directory, + * |defines| is a list of preprocessor defines (empty if the plist + shouldn't be preprocessed, + * |extra_env| is a dict of env variables that should be exported when + invoking |mac_tool copy-info-plist|. + + Only call this for mac bundle targets. + + Args: + product_dir: Path to the directory containing the output bundle, + relative to the build directory. + xcode_settings: The XcodeSettings of the current target. + gyp_to_build_path: A function that converts paths relative to the + current gyp file to paths relative to the build direcotry. + """ + info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE') + if not info_plist: + return None, None, [], {} + + # The make generator doesn't support it, so forbid it everywhere + # to keep the generators more interchangable. + assert ' ' not in info_plist, ( + "Spaces in Info.plist filenames not supported (%s)" % info_plist) + + info_plist = gyp_path_to_build_path(info_plist) + + # If explicitly set to preprocess the plist, invoke the C preprocessor and + # specify any defines as -D flags. + if xcode_settings.GetPerTargetSetting( + 'INFOPLIST_PREPROCESS', default='NO') == 'YES': + # Create an intermediate file based on the path. + defines = shlex.split(xcode_settings.GetPerTargetSetting( + 'INFOPLIST_PREPROCESSOR_DEFINITIONS', default='')) + else: + defines = [] + + dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath()) + extra_env = xcode_settings.GetPerTargetSettings() + + return info_plist, dest_plist, defines, extra_env + + +def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, + additional_settings=None): + """Return the environment variables that Xcode would set. See + http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153 + for a full list. + + Args: + xcode_settings: An XcodeSettings object. If this is None, this function + returns an empty dict. + built_products_dir: Absolute path to the built products dir. + srcroot: Absolute path to the source root. + configuration: The build configuration name. 
+ additional_settings: An optional dict with more values to add to the + result. + """ + + if not xcode_settings: return {} + + # This function is considered a friend of XcodeSettings, so let it reach into + # its implementation details. + spec = xcode_settings.spec + + # These are filled in on an as-needed basis. + env = { + 'BUILT_FRAMEWORKS_DIR' : built_products_dir, + 'BUILT_PRODUCTS_DIR' : built_products_dir, + 'CONFIGURATION' : configuration, + 'PRODUCT_NAME' : xcode_settings.GetProductName(), + # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME + 'SRCROOT' : srcroot, + 'SOURCE_ROOT': '${SRCROOT}', + # This is not true for static libraries, but currently the env is only + # written for bundles: + 'TARGET_BUILD_DIR' : built_products_dir, + 'TEMP_DIR' : '${TMPDIR}', + 'XCODE_VERSION_ACTUAL' : XcodeVersion()[0], + } + if xcode_settings.GetPerConfigSetting('SDKROOT', configuration): + env['SDKROOT'] = xcode_settings._SdkPath(configuration) + else: + env['SDKROOT'] = '' + + if xcode_settings.mac_toolchain_dir: + env['DEVELOPER_DIR'] = xcode_settings.mac_toolchain_dir + + if spec['type'] in ( + 'executable', 'static_library', 'shared_library', 'loadable_module'): + env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName() + env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath() + env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName() + mach_o_type = xcode_settings.GetMachOType() + if mach_o_type: + env['MACH_O_TYPE'] = mach_o_type + env['PRODUCT_TYPE'] = xcode_settings.GetProductType() + if xcode_settings._IsBundle(): + # xcodeproj_file.py sets the same Xcode subfolder value for this as for + # FRAMEWORKS_FOLDER_PATH so Xcode builds will actually use FFP's value. + env['BUILT_FRAMEWORKS_DIR'] = \ + os.path.join(built_products_dir + os.sep \ + + xcode_settings.GetBundleFrameworksFolderPath()) + env['CONTENTS_FOLDER_PATH'] = \ + xcode_settings.GetBundleContentsFolderPath() + env['EXECUTABLE_FOLDER_PATH'] = \ + xcode_settings.GetBundleExecutableFolderPath() + env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \ + xcode_settings.GetBundleResourceFolder() + env['JAVA_FOLDER_PATH'] = xcode_settings.GetBundleJavaFolderPath() + env['FRAMEWORKS_FOLDER_PATH'] = \ + xcode_settings.GetBundleFrameworksFolderPath() + env['SHARED_FRAMEWORKS_FOLDER_PATH'] = \ + xcode_settings.GetBundleSharedFrameworksFolderPath() + env['SHARED_SUPPORT_FOLDER_PATH'] = \ + xcode_settings.GetBundleSharedSupportFolderPath() + env['PLUGINS_FOLDER_PATH'] = xcode_settings.GetBundlePlugInsFolderPath() + env['XPCSERVICES_FOLDER_PATH'] = \ + xcode_settings.GetBundleXPCServicesFolderPath() + env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath() + env['WRAPPER_NAME'] = xcode_settings.GetWrapperName() + + install_name = xcode_settings.GetInstallName() + if install_name: + env['LD_DYLIB_INSTALL_NAME'] = install_name + install_name_base = xcode_settings.GetInstallNameBase() + if install_name_base: + env['DYLIB_INSTALL_NAME_BASE'] = install_name_base + if XcodeVersion() >= '0500' and not env.get('SDKROOT'): + sdk_root = xcode_settings._SdkRoot(configuration) + if not sdk_root: + sdk_root = xcode_settings._XcodeSdkPath('') + env['SDKROOT'] = sdk_root + + if not additional_settings: + additional_settings = {} + else: + # Flatten lists to strings. 
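+    # E.g. a hypothetical additional_settings value of ['-foo', '-bar'] is
+    # flattened to the string '-foo -bar' before the Xcode env dict is
+    # merged in.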
+ for k in additional_settings: + if not isinstance(additional_settings[k], str): + additional_settings[k] = ' '.join(additional_settings[k]) + additional_settings.update(env) + + for k in additional_settings: + additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k]) + + return additional_settings + + +def _NormalizeEnvVarReferences(str): + """Takes a string containing variable references in the form ${FOO}, $(FOO), + or $FOO, and returns a string with all variable references in the form ${FOO}. + """ + # $FOO -> ${FOO} + str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str) + + # $(FOO) -> ${FOO} + matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str) + for match in matches: + to_replace, variable = match + assert '$(' not in match, '$($(FOO)) variables not supported: ' + match + str = str.replace(to_replace, '${' + variable + '}') + + return str + + +def ExpandEnvVars(string, expansions): + """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the + expansions list. If the variable expands to something that references + another variable, this variable is expanded as well if it's in env -- + until no variables present in env are left.""" + for k, v in reversed(expansions): + string = string.replace('${' + k + '}', v) + string = string.replace('$(' + k + ')', v) + string = string.replace('$' + k, v) + return string + + +def _TopologicallySortedEnvVarKeys(env): + """Takes a dict |env| whose values are strings that can refer to other keys, + for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of + env such that key2 is after key1 in L if env[key2] refers to env[key1]. + + Throws an Exception in case of dependency cycles. + """ + # Since environment variables can refer to other variables, the evaluation + # order is important. Below is the logic to compute the dependency graph + # and sort it. + regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}') + def GetEdges(node): + # Use a definition of edges such that user_of_variable -> used_varible. + # This happens to be easier in this case, since a variable's + # definition contains all variables it references in a single string. + # We can then reverse the result of the topological sort at the end. + # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG)) + matches = set([v for v in regex.findall(env[node]) if v in env]) + for dependee in matches: + assert '${' not in dependee, 'Nested variables not supported: ' + dependee + return matches + + try: + # Topologically sort, and then reverse, because we used an edge definition + # that's inverted from the expected result of this function (see comment + # above). 
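+    # E.g. for a hypothetical env of {'FOO': '${BAR}/x', 'BAR': 'y'}, the
+    # order returned below is ['BAR', 'FOO'], so each variable appears after
+    # every variable it references.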
+ order = gyp.common.TopologicallySorted(env.keys(), GetEdges) + order.reverse() + return order + except gyp.common.CycleError, e: + raise GypError( + 'Xcode environment variables are cyclically dependent: ' + str(e.nodes)) + + +def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot, + configuration, additional_settings=None): + env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration, + additional_settings) + return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)] + + +def GetSpecPostbuildCommands(spec, quiet=False): + """Returns the list of postbuilds explicitly defined on |spec|, in a form + executable by a shell.""" + postbuilds = [] + for postbuild in spec.get('postbuilds', []): + if not quiet: + postbuilds.append('echo POSTBUILD\\(%s\\) %s' % ( + spec['target_name'], postbuild['postbuild_name'])) + postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action'])) + return postbuilds + + +def _HasIOSTarget(targets): + """Returns true if any target contains the iOS specific key + IPHONEOS_DEPLOYMENT_TARGET.""" + for target_dict in targets.values(): + for config in target_dict['configurations'].values(): + if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'): + return True + return False + + +def _AddIOSDeviceConfigurations(targets): + """Clone all targets and append -iphoneos to the name. Configure these targets + to build for iOS devices and use correct architectures for those builds.""" + for target_dict in targets.itervalues(): + toolset = target_dict['toolset'] + configs = target_dict['configurations'] + for config_name, simulator_config_dict in dict(configs).iteritems(): + iphoneos_config_dict = copy.deepcopy(simulator_config_dict) + configs[config_name + '-iphoneos'] = iphoneos_config_dict + configs[config_name + '-iphonesimulator'] = simulator_config_dict + if toolset == 'target': + simulator_config_dict['xcode_settings']['SDKROOT'] = 'iphonesimulator' + iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos' + return targets + +def CloneConfigurationForDeviceAndEmulator(target_dicts): + """If |target_dicts| contains any iOS targets, automatically create -iphoneos + targets for iOS device builds.""" + if _HasIOSTarget(target_dicts): + return _AddIOSDeviceConfigurations(target_dicts) + return target_dicts diff --git a/deps/libuv/build/gyp/pylib/gyp/xcode_ninja.py b/deps/libuv/build/gyp/pylib/gyp/xcode_ninja.py new file mode 100644 index 00000000..bc76ffff --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/xcode_ninja.py @@ -0,0 +1,289 @@ +# Copyright (c) 2014 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Xcode-ninja wrapper project file generator. + +This updates the data structures passed to the Xcode gyp generator to build +with ninja instead. The Xcode project itself is transformed into a list of +executable targets, each with a build step to build with ninja, and a target +with every source and resource file. This appears to sidestep some of the +major performance headaches experienced using complex projects and large number +of targets within Xcode. +""" + +import errno +import gyp.generator.ninja +import os +import re +import xml.sax.saxutils + + +def _WriteWorkspace(main_gyp, sources_gyp, params): + """ Create a workspace to wrap main and sources gyp paths. 
""" + (build_file_root, build_file_ext) = os.path.splitext(main_gyp) + workspace_path = build_file_root + '.xcworkspace' + options = params['options'] + if options.generator_output: + workspace_path = os.path.join(options.generator_output, workspace_path) + try: + os.makedirs(workspace_path) + except OSError, e: + if e.errno != errno.EEXIST: + raise + output_string = '\n' + \ + '\n' + for gyp_name in [main_gyp, sources_gyp]: + name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj' + name = xml.sax.saxutils.quoteattr("group:" + name) + output_string += ' \n' % name + output_string += '\n' + + workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata") + + try: + with open(workspace_file, 'r') as input_file: + input_string = input_file.read() + if input_string == output_string: + return + except IOError: + # Ignore errors if the file doesn't exist. + pass + + with open(workspace_file, 'w') as output_file: + output_file.write(output_string) + +def _TargetFromSpec(old_spec, params): + """ Create fake target for xcode-ninja wrapper. """ + # Determine ninja top level build dir (e.g. /path/to/out). + ninja_toplevel = None + jobs = 0 + if params: + options = params['options'] + ninja_toplevel = \ + os.path.join(options.toplevel_dir, + gyp.generator.ninja.ComputeOutputDir(params)) + jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0) + + target_name = old_spec.get('target_name') + product_name = old_spec.get('product_name', target_name) + product_extension = old_spec.get('product_extension') + + ninja_target = {} + ninja_target['target_name'] = target_name + ninja_target['product_name'] = product_name + if product_extension: + ninja_target['product_extension'] = product_extension + ninja_target['toolset'] = old_spec.get('toolset') + ninja_target['default_configuration'] = old_spec.get('default_configuration') + ninja_target['configurations'] = {} + + # Tell Xcode to look in |ninja_toplevel| for build products. 
+ new_xcode_settings = {} + if ninja_toplevel: + new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \ + "%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel + + if 'configurations' in old_spec: + for config in old_spec['configurations'].iterkeys(): + old_xcode_settings = \ + old_spec['configurations'][config].get('xcode_settings', {}) + if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings: + new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO" + new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \ + old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] + for key in ['BUNDLE_LOADER', 'TEST_HOST']: + if key in old_xcode_settings: + new_xcode_settings[key] = old_xcode_settings[key] + + ninja_target['configurations'][config] = {} + ninja_target['configurations'][config]['xcode_settings'] = \ + new_xcode_settings + + ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0) + ninja_target['mac_xctest_bundle'] = old_spec.get('mac_xctest_bundle', 0) + ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0) + ninja_target['ios_watchkit_extension'] = \ + old_spec.get('ios_watchkit_extension', 0) + ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0) + ninja_target['type'] = old_spec['type'] + if ninja_toplevel: + ninja_target['actions'] = [ + { + 'action_name': 'Compile and copy %s via ninja' % target_name, + 'inputs': [], + 'outputs': [], + 'action': [ + 'env', + 'PATH=%s' % os.environ['PATH'], + 'ninja', + '-C', + new_xcode_settings['CONFIGURATION_BUILD_DIR'], + target_name, + ], + 'message': 'Compile and copy %s via ninja' % target_name, + }, + ] + if jobs > 0: + ninja_target['actions'][0]['action'].extend(('-j', jobs)) + return ninja_target + +def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec): + """Limit targets for Xcode wrapper. + + Xcode sometimes performs poorly with too many targets, so only include + proper executable targets, with filters to customize. + Arguments: + target_extras: Regular expression to always add, matching any target. + executable_target_pattern: Regular expression limiting executable targets. + spec: Specifications for target. + """ + target_name = spec.get('target_name') + # Always include targets matching target_extras. + if target_extras is not None and re.search(target_extras, target_name): + return True + + # Otherwise just show executable targets and xc_tests. + if (int(spec.get('mac_xctest_bundle', 0)) != 0 or + (spec.get('type', '') == 'executable' and + spec.get('product_extension', '') != 'bundle')): + + # If there is a filter and the target does not match, exclude the target. + if executable_target_pattern is not None: + if not re.search(executable_target_pattern, target_name): + return False + return True + return False + +def CreateWrapper(target_list, target_dicts, data, params): + """Initialize targets for the ninja wrapper. + + This sets up the necessary variables in the targets to generate Xcode projects + that use ninja as an external builder. + Arguments: + target_list: List of target pairs: 'base/base.gyp:base'. + target_dicts: Dict of target properties keyed on target pair. + data: Dict of flattened build files keyed on gyp path. + params: Dict of global options for gyp. + """ + orig_gyp = params['build_files'][0] + for gyp_name, gyp_dict in data.iteritems(): + if gyp_name == orig_gyp: + depth = gyp_dict['_DEPTH'] + + # Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE + # and prepend .ninja before the .gyp extension. 
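  # Worked example (editorial; the file name is hypothetical): an original
  # build file 'build/all.gyp' with no 'xcode_ninja_main_gyp' flag produces a
  # wrapper main_gyp of 'build/all.ninja.gyp'.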
+ generator_flags = params.get('generator_flags', {}) + main_gyp = generator_flags.get('xcode_ninja_main_gyp', None) + if main_gyp is None: + (build_file_root, build_file_ext) = os.path.splitext(orig_gyp) + main_gyp = build_file_root + ".ninja" + build_file_ext + + # Create new |target_list|, |target_dicts| and |data| data structures. + new_target_list = [] + new_target_dicts = {} + new_data = {} + + # Set base keys needed for |data|. + new_data[main_gyp] = {} + new_data[main_gyp]['included_files'] = [] + new_data[main_gyp]['targets'] = [] + new_data[main_gyp]['xcode_settings'] = \ + data[orig_gyp].get('xcode_settings', {}) + + # Normally the xcode-ninja generator includes only valid executable targets. + # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to + # executable targets that match the pattern. (Default all) + executable_target_pattern = \ + generator_flags.get('xcode_ninja_executable_target_pattern', None) + + # For including other non-executable targets, add the matching target name + # to the |xcode_ninja_target_pattern| regular expression. (Default none) + target_extras = generator_flags.get('xcode_ninja_target_pattern', None) + + for old_qualified_target in target_list: + spec = target_dicts[old_qualified_target] + if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec): + # Add to new_target_list. + target_name = spec.get('target_name') + new_target_name = '%s:%s#target' % (main_gyp, target_name) + new_target_list.append(new_target_name) + + # Add to new_target_dicts. + new_target_dicts[new_target_name] = _TargetFromSpec(spec, params) + + # Add to new_data. + for old_target in data[old_qualified_target.split(':')[0]]['targets']: + if old_target['target_name'] == target_name: + new_data_target = {} + new_data_target['target_name'] = old_target['target_name'] + new_data_target['toolset'] = old_target['toolset'] + new_data[main_gyp]['targets'].append(new_data_target) + + # Create sources target. + sources_target_name = 'sources_for_indexing' + sources_target = _TargetFromSpec( + { 'target_name' : sources_target_name, + 'toolset': 'target', + 'default_configuration': 'Default', + 'mac_bundle': '0', + 'type': 'executable' + }, None) + + # Tell Xcode to look everywhere for headers. + sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } } + + # Put excluded files into the sources target so they can be opened in Xcode. + skip_excluded_files = \ + not generator_flags.get('xcode_ninja_list_excluded_files', True) + + sources = [] + for target, target_dict in target_dicts.iteritems(): + base = os.path.dirname(target) + files = target_dict.get('sources', []) + \ + target_dict.get('mac_bundle_resources', []) + + if not skip_excluded_files: + files.extend(target_dict.get('sources_excluded', []) + + target_dict.get('mac_bundle_resources_excluded', [])) + + for action in target_dict.get('actions', []): + files.extend(action.get('inputs', [])) + + if not skip_excluded_files: + files.extend(action.get('inputs_excluded', [])) + + # Remove files starting with $. These are mostly intermediate files for the + # build system. + files = [ file for file in files if not file.startswith('$')] + + # Make sources relative to root build file. + relative_path = os.path.dirname(main_gyp) + sources += [ os.path.relpath(os.path.join(base, file), relative_path) + for file in files ] + + sources_target['sources'] = sorted(set(sources)) + + # Put sources_to_index in it's own gyp. 
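  # Editorial illustration, continuing the hypothetical 'build/all.ninja.gyp'
  # example: sources_gyp becomes 'build/sources_for_indexing.gyp' and the
  # fully qualified name is
  # 'build/sources_for_indexing.gyp:sources_for_indexing#target'.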
+ sources_gyp = \ + os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp") + fully_qualified_target_name = \ + '%s:%s#target' % (sources_gyp, sources_target_name) + + # Add to new_target_list, new_target_dicts and new_data. + new_target_list.append(fully_qualified_target_name) + new_target_dicts[fully_qualified_target_name] = sources_target + new_data_target = {} + new_data_target['target_name'] = sources_target['target_name'] + new_data_target['_DEPTH'] = depth + new_data_target['toolset'] = "target" + new_data[sources_gyp] = {} + new_data[sources_gyp]['targets'] = [] + new_data[sources_gyp]['included_files'] = [] + new_data[sources_gyp]['xcode_settings'] = \ + data[orig_gyp].get('xcode_settings', {}) + new_data[sources_gyp]['targets'].append(new_data_target) + + # Write workspace to file. + _WriteWorkspace(main_gyp, sources_gyp, params) + return (new_target_list, new_target_dicts, new_data) diff --git a/deps/libuv/build/gyp/pylib/gyp/xcodeproj_file.py b/deps/libuv/build/gyp/pylib/gyp/xcodeproj_file.py new file mode 100644 index 00000000..e69235f7 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/xcodeproj_file.py @@ -0,0 +1,2994 @@ +# Copyright (c) 2012 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Xcode project file generator. + +This module is both an Xcode project file generator and a documentation of the +Xcode project file format. Knowledge of the project file format was gained +based on extensive experience with Xcode, and by making changes to projects in +Xcode.app and observing the resultant changes in the associated project files. + +XCODE PROJECT FILES + +The generator targets the file format as written by Xcode 3.2 (specifically, +3.2.6), but past experience has taught that the format has not changed +significantly in the past several years, and future versions of Xcode are able +to read older project files. + +Xcode project files are "bundled": the project "file" from an end-user's +perspective is actually a directory with an ".xcodeproj" extension. The +project file from this module's perspective is actually a file inside this +directory, always named "project.pbxproj". This file contains a complete +description of the project and is all that is needed to use the xcodeproj. +Other files contained in the xcodeproj directory are simply used to store +per-user settings, such as the state of various UI elements in the Xcode +application. + +The project.pbxproj file is a property list, stored in a format almost +identical to the NeXTstep property list format. The file is able to carry +Unicode data, and is encoded in UTF-8. The root element in the property list +is a dictionary that contains several properties of minimal interest, and two +properties of immense interest. The most important property is a dictionary +named "objects". The entire structure of the project is represented by the +children of this property. The objects dictionary is keyed by unique 96-bit +values represented by 24 uppercase hexadecimal characters. Each value in the +objects dictionary is itself a dictionary, describing an individual object. + +Each object in the dictionary is a member of a class, which is identified by +the "isa" property of each object. A variety of classes are represented in a +project file. Objects can refer to other objects by ID, using the 24-character +hexadecimal object key. A project's objects form a tree, with a root object +of class PBXProject at the root. 
As an example, the PBXProject object serves +as parent to an XCConfigurationList object defining the build configurations +used in the project, a PBXGroup object serving as a container for all files +referenced in the project, and a list of target objects, each of which defines +a target in the project. There are several different types of target object, +such as PBXNativeTarget and PBXAggregateTarget. In this module, this +relationship is expressed by having each target type derive from an abstract +base named XCTarget. + +The project.pbxproj file's root dictionary also contains a property, sibling to +the "objects" dictionary, named "rootObject". The value of rootObject is a +24-character object key referring to the root PBXProject object in the +objects dictionary. + +In Xcode, every file used as input to a target or produced as a final product +of a target must appear somewhere in the hierarchy rooted at the PBXGroup +object referenced by the PBXProject's mainGroup property. A PBXGroup is +generally represented as a folder in the Xcode application. PBXGroups can +contain other PBXGroups as well as PBXFileReferences, which are pointers to +actual files. + +Each XCTarget contains a list of build phases, represented in this module by +the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations +are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the +"Compile Sources" and "Link Binary With Libraries" phases displayed in the +Xcode application. Files used as input to these phases (for example, source +files in the former case and libraries and frameworks in the latter) are +represented by PBXBuildFile objects, referenced by elements of "files" lists +in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile +object as a "weak" reference: it does not "own" the PBXBuildFile, which is +owned by the root object's mainGroup or a descendant group. In most cases, the +layer of indirection between an XCBuildPhase and a PBXFileReference via a +PBXBuildFile appears extraneous, but there's actually one reason for this: +file-specific compiler flags are added to the PBXBuildFile object so as to +allow a single file to be a member of multiple targets while having distinct +compiler flags for each. These flags can be modified in the Xcode applciation +in the "Build" tab of a File Info window. + +When a project is open in the Xcode application, Xcode will rewrite it. As +such, this module is careful to adhere to the formatting used by Xcode, to +avoid insignificant changes appearing in the file when it is used in the +Xcode application. This will keep version control repositories happy, and +makes it possible to compare a project file used in Xcode to one generated by +this module to determine if any significant changes were made in the +application. + +Xcode has its own way of assigning 24-character identifiers to each object, +which is not duplicated here. Because the identifier only is only generated +once, when an object is created, and is then left unchanged, there is no need +to attempt to duplicate Xcode's behavior in this area. The generator is free +to select any identifier, even at random, to refer to the objects it creates, +and Xcode will retain those identifiers and use them when subsequently +rewriting the project file. 
However, the generator would choose new random +identifiers each time the project files are generated, leading to difficulties +comparing "used" project files to "pristine" ones produced by this module, +and causing the appearance of changes as every object identifier is changed +when updated projects are checked in to a version control repository. To +mitigate this problem, this module chooses identifiers in a more deterministic +way, by hashing a description of each object as well as its parent and ancestor +objects. This strategy should result in minimal "shift" in IDs as successive +generations of project files are produced. + +THIS MODULE + +This module introduces several classes, all derived from the XCObject class. +Nearly all of the "brains" are built into the XCObject class, which understands +how to create and modify objects, maintain the proper tree structure, compute +identifiers, and print objects. For the most part, classes derived from +XCObject need only provide a _schema class object, a dictionary that +expresses what properties objects of the class may contain. + +Given this structure, it's possible to build a minimal project file by creating +objects of the appropriate types and making the proper connections: + + config_list = XCConfigurationList() + group = PBXGroup() + project = PBXProject({'buildConfigurationList': config_list, + 'mainGroup': group}) + +With the project object set up, it can be added to an XCProjectFile object. +XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject +subclass that does not actually correspond to a class type found in a project +file. Rather, it is used to represent the project file's root dictionary. +Printing an XCProjectFile will print the entire project file, including the +full "objects" dictionary. + + project_file = XCProjectFile({'rootObject': project}) + project_file.ComputeIDs() + project_file.Print() + +Xcode project files are always encoded in UTF-8. This module will accept +strings of either the str class or the unicode class. Strings of class str +are assumed to already be encoded in UTF-8. Obviously, if you're just using +ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset. +Strings of class unicode are handled properly and encoded in UTF-8 when +a project file is output. +""" + +import gyp.common +import posixpath +import re +import struct +import sys + +# hashlib is supplied as of Python 2.5 as the replacement interface for sha +# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if +# available, avoiding a deprecation warning under 2.6. Import sha otherwise, +# preserving 2.4 compatibility. +try: + import hashlib + _new_sha1 = hashlib.sha1 +except ImportError: + import sha + _new_sha1 = sha.new + + +# See XCObject._EncodeString. This pattern is used to determine when a string +# can be printed unquoted. Strings that match this pattern may be printed +# unquoted. Strings that do not match must be quoted and may be further +# transformed to be properly encoded. Note that this expression matches the +# characters listed with "+", for 1 or more occurrences: if a string is empty, +# it must not match this pattern, because it needs to be encoded as "". +_unquoted = re.compile('^[A-Za-z0-9$./_]+$') + +# Strings that match this pattern are quoted regardless of what _unquoted says. +# Oddly, Xcode will quote any string with a run of three or more underscores. 
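# Editorial examples (not part of the original source) of how these two
# patterns interact in _EncodeString below:
#   'libuv.a'   -> emitted unquoted (matches _unquoted)
#   'foo bar'   -> emitted as "foo bar" (the space fails _unquoted)
#   'a___b'     -> emitted as "a___b" in quotes (matches _unquoted, but the
#                  triple underscore matches _quoted and forces quoting)
#   ''          -> emitted as "" (an empty string never matches _unquoted)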
+_quoted = re.compile('___') + +# This pattern should match any character that needs to be escaped by +# XCObject._EncodeString. See that function. +_escaped = re.compile('[\\\\"]|[\x00-\x1f]') + + +# Used by SourceTreeAndPathFromPath +_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$') + +def SourceTreeAndPathFromPath(input_path): + """Given input_path, returns a tuple with sourceTree and path values. + + Examples: + input_path (source_tree, output_path) + '$(VAR)/path' ('VAR', 'path') + '$(VAR)' ('VAR', None) + 'path' (None, 'path') + """ + + source_group_match = _path_leading_variable.match(input_path) + if source_group_match: + source_tree = source_group_match.group(1) + output_path = source_group_match.group(3) # This may be None. + else: + source_tree = None + output_path = input_path + + return (source_tree, output_path) + +def ConvertVariablesToShellSyntax(input_string): + return re.sub(r'\$\((.*?)\)', '${\\1}', input_string) + +class XCObject(object): + """The abstract base of all class types used in Xcode project files. + + Class variables: + _schema: A dictionary defining the properties of this class. The keys to + _schema are string property keys as used in project files. Values + are a list of four or five elements: + [ is_list, property_type, is_strong, is_required, default ] + is_list: True if the property described is a list, as opposed + to a single element. + property_type: The type to use as the value of the property, + or if is_list is True, the type to use for each + element of the value's list. property_type must + be an XCObject subclass, or one of the built-in + types str, int, or dict. + is_strong: If property_type is an XCObject subclass, is_strong + is True to assert that this class "owns," or serves + as parent, to the property value (or, if is_list is + True, values). is_strong must be False if + property_type is not an XCObject subclass. + is_required: True if the property is required for the class. + Note that is_required being True does not preclude + an empty string ("", in the case of property_type + str) or list ([], in the case of is_list True) from + being set for the property. + default: Optional. If is_requried is True, default may be set + to provide a default value for objects that do not supply + their own value. If is_required is True and default + is not provided, users of the class must supply their own + value for the property. + Note that although the values of the array are expressed in + boolean terms, subclasses provide values as integers to conserve + horizontal space. + _should_print_single_line: False in XCObject. Subclasses whose objects + should be written to the project file in the + alternate single-line format, such as + PBXFileReference and PBXBuildFile, should + set this to True. + _encode_transforms: Used by _EncodeString to encode unprintable characters. + The index into this list is the ordinal of the + character to transform; each value is a string + used to represent the character in the output. XCObject + provides an _encode_transforms list suitable for most + XCObject subclasses. + _alternate_encode_transforms: Provided for subclasses that wish to use + the alternate encoding rules. Xcode seems + to use these rules when printing objects in + single-line format. Subclasses that desire + this behavior should set _encode_transforms + to _alternate_encode_transforms. + _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs + to construct this object's ID. 
Most classes that need custom + hashing behavior should do it by overriding Hashables, + but in some cases an object's parent may wish to push a + hashable value into its child, and it can do so by appending + to _hashables. + Attributes: + id: The object's identifier, a 24-character uppercase hexadecimal string. + Usually, objects being created should not set id until the entire + project file structure is built. At that point, UpdateIDs() should + be called on the root object to assign deterministic values for id to + each object in the tree. + parent: The object's parent. This is set by a parent XCObject when a child + object is added to it. + _properties: The object's property dictionary. An object's properties are + described by its class' _schema variable. + """ + + _schema = {} + _should_print_single_line = False + + # See _EncodeString. + _encode_transforms = [] + i = 0 + while i < ord(' '): + _encode_transforms.append('\\U%04x' % i) + i = i + 1 + _encode_transforms[7] = '\\a' + _encode_transforms[8] = '\\b' + _encode_transforms[9] = '\\t' + _encode_transforms[10] = '\\n' + _encode_transforms[11] = '\\v' + _encode_transforms[12] = '\\f' + _encode_transforms[13] = '\\n' + + _alternate_encode_transforms = list(_encode_transforms) + _alternate_encode_transforms[9] = chr(9) + _alternate_encode_transforms[10] = chr(10) + _alternate_encode_transforms[11] = chr(11) + + def __init__(self, properties=None, id=None, parent=None): + self.id = id + self.parent = parent + self._properties = {} + self._hashables = [] + self._SetDefaultsFromSchema() + self.UpdateProperties(properties) + + def __repr__(self): + try: + name = self.Name() + except NotImplementedError: + return '<%s at 0x%x>' % (self.__class__.__name__, id(self)) + return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) + + def Copy(self): + """Make a copy of this object. + + The new object will have its own copy of lists and dicts. Any XCObject + objects owned by this object (marked "strong") will be copied in the + new object, even those found in lists. If this object has any weak + references to other XCObjects, the same references are added to the new + object without making a copy. + """ + + that = self.__class__(id=self.id, parent=self.parent) + for key, value in self._properties.iteritems(): + is_strong = self._schema[key][2] + + if isinstance(value, XCObject): + if is_strong: + new_value = value.Copy() + new_value.parent = that + that._properties[key] = new_value + else: + that._properties[key] = value + elif isinstance(value, str) or isinstance(value, unicode) or \ + isinstance(value, int): + that._properties[key] = value + elif isinstance(value, list): + if is_strong: + # If is_strong is True, each element is an XCObject, so it's safe to + # call Copy. + that._properties[key] = [] + for item in value: + new_item = item.Copy() + new_item.parent = that + that._properties[key].append(new_item) + else: + that._properties[key] = value[:] + elif isinstance(value, dict): + # dicts are never strong. + if is_strong: + raise TypeError('Strong dict for key ' + key + ' in ' + \ + self.__class__.__name__) + else: + that._properties[key] = value.copy() + else: + raise TypeError('Unexpected type ' + value.__class__.__name__ + \ + ' for key ' + key + ' in ' + self.__class__.__name__) + + return that + + def Name(self): + """Return the name corresponding to an object. + + Not all objects necessarily need to be nameable, and not all that do have + a "name" property. Override as needed. 
+ """ + + # If the schema indicates that "name" is required, try to access the + # property even if it doesn't exist. This will result in a KeyError + # being raised for the property that should be present, which seems more + # appropriate than NotImplementedError in this case. + if 'name' in self._properties or \ + ('name' in self._schema and self._schema['name'][3]): + return self._properties['name'] + + raise NotImplementedError(self.__class__.__name__ + ' must implement Name') + + def Comment(self): + """Return a comment string for the object. + + Most objects just use their name as the comment, but PBXProject uses + different values. + + The returned comment is not escaped and does not have any comment marker + strings applied to it. + """ + + return self.Name() + + def Hashables(self): + hashables = [self.__class__.__name__] + + name = self.Name() + if name != None: + hashables.append(name) + + hashables.extend(self._hashables) + + return hashables + + def HashablesForChild(self): + return None + + def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None): + """Set "id" properties deterministically. + + An object's "id" property is set based on a hash of its class type and + name, as well as the class type and name of all ancestor objects. As + such, it is only advisable to call ComputeIDs once an entire project file + tree is built. + + If recursive is True, recurse into all descendant objects and update their + hashes. + + If overwrite is True, any existing value set in the "id" property will be + replaced. + """ + + def _HashUpdate(hash, data): + """Update hash with data's length and contents. + + If the hash were updated only with the value of data, it would be + possible for clowns to induce collisions by manipulating the names of + their objects. By adding the length, it's exceedingly less likely that + ID collisions will be encountered, intentionally or not. + """ + + hash.update(struct.pack('>i', len(data))) + hash.update(data) + + if seed_hash is None: + seed_hash = _new_sha1() + + hash = seed_hash.copy() + + hashables = self.Hashables() + assert len(hashables) > 0 + for hashable in hashables: + _HashUpdate(hash, hashable) + + if recursive: + hashables_for_child = self.HashablesForChild() + if hashables_for_child is None: + child_hash = hash + else: + assert len(hashables_for_child) > 0 + child_hash = seed_hash.copy() + for hashable in hashables_for_child: + _HashUpdate(child_hash, hashable) + + for child in self.Children(): + child.ComputeIDs(recursive, overwrite, child_hash) + + if overwrite or self.id is None: + # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is + # is 160 bits. Instead of throwing out 64 bits of the digest, xor them + # into the portion that gets used. + assert hash.digest_size % 4 == 0 + digest_int_count = hash.digest_size / 4 + digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest()) + id_ints = [0, 0, 0] + for index in xrange(0, digest_int_count): + id_ints[index % 3] ^= digest_ints[index] + self.id = '%08X%08X%08X' % tuple(id_ints) + + def EnsureNoIDCollisions(self): + """Verifies that no two objects have the same ID. Checks all descendants. 
+ """ + + ids = {} + descendants = self.Descendants() + for descendant in descendants: + if descendant.id in ids: + other = ids[descendant.id] + raise KeyError( + 'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \ + (descendant.id, str(descendant._properties), + str(other._properties), self._properties['rootObject'].Name())) + ids[descendant.id] = descendant + + def Children(self): + """Returns a list of all of this object's owned (strong) children.""" + + children = [] + for property, attributes in self._schema.iteritems(): + (is_list, property_type, is_strong) = attributes[0:3] + if is_strong and property in self._properties: + if not is_list: + children.append(self._properties[property]) + else: + children.extend(self._properties[property]) + return children + + def Descendants(self): + """Returns a list of all of this object's descendants, including this + object. + """ + + children = self.Children() + descendants = [self] + for child in children: + descendants.extend(child.Descendants()) + return descendants + + def PBXProjectAncestor(self): + # The base case for recursion is defined at PBXProject.PBXProjectAncestor. + if self.parent: + return self.parent.PBXProjectAncestor() + return None + + def _EncodeComment(self, comment): + """Encodes a comment to be placed in the project file output, mimicing + Xcode behavior. + """ + + # This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If + # the string already contains a "*/", it is turned into "(*)/". This keeps + # the file writer from outputting something that would be treated as the + # end of a comment in the middle of something intended to be entirely a + # comment. + + return '/* ' + comment.replace('*/', '(*)/') + ' */' + + def _EncodeTransform(self, match): + # This function works closely with _EncodeString. It will only be called + # by re.sub with match.group(0) containing a character matched by the + # the _escaped expression. + char = match.group(0) + + # Backslashes (\) and quotation marks (") are always replaced with a + # backslash-escaped version of the same. Everything else gets its + # replacement from the class' _encode_transforms array. + if char == '\\': + return '\\\\' + if char == '"': + return '\\"' + return self._encode_transforms[ord(char)] + + def _EncodeString(self, value): + """Encodes a string to be placed in the project file output, mimicing + Xcode behavior. + """ + + # Use quotation marks when any character outside of the range A-Z, a-z, 0-9, + # $ (dollar sign), . (period), and _ (underscore) is present. Also use + # quotation marks to represent empty strings. + # + # Escape " (double-quote) and \ (backslash) by preceding them with a + # backslash. + # + # Some characters below the printable ASCII range are encoded specially: + # 7 ^G BEL is encoded as "\a" + # 8 ^H BS is encoded as "\b" + # 11 ^K VT is encoded as "\v" + # 12 ^L NP is encoded as "\f" + # 127 ^? DEL is passed through as-is without escaping + # - In PBXFileReference and PBXBuildFile objects: + # 9 ^I HT is passed through as-is without escaping + # 10 ^J NL is passed through as-is without escaping + # 13 ^M CR is passed through as-is without escaping + # - In other objects: + # 9 ^I HT is encoded as "\t" + # 10 ^J NL is encoded as "\n" + # 13 ^M CR is encoded as "\n" rendering it indistinguishable from + # 10 ^J NL + # All other characters within the ASCII control character range (0 through + # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point + # in hexadecimal. 
For example, character 14 (^N SO) is encoded as "\U000e". + # Characters above the ASCII range are passed through to the output encoded + # as UTF-8 without any escaping. These mappings are contained in the + # class' _encode_transforms list. + + if _unquoted.search(value) and not _quoted.search(value): + return value + + return '"' + _escaped.sub(self._EncodeTransform, value) + '"' + + def _XCPrint(self, file, tabs, line): + file.write('\t' * tabs + line) + + def _XCPrintableValue(self, tabs, value, flatten_list=False): + """Returns a representation of value that may be printed in a project file, + mimicing Xcode's behavior. + + _XCPrintableValue can handle str and int values, XCObjects (which are + made printable by returning their id property), and list and dict objects + composed of any of the above types. When printing a list or dict, and + _should_print_single_line is False, the tabs parameter is used to determine + how much to indent the lines corresponding to the items in the list or + dict. + + If flatten_list is True, single-element lists will be transformed into + strings. + """ + + printable = '' + comment = None + + if self._should_print_single_line: + sep = ' ' + element_tabs = '' + end_tabs = '' + else: + sep = '\n' + element_tabs = '\t' * (tabs + 1) + end_tabs = '\t' * tabs + + if isinstance(value, XCObject): + printable += value.id + comment = value.Comment() + elif isinstance(value, str): + printable += self._EncodeString(value) + elif isinstance(value, unicode): + printable += self._EncodeString(value.encode('utf-8')) + elif isinstance(value, int): + printable += str(value) + elif isinstance(value, list): + if flatten_list and len(value) <= 1: + if len(value) == 0: + printable += self._EncodeString('') + else: + printable += self._EncodeString(value[0]) + else: + printable = '(' + sep + for item in value: + printable += element_tabs + \ + self._XCPrintableValue(tabs + 1, item, flatten_list) + \ + ',' + sep + printable += end_tabs + ')' + elif isinstance(value, dict): + printable = '{' + sep + for item_key, item_value in sorted(value.iteritems()): + printable += element_tabs + \ + self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \ + self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \ + sep + printable += end_tabs + '}' + else: + raise TypeError("Can't make " + value.__class__.__name__ + ' printable') + + if comment != None: + printable += ' ' + self._EncodeComment(comment) + + return printable + + def _XCKVPrint(self, file, tabs, key, value): + """Prints a key and value, members of an XCObject's _properties dictionary, + to file. + + tabs is an int identifying the indentation level. If the class' + _should_print_single_line variable is True, tabs is ignored and the + key-value pair will be followed by a space insead of a newline. + """ + + if self._should_print_single_line: + printable = '' + after_kv = ' ' + else: + printable = '\t' * tabs + after_kv = '\n' + + # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy + # objects without comments. Sometimes it prints them with comments, but + # the majority of the time, it doesn't. To avoid unnecessary changes to + # the project file after Xcode opens it, don't write comments for + # remoteGlobalIDString. 
This is a sucky hack and it would certainly be + # cleaner to extend the schema to indicate whether or not a comment should + # be printed, but since this is the only case where the problem occurs and + # Xcode itself can't seem to make up its mind, the hack will suffice. + # + # Also see PBXContainerItemProxy._schema['remoteGlobalIDString']. + if key == 'remoteGlobalIDString' and isinstance(self, + PBXContainerItemProxy): + value_to_print = value.id + else: + value_to_print = value + + # PBXBuildFile's settings property is represented in the output as a dict, + # but a hack here has it represented as a string. Arrange to strip off the + # quotes so that it shows up in the output as expected. + if key == 'settings' and isinstance(self, PBXBuildFile): + strip_value_quotes = True + else: + strip_value_quotes = False + + # In another one-off, let's set flatten_list on buildSettings properties + # of XCBuildConfiguration objects, because that's how Xcode treats them. + if key == 'buildSettings' and isinstance(self, XCBuildConfiguration): + flatten_list = True + else: + flatten_list = False + + try: + printable_key = self._XCPrintableValue(tabs, key, flatten_list) + printable_value = self._XCPrintableValue(tabs, value_to_print, + flatten_list) + if strip_value_quotes and len(printable_value) > 1 and \ + printable_value[0] == '"' and printable_value[-1] == '"': + printable_value = printable_value[1:-1] + printable += printable_key + ' = ' + printable_value + ';' + after_kv + except TypeError, e: + gyp.common.ExceptionAppend(e, + 'while printing key "%s"' % key) + raise + + self._XCPrint(file, 0, printable) + + def Print(self, file=sys.stdout): + """Prints a reprentation of this object to file, adhering to Xcode output + formatting. + """ + + self.VerifyHasRequiredProperties() + + if self._should_print_single_line: + # When printing an object in a single line, Xcode doesn't put any space + # between the beginning of a dictionary (or presumably a list) and the + # first contained item, so you wind up with snippets like + # ...CDEF = {isa = PBXFileReference; fileRef = 0123... + # If it were me, I would have put a space in there after the opening + # curly, but I guess this is just another one of those inconsistencies + # between how Xcode prints PBXFileReference and PBXBuildFile objects as + # compared to other objects. Mimic Xcode's behavior here by using an + # empty string for sep. + sep = '' + end_tabs = 0 + else: + sep = '\n' + end_tabs = 2 + + # Start the object. For example, '\t\tPBXProject = {\n'. + self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep) + + # "isa" isn't in the _properties dictionary, it's an intrinsic property + # of the class which the object belongs to. Xcode always outputs "isa" + # as the first element of an object dictionary. + self._XCKVPrint(file, 3, 'isa', self.__class__.__name__) + + # The remaining elements of an object dictionary are sorted alphabetically. + for property, value in sorted(self._properties.iteritems()): + self._XCKVPrint(file, 3, property, value) + + # End the object. + self._XCPrint(file, end_tabs, '};\n') + + def UpdateProperties(self, properties, do_copy=False): + """Merge the supplied properties into the _properties dictionary. + + The input properties must adhere to the class schema or a KeyError or + TypeError exception will be raised. If adding an object of an XCObject + subclass and the schema indicates a strong relationship, the object's + parent will be set to this object. 
+ + If do_copy is True, then lists, dicts, strong-owned XCObjects, and + strong-owned XCObjects in lists will be copied instead of having their + references added. + """ + + if properties is None: + return + + for property, value in properties.iteritems(): + # Make sure the property is in the schema. + if not property in self._schema: + raise KeyError(property + ' not in ' + self.__class__.__name__) + + # Make sure the property conforms to the schema. + (is_list, property_type, is_strong) = self._schema[property][0:3] + if is_list: + if value.__class__ != list: + raise TypeError( + property + ' of ' + self.__class__.__name__ + \ + ' must be list, not ' + value.__class__.__name__) + for item in value: + if not isinstance(item, property_type) and \ + not (item.__class__ == unicode and property_type == str): + # Accept unicode where str is specified. str is treated as + # UTF-8-encoded. + raise TypeError( + 'item of ' + property + ' of ' + self.__class__.__name__ + \ + ' must be ' + property_type.__name__ + ', not ' + \ + item.__class__.__name__) + elif not isinstance(value, property_type) and \ + not (value.__class__ == unicode and property_type == str): + # Accept unicode where str is specified. str is treated as + # UTF-8-encoded. + raise TypeError( + property + ' of ' + self.__class__.__name__ + ' must be ' + \ + property_type.__name__ + ', not ' + value.__class__.__name__) + + # Checks passed, perform the assignment. + if do_copy: + if isinstance(value, XCObject): + if is_strong: + self._properties[property] = value.Copy() + else: + self._properties[property] = value + elif isinstance(value, str) or isinstance(value, unicode) or \ + isinstance(value, int): + self._properties[property] = value + elif isinstance(value, list): + if is_strong: + # If is_strong is True, each element is an XCObject, so it's safe + # to call Copy. + self._properties[property] = [] + for item in value: + self._properties[property].append(item.Copy()) + else: + self._properties[property] = value[:] + elif isinstance(value, dict): + self._properties[property] = value.copy() + else: + raise TypeError("Don't know how to copy a " + \ + value.__class__.__name__ + ' object for ' + \ + property + ' in ' + self.__class__.__name__) + else: + self._properties[property] = value + + # Set up the child's back-reference to this object. Don't use |value| + # any more because it may not be right if do_copy is true. + if is_strong: + if not is_list: + self._properties[property].parent = self + else: + for item in self._properties[property]: + item.parent = self + + def HasProperty(self, key): + return key in self._properties + + def GetProperty(self, key): + return self._properties[key] + + def SetProperty(self, key, value): + self.UpdateProperties({key: value}) + + def DelProperty(self, key): + if key in self._properties: + del self._properties[key] + + def AppendProperty(self, key, value): + # TODO(mark): Support ExtendProperty too (and make this call that)? + + # Schema validation. + if not key in self._schema: + raise KeyError(key + ' not in ' + self.__class__.__name__) + + (is_list, property_type, is_strong) = self._schema[key][0:3] + if not is_list: + raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list') + if not isinstance(value, property_type): + raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \ + ' must be ' + property_type.__name__ + ', not ' + \ + value.__class__.__name__) + + # If the property doesn't exist yet, create a new empty list to receive the + # item. 
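    # Editorial illustration with a hypothetical group: a first call such as
    # group.AppendProperty('children', child_ref) on a fresh PBXGroup creates
    # the 'children' list on demand; subsequent calls simply append to it.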
+ if not key in self._properties: + self._properties[key] = [] + + # Set up the ownership link. + if is_strong: + value.parent = self + + # Store the item. + self._properties[key].append(value) + + def VerifyHasRequiredProperties(self): + """Ensure that all properties identified as required by the schema are + set. + """ + + # TODO(mark): A stronger verification mechanism is needed. Some + # subclasses need to perform validation beyond what the schema can enforce. + for property, attributes in self._schema.iteritems(): + (is_list, property_type, is_strong, is_required) = attributes[0:4] + if is_required and not property in self._properties: + raise KeyError(self.__class__.__name__ + ' requires ' + property) + + def _SetDefaultsFromSchema(self): + """Assign object default values according to the schema. This will not + overwrite properties that have already been set.""" + + defaults = {} + for property, attributes in self._schema.iteritems(): + (is_list, property_type, is_strong, is_required) = attributes[0:4] + if is_required and len(attributes) >= 5 and \ + not property in self._properties: + default = attributes[4] + + defaults[property] = default + + if len(defaults) > 0: + # Use do_copy=True so that each new object gets its own copy of strong + # objects, lists, and dicts. + self.UpdateProperties(defaults, do_copy=True) + + +class XCHierarchicalElement(XCObject): + """Abstract base for PBXGroup and PBXFileReference. Not represented in a + project file.""" + + # TODO(mark): Do name and path belong here? Probably so. + # If path is set and name is not, name may have a default value. Name will + # be set to the basename of path, if the basename of path is different from + # the full value of path. If path is already just a leaf name, name will + # not be set. + _schema = XCObject._schema.copy() + _schema.update({ + 'comments': [0, str, 0, 0], + 'fileEncoding': [0, str, 0, 0], + 'includeInIndex': [0, int, 0, 0], + 'indentWidth': [0, int, 0, 0], + 'lineEnding': [0, int, 0, 0], + 'sourceTree': [0, str, 0, 1, ''], + 'tabWidth': [0, int, 0, 0], + 'usesTabs': [0, int, 0, 0], + 'wrapsLines': [0, int, 0, 0], + }) + + def __init__(self, properties=None, id=None, parent=None): + # super + XCObject.__init__(self, properties, id, parent) + if 'path' in self._properties and not 'name' in self._properties: + path = self._properties['path'] + name = posixpath.basename(path) + if name != '' and path != name: + self.SetProperty('name', name) + + if 'path' in self._properties and \ + (not 'sourceTree' in self._properties or \ + self._properties['sourceTree'] == ''): + # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take + # the variable out and make the path be relative to that variable by + # assigning the variable name as the sourceTree. + (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path']) + if source_tree != None: + self._properties['sourceTree'] = source_tree + if path != None: + self._properties['path'] = path + if source_tree != None and path is None and \ + not 'name' in self._properties: + # The path was of the form "$(SDKROOT)" with no path following it. + # This object is now relative to that variable, so it has no path + # attribute of its own. It does, however, keep a name. 
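        # Editorial example of the two outcomes above:
        # '$(SDKROOT)/usr/lib/libz.dylib' becomes sourceTree 'SDKROOT' with
        # path 'usr/lib/libz.dylib', while a bare '$(SDKROOT)' reaches this
        # point with no remaining path and keeps only the name 'SDKROOT'.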
+ del self._properties['path'] + self._properties['name'] = source_tree + + def Name(self): + if 'name' in self._properties: + return self._properties['name'] + elif 'path' in self._properties: + return self._properties['path'] + else: + # This happens in the case of the root PBXGroup. + return None + + def Hashables(self): + """Custom hashables for XCHierarchicalElements. + + XCHierarchicalElements are special. Generally, their hashes shouldn't + change if the paths don't change. The normal XCObject implementation of + Hashables adds a hashable for each object, which means that if + the hierarchical structure changes (possibly due to changes caused when + TakeOverOnlyChild runs and encounters slight changes in the hierarchy), + the hashes will change. For example, if a project file initially contains + a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent + a/b. If someone later adds a/f2 to the project file, a/b can no longer be + collapsed, and f1 winds up with parent b and grandparent a. That would + be sufficient to change f1's hash. + + To counteract this problem, hashables for all XCHierarchicalElements except + for the main group (which has neither a name nor a path) are taken to be + just the set of path components. Because hashables are inherited from + parents, this provides assurance that a/b/f1 has the same set of hashables + whether its parent is b or a/b. + + The main group is a special case. As it is permitted to have no name or + path, it is permitted to use the standard XCObject hash mechanism. This + is not considered a problem because there can be only one main group. + """ + + if self == self.PBXProjectAncestor()._properties['mainGroup']: + # super + return XCObject.Hashables(self) + + hashables = [] + + # Put the name in first, ensuring that if TakeOverOnlyChild collapses + # children into a top-level group like "Source", the name always goes + # into the list of hashables without interfering with path components. + if 'name' in self._properties: + # Make it less likely for people to manipulate hashes by following the + # pattern of always pushing an object type value onto the list first. + hashables.append(self.__class__.__name__ + '.name') + hashables.append(self._properties['name']) + + # NOTE: This still has the problem that if an absolute path is encountered, + # including paths with a sourceTree, they'll still inherit their parents' + # hashables, even though the paths aren't relative to their parents. This + # is not expected to be much of a problem in practice. + path = self.PathFromSourceTreeAndPath() + if path != None: + components = path.split(posixpath.sep) + for component in components: + hashables.append(self.__class__.__name__ + '.path') + hashables.append(component) + + hashables.extend(self._hashables) + + return hashables + + def Compare(self, other): + # Allow comparison of these types. PBXGroup has the highest sort rank; + # PBXVariantGroup is treated as equal to PBXFileReference. + valid_class_types = { + PBXFileReference: 'file', + PBXGroup: 'group', + PBXVariantGroup: 'file', + } + self_type = valid_class_types[self.__class__] + other_type = valid_class_types[other.__class__] + + if self_type == other_type: + # If the two objects are of the same sort rank, compare their names. + return cmp(self.Name(), other.Name()) + + # Otherwise, sort groups before everything else. 
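    # Editorial illustration: comparing a PBXGroup named 'src' against a
    # PBXFileReference named 'a.c' returns -1 here (groups sort first), while
    # two PBXFileReferences fall through to the name comparison above.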
+ if self_type == 'group': + return -1 + return 1 + + def CompareRootGroup(self, other): + # This function should be used only to compare direct children of the + # containing PBXProject's mainGroup. These groups should appear in the + # listed order. + # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the + # generator should have a way of influencing this list rather than having + # to hardcode for the generator here. + order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products', + 'Build'] + + # If the groups aren't in the listed order, do a name comparison. + # Otherwise, groups in the listed order should come before those that + # aren't. + self_name = self.Name() + other_name = other.Name() + self_in = isinstance(self, PBXGroup) and self_name in order + other_in = isinstance(self, PBXGroup) and other_name in order + if not self_in and not other_in: + return self.Compare(other) + if self_name in order and not other_name in order: + return -1 + if other_name in order and not self_name in order: + return 1 + + # If both groups are in the listed order, go by the defined order. + self_index = order.index(self_name) + other_index = order.index(other_name) + if self_index < other_index: + return -1 + if self_index > other_index: + return 1 + return 0 + + def PathFromSourceTreeAndPath(self): + # Turn the object's sourceTree and path properties into a single flat + # string of a form comparable to the path parameter. If there's a + # sourceTree property other than "", wrap it in $(...) for the + # comparison. + components = [] + if self._properties['sourceTree'] != '': + components.append('$(' + self._properties['sourceTree'] + ')') + if 'path' in self._properties: + components.append(self._properties['path']) + + if len(components) > 0: + return posixpath.join(*components) + + return None + + def FullPath(self): + # Returns a full path to self relative to the project file, or relative + # to some other source tree. Start with self, and walk up the chain of + # parents prepending their paths, if any, until no more parents are + # available (project-relative path) or until a path relative to some + # source tree is found. + xche = self + path = None + while isinstance(xche, XCHierarchicalElement) and \ + (path is None or \ + (not path.startswith('/') and not path.startswith('$'))): + this_path = xche.PathFromSourceTreeAndPath() + if this_path != None and path != None: + path = posixpath.join(this_path, path) + elif this_path != None: + path = this_path + xche = xche.parent + + return path + + +class PBXGroup(XCHierarchicalElement): + """ + Attributes: + _children_by_path: Maps pathnames of children of this PBXGroup to the + actual child XCHierarchicalElement objects. + _variant_children_by_name_and_path: Maps (name, path) tuples of + PBXVariantGroup children to the actual child PBXVariantGroup objects. 
+ """ + + _schema = XCHierarchicalElement._schema.copy() + _schema.update({ + 'children': [1, XCHierarchicalElement, 1, 1, []], + 'name': [0, str, 0, 0], + 'path': [0, str, 0, 0], + }) + + def __init__(self, properties=None, id=None, parent=None): + # super + XCHierarchicalElement.__init__(self, properties, id, parent) + self._children_by_path = {} + self._variant_children_by_name_and_path = {} + for child in self._properties.get('children', []): + self._AddChildToDicts(child) + + def Hashables(self): + # super + hashables = XCHierarchicalElement.Hashables(self) + + # It is not sufficient to just rely on name and parent to build a unique + # hashable : a node could have two child PBXGroup sharing a common name. + # To add entropy the hashable is enhanced with the names of all its + # children. + for child in self._properties.get('children', []): + child_name = child.Name() + if child_name != None: + hashables.append(child_name) + + return hashables + + def HashablesForChild(self): + # To avoid a circular reference the hashables used to compute a child id do + # not include the child names. + return XCHierarchicalElement.Hashables(self) + + def _AddChildToDicts(self, child): + # Sets up this PBXGroup object's dicts to reference the child properly. + child_path = child.PathFromSourceTreeAndPath() + if child_path: + if child_path in self._children_by_path: + raise ValueError('Found multiple children with path ' + child_path) + self._children_by_path[child_path] = child + + if isinstance(child, PBXVariantGroup): + child_name = child._properties.get('name', None) + key = (child_name, child_path) + if key in self._variant_children_by_name_and_path: + raise ValueError('Found multiple PBXVariantGroup children with ' + \ + 'name ' + str(child_name) + ' and path ' + \ + str(child_path)) + self._variant_children_by_name_and_path[key] = child + + def AppendChild(self, child): + # Callers should use this instead of calling + # AppendProperty('children', child) directly because this function + # maintains the group's dicts. + self.AppendProperty('children', child) + self._AddChildToDicts(child) + + def GetChildByName(self, name): + # This is not currently optimized with a dict as GetChildByPath is because + # it has few callers. Most callers probably want GetChildByPath. This + # function is only useful to get children that have names but no paths, + # which is rare. The children of the main group ("Source", "Products", + # etc.) is pretty much the only case where this likely to come up. + # + # TODO(mark): Maybe this should raise an error if more than one child is + # present with the same name. + if not 'children' in self._properties: + return None + + for child in self._properties['children']: + if child.Name() == name: + return child + + return None + + def GetChildByPath(self, path): + if not path: + return None + + if path in self._children_by_path: + return self._children_by_path[path] + + return None + + def GetChildByRemoteObject(self, remote_object): + # This method is a little bit esoteric. Given a remote_object, which + # should be a PBXFileReference in another project file, this method will + # return this group's PBXReferenceProxy object serving as a local proxy + # for the remote PBXFileReference. + # + # This function might benefit from a dict optimization as GetChildByPath + # for some workloads, but profiling shows that it's not currently a + # problem. 
+ if not 'children' in self._properties: + return None + + for child in self._properties['children']: + if not isinstance(child, PBXReferenceProxy): + continue + + container_proxy = child._properties['remoteRef'] + if container_proxy._properties['remoteGlobalIDString'] == remote_object: + return child + + return None + + def AddOrGetFileByPath(self, path, hierarchical): + """Returns an existing or new file reference corresponding to path. + + If hierarchical is True, this method will create or use the necessary + hierarchical group structure corresponding to path. Otherwise, it will + look in and create an item in the current group only. + + If an existing matching reference is found, it is returned, otherwise, a + new one will be created, added to the correct group, and returned. + + If path identifies a directory by virtue of carrying a trailing slash, + this method returns a PBXFileReference of "folder" type. If path + identifies a variant, by virtue of it identifying a file inside a directory + with an ".lproj" extension, this method returns a PBXVariantGroup + containing the variant named by path, and possibly other variants. For + all other paths, a "normal" PBXFileReference will be returned. + """ + + # Adding or getting a directory? Directories end with a trailing slash. + is_dir = False + if path.endswith('/'): + is_dir = True + path = posixpath.normpath(path) + if is_dir: + path = path + '/' + + # Adding or getting a variant? Variants are files inside directories + # with an ".lproj" extension. Xcode uses variants for localization. For + # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named + # MainMenu.nib inside path/to, and give it a variant named Language. In + # this example, grandparent would be set to path/to and parent_root would + # be set to Language. + variant_name = None + parent = posixpath.dirname(path) + grandparent = posixpath.dirname(parent) + parent_basename = posixpath.basename(parent) + (parent_root, parent_ext) = posixpath.splitext(parent_basename) + if parent_ext == '.lproj': + variant_name = parent_root + if grandparent == '': + grandparent = None + + # Putting a directory inside a variant group is not currently supported. + assert not is_dir or variant_name is None + + path_split = path.split(posixpath.sep) + if len(path_split) == 1 or \ + ((is_dir or variant_name != None) and len(path_split) == 2) or \ + not hierarchical: + # The PBXFileReference or PBXVariantGroup will be added to or gotten from + # this PBXGroup, no recursion necessary. + if variant_name is None: + # Add or get a PBXFileReference. + file_ref = self.GetChildByPath(path) + if file_ref != None: + assert file_ref.__class__ == PBXFileReference + else: + file_ref = PBXFileReference({'path': path}) + self.AppendChild(file_ref) + else: + # Add or get a PBXVariantGroup. The variant group name is the same + # as the basename (MainMenu.nib in the example above). grandparent + # specifies the path to the variant group itself, and path_split[-2:] + # is the path of the specific variant relative to its group. 
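        # Worked example (editorial): for the path 'English.lproj/MainMenu.nib',
        # variant_name is 'English', variant_group_name below is 'MainMenu.nib',
        # and variant_path is 'English.lproj/MainMenu.nib'.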
+ variant_group_name = posixpath.basename(path) + variant_group_ref = self.AddOrGetVariantGroupByNameAndPath( + variant_group_name, grandparent) + variant_path = posixpath.sep.join(path_split[-2:]) + variant_ref = variant_group_ref.GetChildByPath(variant_path) + if variant_ref != None: + assert variant_ref.__class__ == PBXFileReference + else: + variant_ref = PBXFileReference({'name': variant_name, + 'path': variant_path}) + variant_group_ref.AppendChild(variant_ref) + # The caller is interested in the variant group, not the specific + # variant file. + file_ref = variant_group_ref + return file_ref + else: + # Hierarchical recursion. Add or get a PBXGroup corresponding to the + # outermost path component, and then recurse into it, chopping off that + # path component. + next_dir = path_split[0] + group_ref = self.GetChildByPath(next_dir) + if group_ref != None: + assert group_ref.__class__ == PBXGroup + else: + group_ref = PBXGroup({'path': next_dir}) + self.AppendChild(group_ref) + return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]), + hierarchical) + + def AddOrGetVariantGroupByNameAndPath(self, name, path): + """Returns an existing or new PBXVariantGroup for name and path. + + If a PBXVariantGroup identified by the name and path arguments is already + present as a child of this object, it is returned. Otherwise, a new + PBXVariantGroup with the correct properties is created, added as a child, + and returned. + + This method will generally be called by AddOrGetFileByPath, which knows + when to create a variant group based on the structure of the pathnames + passed to it. + """ + + key = (name, path) + if key in self._variant_children_by_name_and_path: + variant_group_ref = self._variant_children_by_name_and_path[key] + assert variant_group_ref.__class__ == PBXVariantGroup + return variant_group_ref + + variant_group_properties = {'name': name} + if path != None: + variant_group_properties['path'] = path + variant_group_ref = PBXVariantGroup(variant_group_properties) + self.AppendChild(variant_group_ref) + + return variant_group_ref + + def TakeOverOnlyChild(self, recurse=False): + """If this PBXGroup has only one child and it's also a PBXGroup, take + it over by making all of its children this object's children. + + This function will continue to take over only children when those children + are groups. If there are three PBXGroups representing a, b, and c, with + c inside b and b inside a, and a and b have no other children, this will + result in a taking over both b and c, forming a PBXGroup for a/b/c. + + If recurse is True, this function will recurse into children and ask them + to collapse themselves by taking over only children as well. Assuming + an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f + (d1, d2, and f are files, the rest are groups), recursion will result in + a group for a/b/c containing a group for d3/e. + """ + + # At this stage, check that child class types are PBXGroup exactly, + # instead of using isinstance. The only subclass of PBXGroup, + # PBXVariantGroup, should not participate in reparenting in the same way: + # reparenting by merging different object types would be wrong. + while len(self._properties['children']) == 1 and \ + self._properties['children'][0].__class__ == PBXGroup: + # Loop to take over the innermost only-child group possible. + + child = self._properties['children'][0] + + # Assume the child's properties, including its children. 
Save a copy + # of this object's old properties, because they'll still be needed. + # This object retains its existing id and parent attributes. + old_properties = self._properties + self._properties = child._properties + self._children_by_path = child._children_by_path + + if not 'sourceTree' in self._properties or \ + self._properties['sourceTree'] == '': + # The child was relative to its parent. Fix up the path. Note that + # children with a sourceTree other than "" are not relative to + # their parents, so no path fix-up is needed in that case. + if 'path' in old_properties: + if 'path' in self._properties: + # Both the original parent and child have paths set. + self._properties['path'] = posixpath.join(old_properties['path'], + self._properties['path']) + else: + # Only the original parent has a path, use it. + self._properties['path'] = old_properties['path'] + if 'sourceTree' in old_properties: + # The original parent had a sourceTree set, use it. + self._properties['sourceTree'] = old_properties['sourceTree'] + + # If the original parent had a name set, keep using it. If the original + # parent didn't have a name but the child did, let the child's name + # live on. If the name attribute seems unnecessary now, get rid of it. + if 'name' in old_properties and old_properties['name'] != None and \ + old_properties['name'] != self.Name(): + self._properties['name'] = old_properties['name'] + if 'name' in self._properties and 'path' in self._properties and \ + self._properties['name'] == self._properties['path']: + del self._properties['name'] + + # Notify all children of their new parent. + for child in self._properties['children']: + child.parent = self + + # If asked to recurse, recurse. + if recurse: + for child in self._properties['children']: + if child.__class__ == PBXGroup: + child.TakeOverOnlyChild(recurse) + + def SortGroup(self): + self._properties['children'] = \ + sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y)) + + # Recurse. + for child in self._properties['children']: + if isinstance(child, PBXGroup): + child.SortGroup() + + +class XCFileLikeElement(XCHierarchicalElement): + # Abstract base for objects that can be used as the fileRef property of + # PBXBuildFile. + + def PathHashables(self): + # A PBXBuildFile that refers to this object will call this method to + # obtain additional hashables specific to this XCFileLikeElement. Don't + # just use this object's hashables, they're not specific and unique enough + # on their own (without access to the parent hashables.) Instead, provide + # hashables that identify this object by path by getting its hashables as + # well as the hashables of ancestor XCHierarchicalElement objects. + + hashables = [] + xche = self + while xche != None and isinstance(xche, XCHierarchicalElement): + xche_hashables = xche.Hashables() + for index in xrange(0, len(xche_hashables)): + hashables.insert(index, xche_hashables[index]) + xche = xche.parent + return hashables + + +class XCContainerPortal(XCObject): + # Abstract base for objects that can be used as the containerPortal property + # of PBXContainerItemProxy. + pass + + +class XCRemoteObject(XCObject): + # Abstract base for objects that can be used as the remoteGlobalIDString + # property of PBXContainerItemProxy. 
+ pass + + +class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject): + _schema = XCFileLikeElement._schema.copy() + _schema.update({ + 'explicitFileType': [0, str, 0, 0], + 'lastKnownFileType': [0, str, 0, 0], + 'name': [0, str, 0, 0], + 'path': [0, str, 0, 1], + }) + + # Weird output rules for PBXFileReference. + _should_print_single_line = True + # super + _encode_transforms = XCFileLikeElement._alternate_encode_transforms + + def __init__(self, properties=None, id=None, parent=None): + # super + XCFileLikeElement.__init__(self, properties, id, parent) + if 'path' in self._properties and self._properties['path'].endswith('/'): + self._properties['path'] = self._properties['path'][:-1] + is_dir = True + else: + is_dir = False + + if 'path' in self._properties and \ + not 'lastKnownFileType' in self._properties and \ + not 'explicitFileType' in self._properties: + # TODO(mark): This is the replacement for a replacement for a quick hack. + # It is no longer incredibly sucky, but this list needs to be extended. + extension_map = { + 'a': 'archive.ar', + 'app': 'wrapper.application', + 'bdic': 'file', + 'bundle': 'wrapper.cfbundle', + 'c': 'sourcecode.c.c', + 'cc': 'sourcecode.cpp.cpp', + 'cpp': 'sourcecode.cpp.cpp', + 'css': 'text.css', + 'cxx': 'sourcecode.cpp.cpp', + 'dart': 'sourcecode', + 'dylib': 'compiled.mach-o.dylib', + 'framework': 'wrapper.framework', + 'gyp': 'sourcecode', + 'gypi': 'sourcecode', + 'h': 'sourcecode.c.h', + 'hxx': 'sourcecode.cpp.h', + 'icns': 'image.icns', + 'java': 'sourcecode.java', + 'js': 'sourcecode.javascript', + 'kext': 'wrapper.kext', + 'm': 'sourcecode.c.objc', + 'mm': 'sourcecode.cpp.objcpp', + 'nib': 'wrapper.nib', + 'o': 'compiled.mach-o.objfile', + 'pdf': 'image.pdf', + 'pl': 'text.script.perl', + 'plist': 'text.plist.xml', + 'pm': 'text.script.perl', + 'png': 'image.png', + 'py': 'text.script.python', + 'r': 'sourcecode.rez', + 'rez': 'sourcecode.rez', + 's': 'sourcecode.asm', + 'storyboard': 'file.storyboard', + 'strings': 'text.plist.strings', + 'swift': 'sourcecode.swift', + 'ttf': 'file', + 'xcassets': 'folder.assetcatalog', + 'xcconfig': 'text.xcconfig', + 'xcdatamodel': 'wrapper.xcdatamodel', + 'xcdatamodeld':'wrapper.xcdatamodeld', + 'xib': 'file.xib', + 'y': 'sourcecode.yacc', + } + + prop_map = { + 'dart': 'explicitFileType', + 'gyp': 'explicitFileType', + 'gypi': 'explicitFileType', + } + + if is_dir: + file_type = 'folder' + prop_name = 'lastKnownFileType' + else: + basename = posixpath.basename(self._properties['path']) + (root, ext) = posixpath.splitext(basename) + # Check the map using a lowercase extension. + # TODO(mark): Maybe it should try with the original case first and fall + # back to lowercase, in case there are any instances where case + # matters. There currently aren't. + if ext != '': + ext = ext[1:].lower() + + # TODO(mark): "text" is the default value, but "file" is appropriate + # for unrecognized files not containing text. Xcode seems to choose + # based on content. + file_type = extension_map.get(ext, 'text') + prop_name = prop_map.get(ext, 'lastKnownFileType') + + self._properties[prop_name] = file_type + + +class PBXVariantGroup(PBXGroup, XCFileLikeElement): + """PBXVariantGroup is used by Xcode to represent localizations.""" + # No additions to the schema relative to PBXGroup. + pass + + +# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below +# because it uses PBXContainerItemProxy, defined below. 
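+
+# Illustrative sketch: assuming a caller holds some PBXGroup instance `group`
+# (the variable name and the paths below are hypothetical), AddOrGetFileByPath
+# chooses the reference type from the path alone, as described in its
+# docstring above:
+#
+#   src = group.AddOrGetFileByPath('Source/main.cc', hierarchical=True)
+#   # -> PBXFileReference; its lastKnownFileType is 'sourcecode.cpp.cpp',
+#   #    per the extension_map in PBXFileReference.__init__ above.
+#   nib = group.AddOrGetFileByPath('Resources/en.lproj/MainMenu.nib', True)
+#   # -> PBXVariantGroup named 'MainMenu.nib' containing an 'en' variant.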
+ + +class XCBuildConfiguration(XCObject): + _schema = XCObject._schema.copy() + _schema.update({ + 'baseConfigurationReference': [0, PBXFileReference, 0, 0], + 'buildSettings': [0, dict, 0, 1, {}], + 'name': [0, str, 0, 1], + }) + + def HasBuildSetting(self, key): + return key in self._properties['buildSettings'] + + def GetBuildSetting(self, key): + return self._properties['buildSettings'][key] + + def SetBuildSetting(self, key, value): + # TODO(mark): If a list, copy? + self._properties['buildSettings'][key] = value + + def AppendBuildSetting(self, key, value): + if not key in self._properties['buildSettings']: + self._properties['buildSettings'][key] = [] + self._properties['buildSettings'][key].append(value) + + def DelBuildSetting(self, key): + if key in self._properties['buildSettings']: + del self._properties['buildSettings'][key] + + def SetBaseConfiguration(self, value): + self._properties['baseConfigurationReference'] = value + +class XCConfigurationList(XCObject): + # _configs is the default list of configurations. + _configs = [ XCBuildConfiguration({'name': 'Debug'}), + XCBuildConfiguration({'name': 'Release'}) ] + + _schema = XCObject._schema.copy() + _schema.update({ + 'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs], + 'defaultConfigurationIsVisible': [0, int, 0, 1, 1], + 'defaultConfigurationName': [0, str, 0, 1, 'Release'], + }) + + def Name(self): + return 'Build configuration list for ' + \ + self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"' + + def ConfigurationNamed(self, name): + """Convenience accessor to obtain an XCBuildConfiguration by name.""" + for configuration in self._properties['buildConfigurations']: + if configuration._properties['name'] == name: + return configuration + + raise KeyError(name) + + def DefaultConfiguration(self): + """Convenience accessor to obtain the default XCBuildConfiguration.""" + return self.ConfigurationNamed(self._properties['defaultConfigurationName']) + + def HasBuildSetting(self, key): + """Determines the state of a build setting in all XCBuildConfiguration + child objects. + + If all child objects have key in their build settings, and the value is the + same in all child objects, returns 1. + + If no child objects have the key in their build settings, returns 0. + + If some, but not all, child objects have the key in their build settings, + or if any children have different values for the key, returns -1. + """ + + has = None + value = None + for configuration in self._properties['buildConfigurations']: + configuration_has = configuration.HasBuildSetting(key) + if has is None: + has = configuration_has + elif has != configuration_has: + return -1 + + if configuration_has: + configuration_value = configuration.GetBuildSetting(key) + if value is None: + value = configuration_value + elif value != configuration_value: + return -1 + + if not has: + return 0 + + return 1 + + def GetBuildSetting(self, key): + """Gets the build setting for key. + + All child XCConfiguration objects must have the same value set for the + setting, or a ValueError will be raised. + """ + + # TODO(mark): This is wrong for build settings that are lists. The list + # contents should be compared (and a list copy returned?) 
+ + value = None + for configuration in self._properties['buildConfigurations']: + configuration_value = configuration.GetBuildSetting(key) + if value is None: + value = configuration_value + else: + if value != configuration_value: + raise ValueError('Variant values for ' + key) + + return value + + def SetBuildSetting(self, key, value): + """Sets the build setting for key to value in all child + XCBuildConfiguration objects. + """ + + for configuration in self._properties['buildConfigurations']: + configuration.SetBuildSetting(key, value) + + def AppendBuildSetting(self, key, value): + """Appends value to the build setting for key, which is treated as a list, + in all child XCBuildConfiguration objects. + """ + + for configuration in self._properties['buildConfigurations']: + configuration.AppendBuildSetting(key, value) + + def DelBuildSetting(self, key): + """Deletes the build setting key from all child XCBuildConfiguration + objects. + """ + + for configuration in self._properties['buildConfigurations']: + configuration.DelBuildSetting(key) + + def SetBaseConfiguration(self, value): + """Sets the build configuration in all child XCBuildConfiguration objects. + """ + + for configuration in self._properties['buildConfigurations']: + configuration.SetBaseConfiguration(value) + + +class PBXBuildFile(XCObject): + _schema = XCObject._schema.copy() + _schema.update({ + 'fileRef': [0, XCFileLikeElement, 0, 1], + 'settings': [0, str, 0, 0], # hack, it's a dict + }) + + # Weird output rules for PBXBuildFile. + _should_print_single_line = True + _encode_transforms = XCObject._alternate_encode_transforms + + def Name(self): + # Example: "main.cc in Sources" + return self._properties['fileRef'].Name() + ' in ' + self.parent.Name() + + def Hashables(self): + # super + hashables = XCObject.Hashables(self) + + # It is not sufficient to just rely on Name() to get the + # XCFileLikeElement's name, because that is not a complete pathname. + # PathHashables returns hashables unique enough that no two + # PBXBuildFiles should wind up with the same set of hashables, unless + # someone adds the same file multiple times to the same target. That + # would be considered invalid anyway. + hashables.extend(self._properties['fileRef'].PathHashables()) + + return hashables + + +class XCBuildPhase(XCObject): + """Abstract base for build phase classes. Not represented in a project + file. + + Attributes: + _files_by_path: A dict mapping each path of a child in the files list by + path (keys) to the corresponding PBXBuildFile children (values). + _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys) + to the corresponding PBXBuildFile children (values). + """ + + # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't + # actually have a "files" list. XCBuildPhase should not have "files" but + # another abstract subclass of it should provide this, and concrete build + # phase types that do have "files" lists should be derived from that new + # abstract subclass. XCBuildPhase should only provide buildActionMask and + # runOnlyForDeploymentPostprocessing, and not files or the various + # file-related methods and attributes. 
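+
+  # Illustrative sketch: assuming a concrete phase obtained from some
+  # PBXNativeTarget `target` (the variable name and path below are
+  # hypothetical), callers normally go through AddFile, defined below, rather
+  # than constructing PBXBuildFile objects themselves:
+  #
+  #   phase = target.SourcesPhase()
+  #   phase.AddFile('src/vm/wren_vm.c')
+  #
+  # AddFile resolves the path to a PBXFileReference (or PBXVariantGroup) via
+  # FileGroup and AddOrGetFileByPath, wraps it in a PBXBuildFile, and keeps
+  # _files_by_path and _files_by_xcfilelikeelement consistent.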
+ + _schema = XCObject._schema.copy() + _schema.update({ + 'buildActionMask': [0, int, 0, 1, 0x7fffffff], + 'files': [1, PBXBuildFile, 1, 1, []], + 'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0], + }) + + def __init__(self, properties=None, id=None, parent=None): + # super + XCObject.__init__(self, properties, id, parent) + + self._files_by_path = {} + self._files_by_xcfilelikeelement = {} + for pbxbuildfile in self._properties.get('files', []): + self._AddBuildFileToDicts(pbxbuildfile) + + def FileGroup(self, path): + # Subclasses must override this by returning a two-element tuple. The + # first item in the tuple should be the PBXGroup to which "path" should be + # added, either as a child or deeper descendant. The second item should + # be a boolean indicating whether files should be added into hierarchical + # groups or one single flat group. + raise NotImplementedError( + self.__class__.__name__ + ' must implement FileGroup') + + def _AddPathToDict(self, pbxbuildfile, path): + """Adds path to the dict tracking paths belonging to this build phase. + + If the path is already a member of this build phase, raises an exception. + """ + + if path in self._files_by_path: + raise ValueError('Found multiple build files with path ' + path) + self._files_by_path[path] = pbxbuildfile + + def _AddBuildFileToDicts(self, pbxbuildfile, path=None): + """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts. + + If path is specified, then it is the path that is being added to the + phase, and pbxbuildfile must contain either a PBXFileReference directly + referencing that path, or it must contain a PBXVariantGroup that itself + contains a PBXFileReference referencing the path. + + If path is not specified, either the PBXFileReference's path or the paths + of all children of the PBXVariantGroup are taken as being added to the + phase. + + If the path is already present in the phase, raises an exception. + + If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile + are already present in the phase, referenced by a different PBXBuildFile + object, raises an exception. This does not raise an exception when + a PBXFileReference or PBXVariantGroup reappear and are referenced by the + same PBXBuildFile that has already introduced them, because in the case + of PBXVariantGroup objects, they may correspond to multiple paths that are + not all added simultaneously. When this situation occurs, the path needs + to be added to _files_by_path, but nothing needs to change in + _files_by_xcfilelikeelement, and the caller should have avoided adding + the PBXBuildFile if it is already present in the list of children. + """ + + xcfilelikeelement = pbxbuildfile._properties['fileRef'] + + paths = [] + if path != None: + # It's best when the caller provides the path. + if isinstance(xcfilelikeelement, PBXVariantGroup): + paths.append(path) + else: + # If the caller didn't provide a path, there can be either multiple + # paths (PBXVariantGroup) or one. + if isinstance(xcfilelikeelement, PBXVariantGroup): + for variant in xcfilelikeelement._properties['children']: + paths.append(variant.FullPath()) + else: + paths.append(xcfilelikeelement.FullPath()) + + # Add the paths first, because if something's going to raise, the + # messages provided by _AddPathToDict are more useful owing to its + # having access to a real pathname and not just an object's Name(). 
+ for a_path in paths: + self._AddPathToDict(pbxbuildfile, a_path) + + # If another PBXBuildFile references this XCFileLikeElement, there's a + # problem. + if xcfilelikeelement in self._files_by_xcfilelikeelement and \ + self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile: + raise ValueError('Found multiple build files for ' + \ + xcfilelikeelement.Name()) + self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile + + def AppendBuildFile(self, pbxbuildfile, path=None): + # Callers should use this instead of calling + # AppendProperty('files', pbxbuildfile) directly because this function + # maintains the object's dicts. Better yet, callers can just call AddFile + # with a pathname and not worry about building their own PBXBuildFile + # objects. + self.AppendProperty('files', pbxbuildfile) + self._AddBuildFileToDicts(pbxbuildfile, path) + + def AddFile(self, path, settings=None): + (file_group, hierarchical) = self.FileGroup(path) + file_ref = file_group.AddOrGetFileByPath(path, hierarchical) + + if file_ref in self._files_by_xcfilelikeelement and \ + isinstance(file_ref, PBXVariantGroup): + # There's already a PBXBuildFile in this phase corresponding to the + # PBXVariantGroup. path just provides a new variant that belongs to + # the group. Add the path to the dict. + pbxbuildfile = self._files_by_xcfilelikeelement[file_ref] + self._AddBuildFileToDicts(pbxbuildfile, path) + else: + # Add a new PBXBuildFile to get file_ref into the phase. + if settings is None: + pbxbuildfile = PBXBuildFile({'fileRef': file_ref}) + else: + pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings}) + self.AppendBuildFile(pbxbuildfile, path) + + +class PBXHeadersBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. + + def Name(self): + return 'Headers' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + +class PBXResourcesBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. + + def Name(self): + return 'Resources' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + +class PBXSourcesBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. + + def Name(self): + return 'Sources' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + +class PBXFrameworksBuildPhase(XCBuildPhase): + # No additions to the schema relative to XCBuildPhase. + + def Name(self): + return 'Frameworks' + + def FileGroup(self, path): + (root, ext) = posixpath.splitext(path) + if ext != '': + ext = ext[1:].lower() + if ext == 'o': + # .o files are added to Xcode Frameworks phases, but conceptually aren't + # frameworks, they're more like sources or intermediates. Redirect them + # to show up in one of those other groups. 
+ return self.PBXProjectAncestor().RootGroupForPath(path) + else: + return (self.PBXProjectAncestor().FrameworksGroup(), False) + + +class PBXShellScriptBuildPhase(XCBuildPhase): + _schema = XCBuildPhase._schema.copy() + _schema.update({ + 'inputPaths': [1, str, 0, 1, []], + 'name': [0, str, 0, 0], + 'outputPaths': [1, str, 0, 1, []], + 'shellPath': [0, str, 0, 1, '/bin/sh'], + 'shellScript': [0, str, 0, 1], + 'showEnvVarsInLog': [0, int, 0, 0], + }) + + def Name(self): + if 'name' in self._properties: + return self._properties['name'] + + return 'ShellScript' + + +class PBXCopyFilesBuildPhase(XCBuildPhase): + _schema = XCBuildPhase._schema.copy() + _schema.update({ + 'dstPath': [0, str, 0, 1], + 'dstSubfolderSpec': [0, int, 0, 1], + 'name': [0, str, 0, 0], + }) + + # path_tree_re matches "$(DIR)/path", "$(DIR)/$(DIR2)/path" or just "$(DIR)". + # Match group 1 is "DIR", group 3 is "path" or "$(DIR2") or "$(DIR2)/path" + # or None. If group 3 is "path", group 4 will be None otherwise group 4 is + # "DIR2" and group 6 is "path". + path_tree_re = re.compile(r'^\$\((.*?)\)(/(\$\((.*?)\)(/(.*)|)|(.*)|)|)$') + + # path_tree_{first,second}_to_subfolder map names of Xcode variables to the + # associated dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase + # object. + path_tree_first_to_subfolder = { + # Types that can be chosen via the Xcode UI. + 'BUILT_PRODUCTS_DIR': 16, # Products Directory + 'BUILT_FRAMEWORKS_DIR': 10, # Not an official Xcode macro. + # Existed before support for the + # names below was added. Maps to + # "Frameworks". + } + + path_tree_second_to_subfolder = { + 'WRAPPER_NAME': 1, # Wrapper + # Although Xcode's friendly name is "Executables", the destination + # is demonstrably the value of the build setting + # EXECUTABLE_FOLDER_PATH not EXECUTABLES_FOLDER_PATH. + 'EXECUTABLE_FOLDER_PATH': 6, # Executables. + 'UNLOCALIZED_RESOURCES_FOLDER_PATH': 7, # Resources + 'JAVA_FOLDER_PATH': 15, # Java Resources + 'FRAMEWORKS_FOLDER_PATH': 10, # Frameworks + 'SHARED_FRAMEWORKS_FOLDER_PATH': 11, # Shared Frameworks + 'SHARED_SUPPORT_FOLDER_PATH': 12, # Shared Support + 'PLUGINS_FOLDER_PATH': 13, # PlugIns + # For XPC Services, Xcode sets both dstPath and dstSubfolderSpec. + # Note that it re-uses the BUILT_PRODUCTS_DIR value for + # dstSubfolderSpec. dstPath is set below. + 'XPCSERVICES_FOLDER_PATH': 16, # XPC Services. + } + + def Name(self): + if 'name' in self._properties: + return self._properties['name'] + + return 'CopyFiles' + + def FileGroup(self, path): + return self.PBXProjectAncestor().RootGroupForPath(path) + + def SetDestination(self, path): + """Set the dstSubfolderSpec and dstPath properties from path. + + path may be specified in the same notation used for XCHierarchicalElements, + specifically, "$(DIR)/path". + """ + + path_tree_match = self.path_tree_re.search(path) + if path_tree_match: + path_tree = path_tree_match.group(1); + if path_tree in self.path_tree_first_to_subfolder: + subfolder = self.path_tree_first_to_subfolder[path_tree] + relative_path = path_tree_match.group(3) + if relative_path is None: + relative_path = '' + + if subfolder == 16 and path_tree_match.group(4) is not None: + # BUILT_PRODUCTS_DIR (16) is the first element in a path whose + # second element is possibly one of the variable names in + # path_tree_second_to_subfolder. Xcode sets the values of all these + # variables to relative paths so .gyp files must prefix them with + # BUILT_PRODUCTS_DIR, e.g. + # $(BUILT_PRODUCTS_DIR)/$(PLUGINS_FOLDER_PATH). 
Then + # xcode_emulation.py can export these variables with the same values + # as Xcode yet make & ninja files can determine the absolute path + # to the target. Xcode uses the dstSubfolderSpec value set here + # to determine the full path. + # + # An alternative of xcode_emulation.py setting the values to absolute + # paths when exporting these variables has been ruled out because + # then the values would be different depending on the build tool. + # + # Another alternative is to invent new names for the variables used + # to match to the subfolder indices in the second table. .gyp files + # then will not need to prepend $(BUILT_PRODUCTS_DIR) because + # xcode_emulation.py can set the values of those variables to + # the absolute paths when exporting. This is possibly the thinking + # behind BUILT_FRAMEWORKS_DIR which is used in exactly this manner. + # + # Requiring prepending BUILT_PRODUCTS_DIR has been chosen because + # this same way could be used to specify destinations in .gyp files + # that pre-date this addition to GYP. However they would only work + # with the Xcode generator. The previous version of xcode_emulation.py + # does not export these variables. Such files will get the benefit + # of the Xcode UI showing the proper destination name simply by + # regenerating the projects with this version of GYP. + path_tree = path_tree_match.group(4) + relative_path = path_tree_match.group(6) + separator = '/' + + if path_tree in self.path_tree_second_to_subfolder: + subfolder = self.path_tree_second_to_subfolder[path_tree] + if relative_path is None: + relative_path = '' + separator = '' + if path_tree == 'XPCSERVICES_FOLDER_PATH': + relative_path = '$(CONTENTS_FOLDER_PATH)/XPCServices' \ + + separator + relative_path + else: + # subfolder = 16 from above + # The second element of the path is an unrecognized variable. + # Include it and any remaining elements in relative_path. + relative_path = path_tree_match.group(3); + + else: + # The path starts with an unrecognized Xcode variable + # name like $(SRCROOT). Xcode will still handle this + # as an "absolute path" that starts with the variable. + subfolder = 0 + relative_path = path + elif path.startswith('/'): + # Special case. Absolute paths are in dstSubfolderSpec 0. + subfolder = 0 + relative_path = path[1:] + else: + raise ValueError('Can\'t use path %s in a %s' % \ + (path, self.__class__.__name__)) + + self._properties['dstPath'] = relative_path + self._properties['dstSubfolderSpec'] = subfolder + + +class PBXBuildRule(XCObject): + _schema = XCObject._schema.copy() + _schema.update({ + 'compilerSpec': [0, str, 0, 1], + 'filePatterns': [0, str, 0, 0], + 'fileType': [0, str, 0, 1], + 'isEditable': [0, int, 0, 1, 1], + 'outputFiles': [1, str, 0, 1, []], + 'script': [0, str, 0, 0], + }) + + def Name(self): + # Not very inspired, but it's what Xcode uses. + return self.__class__.__name__ + + def Hashables(self): + # super + hashables = XCObject.Hashables(self) + + # Use the hashables of the weak objects that this object refers to. + hashables.append(self._properties['fileType']) + if 'filePatterns' in self._properties: + hashables.append(self._properties['filePatterns']) + return hashables + + +class PBXContainerItemProxy(XCObject): + # When referencing an item in this project file, containerPortal is the + # PBXProject root object of this project file. When referencing an item in + # another project file, containerPortal is a PBXFileReference identifying + # the other project file. 
+  #
+  # When serving as a proxy to an XCTarget (in this project file or another),
+  # proxyType is 1. When serving as a proxy to a PBXFileReference (in another
+  # project file), proxyType is 2. Type 2 is used for references to the
+  # products of the other project file's targets.
+  #
+  # Xcode is weird about remoteGlobalIDString. Usually, it's printed without
+  # a comment, indicating that it's tracked internally simply as a string, but
+  # sometimes it's printed with a comment (usually when the object is initially
+  # created), indicating that it's tracked as a project file object at least
+  # sometimes. This module always tracks it as an object, but contains a hack
+  # to prevent it from printing the comment in the project file output. See
+  # _XCKVPrint.
+  _schema = XCObject._schema.copy()
+  _schema.update({
+    'containerPortal': [0, XCContainerPortal, 0, 1],
+    'proxyType': [0, int, 0, 1],
+    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
+    'remoteInfo': [0, str, 0, 1],
+  })
+
+  def __repr__(self):
+    props = self._properties
+    name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
+    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
+
+  def Name(self):
+    # Admittedly not the best name, but it's what Xcode uses.
+    return self.__class__.__name__
+
+  def Hashables(self):
+    # super
+    hashables = XCObject.Hashables(self)
+
+    # Use the hashables of the weak objects that this object refers to.
+    hashables.extend(self._properties['containerPortal'].Hashables())
+    hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
+    return hashables
+
+
+class PBXTargetDependency(XCObject):
+  # The "target" property accepts an XCTarget object, and obviously not
+  # NoneType. But XCTarget is defined below, so it can't be put into the
+  # schema yet. The definition of PBXTargetDependency can't be moved below
+  # XCTarget because XCTarget's own schema references PBXTargetDependency.
+  # Python doesn't deal well with this circular relationship, and doesn't have
+  # a real way to do forward declarations. To work around, the type of
+  # the "target" property is reset below, after XCTarget is defined.
+  #
+  # At least one of "name" and "target" is required.
+  _schema = XCObject._schema.copy()
+  _schema.update({
+    'name': [0, str, 0, 0],
+    'target': [0, None.__class__, 0, 0],
+    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
+  })
+
+  def __repr__(self):
+    name = self._properties.get('name') or self._properties['target'].Name()
+    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
+
+  def Name(self):
+    # Admittedly not the best name, but it's what Xcode uses.
+    return self.__class__.__name__
+
+  def Hashables(self):
+    # super
+    hashables = XCObject.Hashables(self)
+
+    # Use the hashables of the weak objects that this object refers to.
+    hashables.extend(self._properties['targetProxy'].Hashables())
+    return hashables
+
+
+class PBXReferenceProxy(XCFileLikeElement):
+  _schema = XCFileLikeElement._schema.copy()
+  _schema.update({
+    'fileType': [0, str, 0, 1],
+    'path': [0, str, 0, 1],
+    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
+  })
+
+
+class XCTarget(XCRemoteObject):
+  # An XCTarget is really just an XCObject, the XCRemoteObject thing is just
+  # to allow PBXProject to be used in the remoteGlobalIDString property of
+  # PBXContainerItemProxy.
+  #
+  # Setting a "name" property at instantiation may also affect "productName",
+  # which may in turn affect the "PRODUCT_NAME" build setting in children of
+  # "buildConfigurationList".
See __init__ below. + _schema = XCRemoteObject._schema.copy() + _schema.update({ + 'buildConfigurationList': [0, XCConfigurationList, 1, 1, + XCConfigurationList()], + 'buildPhases': [1, XCBuildPhase, 1, 1, []], + 'dependencies': [1, PBXTargetDependency, 1, 1, []], + 'name': [0, str, 0, 1], + 'productName': [0, str, 0, 1], + }) + + def __init__(self, properties=None, id=None, parent=None, + force_outdir=None, force_prefix=None, force_extension=None): + # super + XCRemoteObject.__init__(self, properties, id, parent) + + # Set up additional defaults not expressed in the schema. If a "name" + # property was supplied, set "productName" if it is not present. Also set + # the "PRODUCT_NAME" build setting in each configuration, but only if + # the setting is not present in any build configuration. + if 'name' in self._properties: + if not 'productName' in self._properties: + self.SetProperty('productName', self._properties['name']) + + if 'productName' in self._properties: + if 'buildConfigurationList' in self._properties: + configs = self._properties['buildConfigurationList'] + if configs.HasBuildSetting('PRODUCT_NAME') == 0: + configs.SetBuildSetting('PRODUCT_NAME', + self._properties['productName']) + + def AddDependency(self, other): + pbxproject = self.PBXProjectAncestor() + other_pbxproject = other.PBXProjectAncestor() + if pbxproject == other_pbxproject: + # Add a dependency to another target in the same project file. + container = PBXContainerItemProxy({'containerPortal': pbxproject, + 'proxyType': 1, + 'remoteGlobalIDString': other, + 'remoteInfo': other.Name()}) + dependency = PBXTargetDependency({'target': other, + 'targetProxy': container}) + self.AppendProperty('dependencies', dependency) + else: + # Add a dependency to a target in a different project file. + other_project_ref = \ + pbxproject.AddOrGetProjectReference(other_pbxproject)[1] + container = PBXContainerItemProxy({ + 'containerPortal': other_project_ref, + 'proxyType': 1, + 'remoteGlobalIDString': other, + 'remoteInfo': other.Name(), + }) + dependency = PBXTargetDependency({'name': other.Name(), + 'targetProxy': container}) + self.AppendProperty('dependencies', dependency) + + # Proxy all of these through to the build configuration list. + + def ConfigurationNamed(self, name): + return self._properties['buildConfigurationList'].ConfigurationNamed(name) + + def DefaultConfiguration(self): + return self._properties['buildConfigurationList'].DefaultConfiguration() + + def HasBuildSetting(self, key): + return self._properties['buildConfigurationList'].HasBuildSetting(key) + + def GetBuildSetting(self, key): + return self._properties['buildConfigurationList'].GetBuildSetting(key) + + def SetBuildSetting(self, key, value): + return self._properties['buildConfigurationList'].SetBuildSetting(key, \ + value) + + def AppendBuildSetting(self, key, value): + return self._properties['buildConfigurationList'].AppendBuildSetting(key, \ + value) + + def DelBuildSetting(self, key): + return self._properties['buildConfigurationList'].DelBuildSetting(key) + + +# Redefine the type of the "target" property. See PBXTargetDependency._schema +# above. +PBXTargetDependency._schema['target'][1] = XCTarget + + +class PBXNativeTarget(XCTarget): + # buildPhases is overridden in the schema to be able to set defaults. + # + # NOTE: Contrary to most objects, it is advisable to set parent when + # constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject + # object. 
A parent reference is required for a PBXNativeTarget during + # construction to be able to set up the target defaults for productReference, + # because a PBXBuildFile object must be created for the target and it must + # be added to the PBXProject's mainGroup hierarchy. + _schema = XCTarget._schema.copy() + _schema.update({ + 'buildPhases': [1, XCBuildPhase, 1, 1, + [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]], + 'buildRules': [1, PBXBuildRule, 1, 1, []], + 'productReference': [0, PBXFileReference, 0, 1], + 'productType': [0, str, 0, 1], + }) + + # Mapping from Xcode product-types to settings. The settings are: + # filetype : used for explicitFileType in the project file + # prefix : the prefix for the file name + # suffix : the suffix for the file name + _product_filetypes = { + 'com.apple.product-type.application': ['wrapper.application', + '', '.app'], + 'com.apple.product-type.application.watchapp': ['wrapper.application', + '', '.app'], + 'com.apple.product-type.watchkit-extension': ['wrapper.app-extension', + '', '.appex'], + 'com.apple.product-type.app-extension': ['wrapper.app-extension', + '', '.appex'], + 'com.apple.product-type.bundle': ['wrapper.cfbundle', + '', '.bundle'], + 'com.apple.product-type.framework': ['wrapper.framework', + '', '.framework'], + 'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib', + 'lib', '.dylib'], + 'com.apple.product-type.library.static': ['archive.ar', + 'lib', '.a'], + 'com.apple.product-type.tool': ['compiled.mach-o.executable', + '', ''], + 'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle', + '', '.xctest'], + 'com.apple.product-type.bundle.ui-testing': ['wrapper.cfbundle', + '', '.xctest'], + 'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib', + '', '.so'], + 'com.apple.product-type.kernel-extension': ['wrapper.kext', + '', '.kext'], + } + + def __init__(self, properties=None, id=None, parent=None, + force_outdir=None, force_prefix=None, force_extension=None): + # super + XCTarget.__init__(self, properties, id, parent) + + if 'productName' in self._properties and \ + 'productType' in self._properties and \ + not 'productReference' in self._properties and \ + self._properties['productType'] in self._product_filetypes: + products_group = None + pbxproject = self.PBXProjectAncestor() + if pbxproject != None: + products_group = pbxproject.ProductsGroup() + + if products_group != None: + (filetype, prefix, suffix) = \ + self._product_filetypes[self._properties['productType']] + # Xcode does not have a distinct type for loadable modules that are + # pure BSD targets (not in a bundle wrapper). GYP allows such modules + # to be specified by setting a target type to loadable_module without + # having mac_bundle set. These are mapped to the pseudo-product type + # com.googlecode.gyp.xcode.bundle. + # + # By picking up this special type and converting it to a dynamic + # library (com.apple.product-type.library.dynamic) with fix-ups, + # single-file loadable modules can be produced. + # + # MACH_O_TYPE is changed to mh_bundle to produce the proper file type + # (as opposed to mh_dylib). In order for linking to succeed, + # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be + # cleared. They are meaningless for type mh_bundle. + # + # Finally, the .so extension is forcibly applied over the default + # (.dylib), unless another forced extension is already selected. + # .dylib is plainly wrong, and .bundle is used by loadable_modules in + # bundle wrappers (com.apple.product-type.bundle). 
.so seems an odd + # choice because it's used as the extension on many other systems that + # don't distinguish between linkable shared libraries and non-linkable + # loadable modules, but there's precedent: Python loadable modules on + # Mac OS X use an .so extension. + if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle': + self._properties['productType'] = \ + 'com.apple.product-type.library.dynamic' + self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle') + self.SetBuildSetting('DYLIB_CURRENT_VERSION', '') + self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '') + if force_extension is None: + force_extension = suffix[1:] + + if self._properties['productType'] == \ + 'com.apple.product-type-bundle.unit.test' or \ + self._properties['productType'] == \ + 'com.apple.product-type-bundle.ui-testing': + if force_extension is None: + force_extension = suffix[1:] + + if force_extension is not None: + # If it's a wrapper (bundle), set WRAPPER_EXTENSION. + # Extension override. + suffix = '.' + force_extension + if filetype.startswith('wrapper.'): + self.SetBuildSetting('WRAPPER_EXTENSION', force_extension) + else: + self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension) + + if filetype.startswith('compiled.mach-o.executable'): + product_name = self._properties['productName'] + product_name += suffix + suffix = '' + self.SetProperty('productName', product_name) + self.SetBuildSetting('PRODUCT_NAME', product_name) + + # Xcode handles most prefixes based on the target type, however there + # are exceptions. If a "BSD Dynamic Library" target is added in the + # Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that + # behavior. + if force_prefix is not None: + prefix = force_prefix + if filetype.startswith('wrapper.'): + self.SetBuildSetting('WRAPPER_PREFIX', prefix) + else: + self.SetBuildSetting('EXECUTABLE_PREFIX', prefix) + + if force_outdir is not None: + self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir) + + # TODO(tvl): Remove the below hack. + # http://code.google.com/p/gyp/issues/detail?id=122 + + # Some targets include the prefix in the target_name. These targets + # really should just add a product_name setting that doesn't include + # the prefix. For example: + # target_name = 'libevent', product_name = 'event' + # This check cleans up for them. + product_name = self._properties['productName'] + prefix_len = len(prefix) + if prefix_len and (product_name[:prefix_len] == prefix): + product_name = product_name[prefix_len:] + self.SetProperty('productName', product_name) + self.SetBuildSetting('PRODUCT_NAME', product_name) + + ref_props = { + 'explicitFileType': filetype, + 'includeInIndex': 0, + 'path': prefix + product_name + suffix, + 'sourceTree': 'BUILT_PRODUCTS_DIR', + } + file_ref = PBXFileReference(ref_props) + products_group.AppendChild(file_ref) + self.SetProperty('productReference', file_ref) + + def GetBuildPhaseByType(self, type): + if not 'buildPhases' in self._properties: + return None + + the_phase = None + for phase in self._properties['buildPhases']: + if isinstance(phase, type): + # Some phases may be present in multiples in a well-formed project file, + # but phases like PBXSourcesBuildPhase may only be present singly, and + # this function is intended as an aid to GetBuildPhaseByType. Loop + # over the entire list of phases and assert if more than one of the + # desired type is found. 
+ assert the_phase is None + the_phase = phase + + return the_phase + + def HeadersPhase(self): + headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase) + if headers_phase is None: + headers_phase = PBXHeadersBuildPhase() + + # The headers phase should come before the resources, sources, and + # frameworks phases, if any. + insert_at = len(self._properties['buildPhases']) + for index in xrange(0, len(self._properties['buildPhases'])): + phase = self._properties['buildPhases'][index] + if isinstance(phase, PBXResourcesBuildPhase) or \ + isinstance(phase, PBXSourcesBuildPhase) or \ + isinstance(phase, PBXFrameworksBuildPhase): + insert_at = index + break + + self._properties['buildPhases'].insert(insert_at, headers_phase) + headers_phase.parent = self + + return headers_phase + + def ResourcesPhase(self): + resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase) + if resources_phase is None: + resources_phase = PBXResourcesBuildPhase() + + # The resources phase should come before the sources and frameworks + # phases, if any. + insert_at = len(self._properties['buildPhases']) + for index in xrange(0, len(self._properties['buildPhases'])): + phase = self._properties['buildPhases'][index] + if isinstance(phase, PBXSourcesBuildPhase) or \ + isinstance(phase, PBXFrameworksBuildPhase): + insert_at = index + break + + self._properties['buildPhases'].insert(insert_at, resources_phase) + resources_phase.parent = self + + return resources_phase + + def SourcesPhase(self): + sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase) + if sources_phase is None: + sources_phase = PBXSourcesBuildPhase() + self.AppendProperty('buildPhases', sources_phase) + + return sources_phase + + def FrameworksPhase(self): + frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase) + if frameworks_phase is None: + frameworks_phase = PBXFrameworksBuildPhase() + self.AppendProperty('buildPhases', frameworks_phase) + + return frameworks_phase + + def AddDependency(self, other): + # super + XCTarget.AddDependency(self, other) + + static_library_type = 'com.apple.product-type.library.static' + shared_library_type = 'com.apple.product-type.library.dynamic' + framework_type = 'com.apple.product-type.framework' + if isinstance(other, PBXNativeTarget) and \ + 'productType' in self._properties and \ + self._properties['productType'] != static_library_type and \ + 'productType' in other._properties and \ + (other._properties['productType'] == static_library_type or \ + ((other._properties['productType'] == shared_library_type or \ + other._properties['productType'] == framework_type) and \ + ((not other.HasBuildSetting('MACH_O_TYPE')) or + other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))): + + file_ref = other.GetProperty('productReference') + + pbxproject = self.PBXProjectAncestor() + other_pbxproject = other.PBXProjectAncestor() + if pbxproject != other_pbxproject: + other_project_product_group = \ + pbxproject.AddOrGetProjectReference(other_pbxproject)[0] + file_ref = other_project_product_group.GetChildByRemoteObject(file_ref) + + self.FrameworksPhase().AppendProperty('files', + PBXBuildFile({'fileRef': file_ref})) + + +class PBXAggregateTarget(XCTarget): + pass + + +class PBXProject(XCContainerPortal): + # A PBXProject is really just an XCObject, the XCContainerPortal thing is + # just to allow PBXProject to be used in the containerPortal property of + # PBXContainerItemProxy. + """ + + Attributes: + path: "sample.xcodeproj". TODO(mark) Document me! 
+ _other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each + value is a reference to the dict in the + projectReferences list associated with the keyed + PBXProject. + """ + + _schema = XCContainerPortal._schema.copy() + _schema.update({ + 'attributes': [0, dict, 0, 0], + 'buildConfigurationList': [0, XCConfigurationList, 1, 1, + XCConfigurationList()], + 'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'], + 'hasScannedForEncodings': [0, int, 0, 1, 1], + 'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()], + 'projectDirPath': [0, str, 0, 1, ''], + 'projectReferences': [1, dict, 0, 0], + 'projectRoot': [0, str, 0, 1, ''], + 'targets': [1, XCTarget, 1, 1, []], + }) + + def __init__(self, properties=None, id=None, parent=None, path=None): + self.path = path + self._other_pbxprojects = {} + # super + return XCContainerPortal.__init__(self, properties, id, parent) + + def Name(self): + name = self.path + if name[-10:] == '.xcodeproj': + name = name[:-10] + return posixpath.basename(name) + + def Path(self): + return self.path + + def Comment(self): + return 'Project object' + + def Children(self): + # super + children = XCContainerPortal.Children(self) + + # Add children that the schema doesn't know about. Maybe there's a more + # elegant way around this, but this is the only case where we need to own + # objects in a dictionary (that is itself in a list), and three lines for + # a one-off isn't that big a deal. + if 'projectReferences' in self._properties: + for reference in self._properties['projectReferences']: + children.append(reference['ProductGroup']) + + return children + + def PBXProjectAncestor(self): + return self + + def _GroupByName(self, name): + if not 'mainGroup' in self._properties: + self.SetProperty('mainGroup', PBXGroup()) + + main_group = self._properties['mainGroup'] + group = main_group.GetChildByName(name) + if group is None: + group = PBXGroup({'name': name}) + main_group.AppendChild(group) + + return group + + # SourceGroup and ProductsGroup are created by default in Xcode's own + # templates. + def SourceGroup(self): + return self._GroupByName('Source') + + def ProductsGroup(self): + return self._GroupByName('Products') + + # IntermediatesGroup is used to collect source-like files that are generated + # by rules or script phases and are placed in intermediate directories such + # as DerivedSources. + def IntermediatesGroup(self): + return self._GroupByName('Intermediates') + + # FrameworksGroup and ProjectsGroup are top-level groups used to collect + # frameworks and projects. + def FrameworksGroup(self): + return self._GroupByName('Frameworks') + + def ProjectsGroup(self): + return self._GroupByName('Projects') + + def RootGroupForPath(self, path): + """Returns a PBXGroup child of this object to which path should be added. + + This method is intended to choose between SourceGroup and + IntermediatesGroup on the basis of whether path is present in a source + directory or an intermediates directory. For the purposes of this + determination, any path located within a derived file directory such as + PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates + directory. + + The returned value is a two-element tuple. The first element is the + PBXGroup, and the second element specifies whether that group should be + organized hierarchically (True) or as a single flat list (False). + """ + + # TODO(mark): make this a class variable and bind to self on call? + # Also, this list is nowhere near exhaustive. 
+ # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by + # gyp.generator.xcode. There should probably be some way for that module + # to push the names in, rather than having to hard-code them here. + source_tree_groups = { + 'DERIVED_FILE_DIR': (self.IntermediatesGroup, True), + 'INTERMEDIATE_DIR': (self.IntermediatesGroup, True), + 'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True), + 'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True), + } + + (source_tree, path) = SourceTreeAndPathFromPath(path) + if source_tree != None and source_tree in source_tree_groups: + (group_func, hierarchical) = source_tree_groups[source_tree] + group = group_func() + return (group, hierarchical) + + # TODO(mark): make additional choices based on file extension. + + return (self.SourceGroup(), True) + + def AddOrGetFileInRootGroup(self, path): + """Returns a PBXFileReference corresponding to path in the correct group + according to RootGroupForPath's heuristics. + + If an existing PBXFileReference for path exists, it will be returned. + Otherwise, one will be created and returned. + """ + + (group, hierarchical) = self.RootGroupForPath(path) + return group.AddOrGetFileByPath(path, hierarchical) + + def RootGroupsTakeOverOnlyChildren(self, recurse=False): + """Calls TakeOverOnlyChild for all groups in the main group.""" + + for group in self._properties['mainGroup']._properties['children']: + if isinstance(group, PBXGroup): + group.TakeOverOnlyChild(recurse) + + def SortGroups(self): + # Sort the children of the mainGroup (like "Source" and "Products") + # according to their defined order. + self._properties['mainGroup']._properties['children'] = \ + sorted(self._properties['mainGroup']._properties['children'], + cmp=lambda x,y: x.CompareRootGroup(y)) + + # Sort everything else by putting group before files, and going + # alphabetically by name within sections of groups and files. SortGroup + # is recursive. + for group in self._properties['mainGroup']._properties['children']: + if not isinstance(group, PBXGroup): + continue + + if group.Name() == 'Products': + # The Products group is a special case. Instead of sorting + # alphabetically, sort things in the order of the targets that + # produce the products. To do this, just build up a new list of + # products based on the targets. + products = [] + for target in self._properties['targets']: + if not isinstance(target, PBXNativeTarget): + continue + product = target._properties['productReference'] + # Make sure that the product is already in the products group. + assert product in group._properties['children'] + products.append(product) + + # Make sure that this process doesn't miss anything that was already + # in the products group. + assert len(products) == len(group._properties['children']) + group._properties['children'] = products + else: + group.SortGroup() + + def AddOrGetProjectReference(self, other_pbxproject): + """Add a reference to another project file (via PBXProject object) to this + one. + + Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in + this project file that contains a PBXReferenceProxy object for each + product of each PBXNativeTarget in the other project file. ProjectRef is + a PBXFileReference to the other project file. + + If this project file already references the other project file, the + existing ProductGroup and ProjectRef are returned. The ProductGroup will + still be updated if necessary. 
+ """ + + if not 'projectReferences' in self._properties: + self._properties['projectReferences'] = [] + + product_group = None + project_ref = None + + if not other_pbxproject in self._other_pbxprojects: + # This project file isn't yet linked to the other one. Establish the + # link. + product_group = PBXGroup({'name': 'Products'}) + + # ProductGroup is strong. + product_group.parent = self + + # There's nothing unique about this PBXGroup, and if left alone, it will + # wind up with the same set of hashables as all other PBXGroup objects + # owned by the projectReferences list. Add the hashables of the + # remote PBXProject that it's related to. + product_group._hashables.extend(other_pbxproject.Hashables()) + + # The other project reports its path as relative to the same directory + # that this project's path is relative to. The other project's path + # is not necessarily already relative to this project. Figure out the + # pathname that this project needs to use to refer to the other one. + this_path = posixpath.dirname(self.Path()) + projectDirPath = self.GetProperty('projectDirPath') + if projectDirPath: + if posixpath.isabs(projectDirPath[0]): + this_path = projectDirPath + else: + this_path = posixpath.join(this_path, projectDirPath) + other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path) + + # ProjectRef is weak (it's owned by the mainGroup hierarchy). + project_ref = PBXFileReference({ + 'lastKnownFileType': 'wrapper.pb-project', + 'path': other_path, + 'sourceTree': 'SOURCE_ROOT', + }) + self.ProjectsGroup().AppendChild(project_ref) + + ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref} + self._other_pbxprojects[other_pbxproject] = ref_dict + self.AppendProperty('projectReferences', ref_dict) + + # Xcode seems to sort this list case-insensitively + self._properties['projectReferences'] = \ + sorted(self._properties['projectReferences'], cmp=lambda x,y: + cmp(x['ProjectRef'].Name().lower(), + y['ProjectRef'].Name().lower())) + else: + # The link already exists. Pull out the relevnt data. + project_ref_dict = self._other_pbxprojects[other_pbxproject] + product_group = project_ref_dict['ProductGroup'] + project_ref = project_ref_dict['ProjectRef'] + + self._SetUpProductReferences(other_pbxproject, product_group, project_ref) + + inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False) + targets = other_pbxproject.GetProperty('targets') + if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets): + dir_path = project_ref._properties['path'] + product_group._hashables.extend(dir_path) + + return [product_group, project_ref] + + def _AllSymrootsUnique(self, target, inherit_unique_symroot): + # Returns True if all configurations have a unique 'SYMROOT' attribute. + # The value of inherit_unique_symroot decides, if a configuration is assumed + # to inherit a unique 'SYMROOT' attribute from its parent, if it doesn't + # define an explicit value for 'SYMROOT'. + symroots = self._DefinedSymroots(target) + for s in self._DefinedSymroots(target): + if (s is not None and not self._IsUniqueSymrootForTarget(s) or + s is None and not inherit_unique_symroot): + return False + return True if symroots else inherit_unique_symroot + + def _DefinedSymroots(self, target): + # Returns all values for the 'SYMROOT' attribute defined in all + # configurations for this target. If any configuration doesn't define the + # 'SYMROOT' attribute, None is added to the returned set. 
If all + # configurations don't define the 'SYMROOT' attribute, an empty set is + # returned. + config_list = target.GetProperty('buildConfigurationList') + symroots = set() + for config in config_list.GetProperty('buildConfigurations'): + setting = config.GetProperty('buildSettings') + if 'SYMROOT' in setting: + symroots.add(setting['SYMROOT']) + else: + symroots.add(None) + if len(symroots) == 1 and None in symroots: + return set() + return symroots + + def _IsUniqueSymrootForTarget(self, symroot): + # This method returns True if all configurations in target contain a + # 'SYMROOT' attribute that is unique for the given target. A value is + # unique, if the Xcode macro '$SRCROOT' appears in it in any form. + uniquifier = ['$SRCROOT', '$(SRCROOT)'] + if any(x in symroot for x in uniquifier): + return True + return False + + def _SetUpProductReferences(self, other_pbxproject, product_group, + project_ref): + # TODO(mark): This only adds references to products in other_pbxproject + # when they don't exist in this pbxproject. Perhaps it should also + # remove references from this pbxproject that are no longer present in + # other_pbxproject. Perhaps it should update various properties if they + # change. + for target in other_pbxproject._properties['targets']: + if not isinstance(target, PBXNativeTarget): + continue + + other_fileref = target._properties['productReference'] + if product_group.GetChildByRemoteObject(other_fileref) is None: + # Xcode sets remoteInfo to the name of the target and not the name + # of its product, despite this proxy being a reference to the product. + container_item = PBXContainerItemProxy({ + 'containerPortal': project_ref, + 'proxyType': 2, + 'remoteGlobalIDString': other_fileref, + 'remoteInfo': target.Name() + }) + # TODO(mark): Does sourceTree get copied straight over from the other + # project? Can the other project ever have lastKnownFileType here + # instead of explicitFileType? (Use it if so?) Can path ever be + # unset? (I don't think so.) Can other_fileref have name set, and + # does it impact the PBXReferenceProxy if so? These are the questions + # that perhaps will be answered one day. + reference_proxy = PBXReferenceProxy({ + 'fileType': other_fileref._properties['explicitFileType'], + 'path': other_fileref._properties['path'], + 'sourceTree': other_fileref._properties['sourceTree'], + 'remoteRef': container_item, + }) + + product_group.AppendChild(reference_proxy) + + def SortRemoteProductReferences(self): + # For each remote project file, sort the associated ProductGroup in the + # same order that the targets are sorted in the remote project file. This + # is the sort order used by Xcode. + + def CompareProducts(x, y, remote_products): + # x and y are PBXReferenceProxy objects. Go through their associated + # PBXContainerItem to get the remote PBXFileReference, which will be + # present in the remote_products list. + x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString'] + y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString'] + x_index = remote_products.index(x_remote) + y_index = remote_products.index(y_remote) + + # Use the order of each remote PBXFileReference in remote_products to + # determine the sort order. + return cmp(x_index, y_index) + + for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems(): + # Build up a list of products in the remote project file, ordered the + # same as the targets that produce them. 
+ remote_products = [] + for target in other_pbxproject._properties['targets']: + if not isinstance(target, PBXNativeTarget): + continue + remote_products.append(target._properties['productReference']) + + # Sort the PBXReferenceProxy children according to the list of remote + # products. + product_group = ref_dict['ProductGroup'] + product_group._properties['children'] = sorted( + product_group._properties['children'], + cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp)) + + +class XCProjectFile(XCObject): + _schema = XCObject._schema.copy() + _schema.update({ + 'archiveVersion': [0, int, 0, 1, 1], + 'classes': [0, dict, 0, 1, {}], + 'objectVersion': [0, int, 0, 1, 46], + 'rootObject': [0, PBXProject, 1, 1], + }) + + def ComputeIDs(self, recursive=True, overwrite=True, hash=None): + # Although XCProjectFile is implemented here as an XCObject, it's not a + # proper object in the Xcode sense, and it certainly doesn't have its own + # ID. Pass through an attempt to update IDs to the real root object. + if recursive: + self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash) + + def Print(self, file=sys.stdout): + self.VerifyHasRequiredProperties() + + # Add the special "objects" property, which will be caught and handled + # separately during printing. This structure allows a fairly standard + # loop do the normal printing. + self._properties['objects'] = {} + self._XCPrint(file, 0, '// !$*UTF8*$!\n') + if self._should_print_single_line: + self._XCPrint(file, 0, '{ ') + else: + self._XCPrint(file, 0, '{\n') + for property, value in sorted(self._properties.iteritems(), + cmp=lambda x, y: cmp(x, y)): + if property == 'objects': + self._PrintObjects(file) + else: + self._XCKVPrint(file, 1, property, value) + self._XCPrint(file, 0, '}\n') + del self._properties['objects'] + + def _PrintObjects(self, file): + if self._should_print_single_line: + self._XCPrint(file, 0, 'objects = {') + else: + self._XCPrint(file, 1, 'objects = {\n') + + objects_by_class = {} + for object in self.Descendants(): + if object == self: + continue + class_name = object.__class__.__name__ + if not class_name in objects_by_class: + objects_by_class[class_name] = [] + objects_by_class[class_name].append(object) + + for class_name in sorted(objects_by_class): + self._XCPrint(file, 0, '\n') + self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n') + for object in sorted(objects_by_class[class_name], + cmp=lambda x, y: cmp(x.id, y.id)): + object.Print(file) + self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n') + + if self._should_print_single_line: + self._XCPrint(file, 0, '}; ') + else: + self._XCPrint(file, 1, '};\n') diff --git a/deps/libuv/build/gyp/pylib/gyp/xml_fix.py b/deps/libuv/build/gyp/pylib/gyp/xml_fix.py new file mode 100644 index 00000000..5de84815 --- /dev/null +++ b/deps/libuv/build/gyp/pylib/gyp/xml_fix.py @@ -0,0 +1,69 @@ +# Copyright (c) 2011 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Applies a fix to CR LF TAB handling in xml.dom. + +Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293 +Working around this: http://bugs.python.org/issue5752 +TODO(bradnelson): Consider dropping this when we drop XP support. 
+""" + + +import xml.dom.minidom + + +def _Replacement_write_data(writer, data, is_attrib=False): + """Writes datachars to writer.""" + data = data.replace("&", "&").replace("<", "<") + data = data.replace("\"", """).replace(">", ">") + if is_attrib: + data = data.replace( + "\r", " ").replace( + "\n", " ").replace( + "\t", " ") + writer.write(data) + + +def _Replacement_writexml(self, writer, indent="", addindent="", newl=""): + # indent = current indentation + # addindent = indentation to add to higher levels + # newl = newline string + writer.write(indent+"<" + self.tagName) + + attrs = self._get_attributes() + a_names = attrs.keys() + a_names.sort() + + for a_name in a_names: + writer.write(" %s=\"" % a_name) + _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True) + writer.write("\"") + if self.childNodes: + writer.write(">%s" % newl) + for node in self.childNodes: + node.writexml(writer, indent + addindent, addindent, newl) + writer.write("%s%s" % (indent, self.tagName, newl)) + else: + writer.write("/>%s" % newl) + + +class XmlFix(object): + """Object to manage temporary patching of xml.dom.minidom.""" + + def __init__(self): + # Preserve current xml.dom.minidom functions. + self.write_data = xml.dom.minidom._write_data + self.writexml = xml.dom.minidom.Element.writexml + # Inject replacement versions of a function and a method. + xml.dom.minidom._write_data = _Replacement_write_data + xml.dom.minidom.Element.writexml = _Replacement_writexml + + def Cleanup(self): + if self.write_data: + xml.dom.minidom._write_data = self.write_data + xml.dom.minidom.Element.writexml = self.writexml + self.write_data = None + + def __del__(self): + self.Cleanup() diff --git a/deps/libuv/build/gyp/setup.py b/deps/libuv/build/gyp/setup.py new file mode 100755 index 00000000..75a42558 --- /dev/null +++ b/deps/libuv/build/gyp/setup.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +# Copyright (c) 2009 Google Inc. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +from setuptools import setup + +setup( + name='gyp', + version='0.1', + description='Generate Your Projects', + author='Chromium Authors', + author_email='chromium-dev@googlegroups.com', + url='http://code.google.com/p/gyp', + package_dir = {'': 'pylib'}, + packages=['gyp', 'gyp.generator'], + entry_points = {'console_scripts': ['gyp=gyp:script_main'] } +) diff --git a/deps/libuv/checksparse.sh b/deps/libuv/checksparse.sh new file mode 100755 index 00000000..68e3bde3 --- /dev/null +++ b/deps/libuv/checksparse.sh @@ -0,0 +1,236 @@ +#!/bin/sh + +# Copyright (c) 2013, Ben Noordhuis +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +SPARSE=${SPARSE:-sparse} + +SPARSE_FLAGS=${SPARSE_FLAGS:-" +-D__POSIX__ +-Wsparse-all +-Wno-do-while +-Wno-transparent-union +-Iinclude +-Isrc +"} + +SOURCES=" +include/tree.h +include/uv-unix.h +include/uv.h +src/fs-poll.c +src/inet.c +src/queue.h +src/unix/async.c +src/unix/core.c +src/unix/dl.c +src/unix/fs.c +src/unix/getaddrinfo.c +src/unix/internal.h +src/unix/loop-watcher.c +src/unix/loop.c +src/unix/pipe.c +src/unix/poll.c +src/unix/process.c +src/unix/signal.c +src/unix/stream.c +src/unix/tcp.c +src/unix/thread.c +src/unix/threadpool.c +src/unix/timer.c +src/unix/tty.c +src/unix/udp.c +src/uv-common.c +src/uv-common.h +" + +TESTS=" +test/benchmark-async-pummel.c +test/benchmark-async.c +test/benchmark-fs-stat.c +test/benchmark-getaddrinfo.c +test/benchmark-loop-count.c +test/benchmark-million-async.c +test/benchmark-million-timers.c +test/benchmark-multi-accept.c +test/benchmark-ping-pongs.c +test/benchmark-pound.c +test/benchmark-pump.c +test/benchmark-sizes.c +test/benchmark-spawn.c +test/benchmark-tcp-write-batch.c +test/benchmark-thread.c +test/benchmark-udp-pummel.c +test/blackhole-server.c +test/dns-server.c +test/echo-server.c +test/run-benchmarks.c +test/run-tests.c +test/runner-unix.c +test/runner-unix.h +test/runner.c +test/runner.h +test/task.h +test/test-active.c +test/test-async.c +test/test-barrier.c +test/test-callback-order.c +test/test-callback-stack.c +test/test-condvar.c +test/test-connection-fail.c +test/test-cwd-and-chdir.c +test/test-delayed-accept.c +test/test-dlerror.c +test/test-embed.c +test/test-error.c +test/test-fail-always.c +test/test-fs-event.c +test/test-fs-poll.c +test/test-fs.c +test/test-get-currentexe.c +test/test-get-loadavg.c +test/test-get-memory.c +test/test-get-passwd.c +test/test-getaddrinfo.c +test/test-getsockname.c +test/test-homedir.c +test/test-hrtime.c +test/test-idle.c +test/test-ip6-addr.c +test/test-ipc-send-recv.c +test/test-ipc.c +test/test-loop-handles.c +test/test-multiple-listen.c +test/test-mutexes.c +test/test-pass-always.c +test/test-ping-pong.c +test/test-pipe-bind-error.c +test/test-pipe-connect-error.c +test/test-pipe-sendmsg.c +test/test-pipe-server-close.c +test/test-platform-output.c +test/test-poll-close.c +test/test-poll.c +test/test-process-title.c +test/test-ref.c +test/test-run-nowait.c +test/test-run-once.c +test/test-semaphore.c +test/test-shutdown-close.c +test/test-shutdown-eof.c +test/test-signal-multiple-loops.c +test/test-signal.c +test/test-spawn.c +test/test-stdio-over-pipes.c +test/test-tcp-bind-error.c +test/test-tcp-bind6-error.c +test/test-tcp-close-while-connecting.c +test/test-tcp-close-accept.c +test/test-tcp-close.c +test/test-tcp-connect-error-after-write.c +test/test-tcp-connect-error.c +test/test-tcp-connect-timeout.c +test/test-tcp-connect6-error.c +test/test-tcp-flags.c +test/test-tcp-open.c +test/test-tcp-read-stop.c +test/test-tcp-shutdown-after-write.c +test/test-tcp-unexpected-read.c +test/test-tcp-oob.c +test/test-tcp-write-error.c +test/test-tcp-write-to-half-open-connection.c +test/test-tcp-writealot.c +test/test-thread.c +test/test-threadpool-cancel.c +test/test-threadpool.c +test/test-timer-again.c +test/test-timer.c +test/test-tmpdir.c +test/test-tty.c +test/test-udp-dgram-too-big.c +test/test-udp-ipv6.c +test/test-udp-multicast-join.c +test/test-udp-multicast-ttl.c +test/test-udp-open.c +test/test-udp-options.c +test/test-udp-send-and-recv.c +test/test-walk-handles.c +test/test-watcher-cross-stop.c +" + +case `uname -s` in +AIX) + SPARSE_FLAGS="$SPARSE_FLAGS -D_AIX=1" + 
SOURCES="$SOURCES + src/unix/aix.c" + ;; +Darwin) + SPARSE_FLAGS="$SPARSE_FLAGS -D__APPLE__=1" + SOURCES="$SOURCES + include/uv-bsd.h + src/unix/darwin.c + src/unix/kqueue.c + src/unix/fsevents.c" + ;; +DragonFly) + SPARSE_FLAGS="$SPARSE_FLAGS -D__DragonFly__=1" + SOURCES="$SOURCES + include/uv-bsd.h + src/unix/kqueue.c + src/unix/freebsd.c" + ;; +FreeBSD) + SPARSE_FLAGS="$SPARSE_FLAGS -D__FreeBSD__=1" + SOURCES="$SOURCES + include/uv-bsd.h + src/unix/kqueue.c + src/unix/freebsd.c" + ;; +Linux) + SPARSE_FLAGS="$SPARSE_FLAGS -D__linux__=1" + SOURCES="$SOURCES + include/uv-linux.h + src/unix/linux-inotify.c + src/unix/linux-core.c + src/unix/linux-syscalls.c + src/unix/linux-syscalls.h" + ;; +NetBSD) + SPARSE_FLAGS="$SPARSE_FLAGS -D__NetBSD__=1" + SOURCES="$SOURCES + include/uv-bsd.h + src/unix/kqueue.c + src/unix/netbsd.c" + ;; +OpenBSD) + SPARSE_FLAGS="$SPARSE_FLAGS -D__OpenBSD__=1" + SOURCES="$SOURCES + include/uv-bsd.h + src/unix/kqueue.c + src/unix/openbsd.c" + ;; +SunOS) + SPARSE_FLAGS="$SPARSE_FLAGS -D__sun=1" + SOURCES="$SOURCES + include/uv-sunos.h + src/unix/sunos.c" + ;; +esac + +for ARCH in __i386__ __x86_64__ __arm__ __mips__; do + $SPARSE $SPARSE_FLAGS -D$ARCH=1 $SOURCES +done + +# Tests are architecture independent. +$SPARSE $SPARSE_FLAGS -Itest $TESTS diff --git a/deps/libuv/common.gypi b/deps/libuv/common.gypi new file mode 100644 index 00000000..44db701d --- /dev/null +++ b/deps/libuv/common.gypi @@ -0,0 +1,206 @@ +{ + 'variables': { + 'target_arch%': 'ia32', # set v8's target architecture + 'host_arch%': 'ia32', # set v8's host architecture + 'uv_library%': 'static_library', # allow override to 'shared_library' for DLL/.so builds + 'msvs_multi_core_compile': '0', # we do enable multicore compiles, but not using the V8 way + }, + + 'target_defaults': { + 'default_configuration': 'Debug', + 'configurations': { + 'Debug': { + 'defines': [ 'DEBUG', '_DEBUG' ], + 'cflags': [ '-g' ], + 'msvs_settings': { + 'VCCLCompilerTool': { + 'target_conditions': [ + ['uv_library=="static_library"', { + 'RuntimeLibrary': 1, # static debug + }, { + 'RuntimeLibrary': 3, # DLL debug + }], + ], + 'Optimization': 0, # /Od, no optimization + 'MinimalRebuild': 'false', + 'OmitFramePointers': 'false', + 'BasicRuntimeChecks': 3, # /RTC1 + }, + 'VCLinkerTool': { + 'LinkIncremental': 2, # enable incremental linking + }, + }, + 'xcode_settings': { + 'GCC_OPTIMIZATION_LEVEL': '0', + 'OTHER_CFLAGS': [ '-Wno-strict-aliasing' ], + }, + 'conditions': [ + ['OS != "zos"', { + 'cflags': [ '-O0', '-fwrapv' ] + }], + ['OS == "android"', { + 'cflags': [ '-fPIE' ], + 'ldflags': [ '-fPIE', '-pie' ] + }] + ] + }, + 'Release': { + 'defines': [ 'NDEBUG' ], + 'cflags': [ + '-O3', + '-fstrict-aliasing', + '-fomit-frame-pointer', + '-fdata-sections', + '-ffunction-sections', + ], + 'msvs_settings': { + 'VCCLCompilerTool': { + 'target_conditions': [ + ['uv_library=="static_library"', { + 'RuntimeLibrary': 0, # static release + }, { + 'RuntimeLibrary': 2, # debug release + }], + ], + 'Optimization': 3, # /Ox, full optimization + 'FavorSizeOrSpeed': 1, # /Ot, favour speed over size + 'InlineFunctionExpansion': 2, # /Ob2, inline anything eligible + 'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG + 'OmitFramePointers': 'true', + 'EnableFunctionLevelLinking': 'true', + 'EnableIntrinsicFunctions': 'true', + }, + 'VCLibrarianTool': { + 'AdditionalOptions': [ + '/LTCG', # link time code generation + ], + }, + 'VCLinkerTool': { + 'LinkTimeCodeGeneration': 1, # link-time code generation + 
'OptimizeReferences': 2, # /OPT:REF + 'EnableCOMDATFolding': 2, # /OPT:ICF + 'LinkIncremental': 1, # disable incremental linking + }, + }, + } + }, + 'msvs_settings': { + 'VCCLCompilerTool': { + 'StringPooling': 'true', # pool string literals + 'DebugInformationFormat': 3, # Generate a PDB + 'WarningLevel': 3, + 'BufferSecurityCheck': 'true', + 'ExceptionHandling': 1, # /EHsc + 'SuppressStartupBanner': 'true', + 'WarnAsError': 'false', + 'AdditionalOptions': [ + '/MP', # compile across multiple CPUs + ], + }, + 'VCLibrarianTool': { + }, + 'VCLinkerTool': { + 'GenerateDebugInformation': 'true', + 'RandomizedBaseAddress': 2, # enable ASLR + 'DataExecutionPrevention': 2, # enable DEP + 'AllowIsolation': 'true', + 'SuppressStartupBanner': 'true', + 'target_conditions': [ + ['_type=="executable"', { + 'SubSystem': 1, # console executable + }], + ], + }, + }, + 'conditions': [ + ['OS == "win"', { + 'msvs_cygwin_shell': 0, # prevent actions from trying to use cygwin + 'defines': [ + 'WIN32', + # we don't really want VC++ warning us about + # how dangerous C functions are... + '_CRT_SECURE_NO_DEPRECATE', + # ... or that C implementations shouldn't use + # POSIX names + '_CRT_NONSTDC_NO_DEPRECATE', + ], + 'target_conditions': [ + ['target_arch=="x64"', { + 'msvs_configuration_platform': 'x64' + }] + ] + }], + ['OS in "freebsd dragonflybsd linux openbsd solaris android"', { + 'cflags': [ '-Wall' ], + 'cflags_cc': [ '-fno-rtti', '-fno-exceptions' ], + 'target_conditions': [ + ['_type=="static_library"', { + 'standalone_static_library': 1, # disable thin archive which needs binutils >= 2.19 + }], + ], + 'conditions': [ + [ 'host_arch != target_arch and target_arch=="ia32"', { + 'cflags': [ '-m32' ], + 'ldflags': [ '-m32' ], + }], + [ 'target_arch=="x32"', { + 'cflags': [ '-mx32' ], + 'ldflags': [ '-mx32' ], + }], + [ 'OS=="linux"', { + 'cflags': [ '-ansi' ], + }], + [ 'OS=="solaris"', { + 'cflags': [ '-pthreads' ], + 'ldflags': [ '-pthreads' ], + }], + [ 'OS not in "solaris android zos"', { + 'cflags': [ '-pthread' ], + 'ldflags': [ '-pthread' ], + }], + ], + }], + ['OS=="mac"', { + 'xcode_settings': { + 'ALWAYS_SEARCH_USER_PATHS': 'NO', + 'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks + 'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic + # (Equivalent to -fPIC) + 'GCC_ENABLE_CPP_EXCEPTIONS': 'NO', # -fno-exceptions + 'GCC_ENABLE_CPP_RTTI': 'NO', # -fno-rtti + 'GCC_ENABLE_PASCAL_STRINGS': 'NO', # No -mpascal-strings + 'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics + 'PREBINDING': 'NO', # No -Wl,-prebind + 'USE_HEADERMAP': 'NO', + 'OTHER_CFLAGS': [ + '-fstrict-aliasing', + ], + 'WARNING_CFLAGS': [ + '-Wall', + '-Wendif-labels', + '-W', + '-Wno-unused-parameter', + ], + }, + 'conditions': [ + ['target_arch=="ia32"', { + 'xcode_settings': {'ARCHS': ['i386']}, + }], + ['target_arch=="x64"', { + 'xcode_settings': {'ARCHS': ['x86_64']}, + }], + ], + 'target_conditions': [ + ['_type!="static_library"', { + 'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']}, + }], + ], + }], + ['OS=="solaris"', { + 'cflags': [ '-fno-omit-frame-pointer' ], + # pull in V8's postmortem metadata + 'ldflags': [ '-Wl,-z,allextract' ] + }], + ], + }, +} diff --git a/deps/libuv/configure.ac b/deps/libuv/configure.ac new file mode 100644 index 00000000..5abd8a19 --- /dev/null +++ b/deps/libuv/configure.ac @@ -0,0 +1,74 @@ +# Copyright (c) 2013, Ben Noordhuis +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# 
copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +AC_PREREQ(2.57) +AC_INIT([libuv], [1.10.0], [https://github.com/libuv/libuv/issues]) +AC_CONFIG_MACRO_DIR([m4]) +m4_include([m4/libuv-extra-automake-flags.m4]) +m4_include([m4/as_case.m4]) +m4_include([m4/libuv-check-flags.m4]) +AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects] UV_EXTRA_AUTOMAKE_FLAGS) +AC_CANONICAL_HOST +AC_ENABLE_SHARED +AC_ENABLE_STATIC +AC_PROG_CC +AM_PROG_CC_C_O +AS_IF([AS_CASE([$host_os],[openedition*], [false], [true])], [ + CC_CHECK_CFLAGS_APPEND([-pedantic]) +]) +CC_FLAG_VISIBILITY #[-fvisibility=hidden] +CC_CHECK_CFLAGS_APPEND([-g]) +CC_CHECK_CFLAGS_APPEND([-std=gnu89]) +CC_CHECK_CFLAGS_APPEND([-Wall]) +CC_CHECK_CFLAGS_APPEND([-Wextra]) +CC_CHECK_CFLAGS_APPEND([-Wno-unused-parameter]) +# AM_PROG_AR is not available in automake v0.11 but it's essential in v0.12. +m4_ifdef([AM_PROG_AR], [AM_PROG_AR]) +# autoconf complains if AC_PROG_LIBTOOL precedes AM_PROG_AR. +AC_PROG_LIBTOOL +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) +LT_INIT +# TODO(bnoordhuis) Check for -pthread vs. -pthreads +AC_CHECK_LIB([dl], [dlopen]) +AC_CHECK_LIB([kstat], [kstat_lookup]) +AC_CHECK_LIB([kvm], [kvm_open]) +AC_CHECK_LIB([nsl], [gethostbyname]) +AC_CHECK_LIB([perfstat], [perfstat_cpu]) +AC_CHECK_LIB([pthread], [pthread_mutex_init]) +AC_CHECK_LIB([rt], [clock_gettime]) +AC_CHECK_LIB([sendfile], [sendfile]) +AC_CHECK_LIB([socket], [socket]) +AC_SYS_LARGEFILE +AM_CONDITIONAL([AIX], [AS_CASE([$host_os],[aix*], [true], [false])]) +AM_CONDITIONAL([ANDROID], [AS_CASE([$host_os],[linux-android*],[true], [false])]) +AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])]) +AM_CONDITIONAL([DRAGONFLY],[AS_CASE([$host_os],[dragonfly*], [true], [false])]) +AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[*freebsd*], [true], [false])]) +AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])]) +AM_CONDITIONAL([NETBSD], [AS_CASE([$host_os],[netbsd*], [true], [false])]) +AM_CONDITIONAL([OPENBSD], [AS_CASE([$host_os],[openbsd*], [true], [false])]) +AM_CONDITIONAL([OS390], [AS_CASE([$host_os],[openedition*], [true], [false])]) +AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])]) +AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])]) +AS_CASE([$host_os],[mingw*], [ + LIBS="$LIBS -lws2_32 -lpsapi -liphlpapi -lshell32 -luserenv -luser32" +]) +AC_CHECK_HEADERS([sys/ahafs_evProds.h]) +AC_CHECK_PROG(PKG_CONFIG, pkg-config, yes) +AM_CONDITIONAL([HAVE_PKG_CONFIG], [test "x$PKG_CONFIG" != "x"]) +AS_IF([test "x$PKG_CONFIG" != "x"], [ + AC_CONFIG_FILES([libuv.pc]) +]) +AC_CONFIG_FILES([Makefile]) +AC_OUTPUT diff --git a/deps/libuv/gyp_uv.py b/deps/libuv/gyp_uv.py new file mode 100755 index 00000000..bd37d95c --- /dev/null +++ b/deps/libuv/gyp_uv.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python + +import os +import platform +import sys + +try: + import multiprocessing.synchronize + gyp_parallel_support = True +except ImportError: + gyp_parallel_support = False + + +CC = 
os.environ.get('CC', 'cc') +script_dir = os.path.dirname(__file__) +uv_root = os.path.normpath(script_dir) +output_dir = os.path.join(os.path.abspath(uv_root), 'out') + +sys.path.insert(0, os.path.join(uv_root, 'build', 'gyp', 'pylib')) +try: + import gyp +except ImportError: + print('You need to install gyp in build/gyp first. See the README.') + sys.exit(42) + + +def host_arch(): + machine = platform.machine() + if machine == 'i386': return 'ia32' + if machine == 'AMD64': return 'x64' + if machine == 'x86_64': return 'x64' + if machine.startswith('arm'): return 'arm' + if machine.startswith('mips'): return 'mips' + return machine # Return as-is and hope for the best. + + +def run_gyp(args): + rc = gyp.main(args) + if rc != 0: + print('Error running GYP') + sys.exit(rc) + + +if __name__ == '__main__': + args = sys.argv[1:] + + # GYP bug. + # On msvs it will crash if it gets an absolute path. + # On Mac/make it will crash if it doesn't get an absolute path. + if sys.platform == 'win32': + args.append(os.path.join(uv_root, 'uv.gyp')) + common_fn = os.path.join(uv_root, 'common.gypi') + options_fn = os.path.join(uv_root, 'options.gypi') + # we force vs 2010 over 2008 which would otherwise be the default for gyp + if not os.environ.get('GYP_MSVS_VERSION'): + os.environ['GYP_MSVS_VERSION'] = '2010' + else: + args.append(os.path.join(os.path.abspath(uv_root), 'uv.gyp')) + common_fn = os.path.join(os.path.abspath(uv_root), 'common.gypi') + options_fn = os.path.join(os.path.abspath(uv_root), 'options.gypi') + + if os.path.exists(common_fn): + args.extend(['-I', common_fn]) + + if os.path.exists(options_fn): + args.extend(['-I', options_fn]) + + args.append('--depth=' + uv_root) + + # There's a bug with windows which doesn't allow this feature. + if sys.platform != 'win32': + if '-f' not in args: + args.extend('-f make'.split()) + if 'eclipse' not in args and 'ninja' not in args: + args.extend(['-Goutput_dir=' + output_dir]) + args.extend(['--generator-output', output_dir]) + + if not any(a.startswith('-Dhost_arch=') for a in args): + args.append('-Dhost_arch=%s' % host_arch()) + + if not any(a.startswith('-Dtarget_arch=') for a in args): + args.append('-Dtarget_arch=%s' % host_arch()) + + if not any(a.startswith('-Duv_library=') for a in args): + args.append('-Duv_library=static_library') + + # Some platforms (OpenBSD for example) don't have multiprocessing.synchronize + # so gyp must be run with --no-parallel + if not gyp_parallel_support: + args.append('--no-parallel') + + gyp_args = list(args) + print(gyp_args) + run_gyp(gyp_args) diff --git a/deps/libuv/include/android-ifaddrs.h b/deps/libuv/include/android-ifaddrs.h new file mode 100644 index 00000000..9cd19fec --- /dev/null +++ b/deps/libuv/include/android-ifaddrs.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 1995, 1999 + * Berkeley Software Design, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. 
BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp + */ + +#ifndef _IFADDRS_H_ +#define _IFADDRS_H_ + +struct ifaddrs { + struct ifaddrs *ifa_next; + char *ifa_name; + unsigned int ifa_flags; + struct sockaddr *ifa_addr; + struct sockaddr *ifa_netmask; + struct sockaddr *ifa_dstaddr; + void *ifa_data; +}; + +/* + * This may have been defined in . Note that if is + * to be included it must be included before this header file. + */ +#ifndef ifa_broadaddr +#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */ +#endif + +#include + +__BEGIN_DECLS +extern int getifaddrs(struct ifaddrs **ifap); +extern void freeifaddrs(struct ifaddrs *ifa); +__END_DECLS + +#endif diff --git a/deps/libuv/include/pthread-barrier.h b/deps/libuv/include/pthread-barrier.h new file mode 100644 index 00000000..3e01705d --- /dev/null +++ b/deps/libuv/include/pthread-barrier.h @@ -0,0 +1,66 @@ +/* +Copyright (c) 2016, Kari Tristan Helgason + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + +#ifndef _UV_PTHREAD_BARRIER_ +#define _UV_PTHREAD_BARRIER_ +#include +#include +#include /* sem_t */ + +#define PTHREAD_BARRIER_SERIAL_THREAD 0x12345 + +/* + * To maintain ABI compatibility with + * libuv v1.x struct is padded according + * to target platform + */ +#if defined(__ANDROID__) +# define UV_BARRIER_STRUCT_PADDING \ + sizeof(pthread_mutex_t) + \ + sizeof(pthread_cond_t) + \ + sizeof(unsigned int) - \ + sizeof(void *) +#elif defined(__APPLE__) +# define UV_BARRIER_STRUCT_PADDING \ + sizeof(pthread_mutex_t) + \ + 2 * sizeof(sem_t) + \ + 2 * sizeof(unsigned int) - \ + sizeof(void *) +#else +# define UV_BARRIER_STRUCT_PADDING 0 +#endif + +typedef struct { + pthread_mutex_t mutex; + pthread_cond_t cond; + unsigned threshold; + unsigned in; + unsigned out; +} _uv_barrier; + +typedef struct { + _uv_barrier* b; + char _pad[UV_BARRIER_STRUCT_PADDING]; +} pthread_barrier_t; + +int pthread_barrier_init(pthread_barrier_t* barrier, + const void* barrier_attr, + unsigned count); + +int pthread_barrier_wait(pthread_barrier_t* barrier); +int pthread_barrier_destroy(pthread_barrier_t *barrier); + +#endif /* _UV_PTHREAD_BARRIER_ */ diff --git a/deps/libuv/include/stdint-msvc2008.h b/deps/libuv/include/stdint-msvc2008.h new file mode 100644 index 00000000..d02608a5 --- /dev/null +++ b/deps/libuv/include/stdint-msvc2008.h @@ -0,0 +1,247 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" 
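+
+/* Illustrative sketch for the pthread-barrier.h shim declared above (an
+ * editorial addition, not part of the original sources). It shows how the
+ * emulated barrier is typically driven, assuming two hypothetical worker
+ * threads; pthread_barrier_init is expected to return 0 on success:
+ *
+ *   pthread_barrier_t barrier;
+ *   pthread_barrier_init(&barrier, NULL, 2);
+ *
+ *   In each of the two threads, once its setup work is done:
+ *
+ *   int r = pthread_barrier_wait(&barrier);
+ *   if (r == PTHREAD_BARRIER_SERIAL_THREAD) {
+ *     Exactly one waiter observes this value; all waiters then proceed.
+ *   }
+ *
+ *   pthread_barrier_destroy(&barrier);
+ */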
+#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we should wrap include with 'extern "C++" {}' +// or compiler give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#ifdef __cplusplus +extern "C" { +#endif +# include +#ifdef __cplusplus +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. +#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest 
minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +#define INTMAX_C INT64_C +#define UINTMAX_C UINT64_C + +#endif // __STDC_CONSTANT_MACROS ] + + +#endif // _MSC_STDINT_H_ ] diff --git a/deps/libuv/include/tree.h b/deps/libuv/include/tree.h new file mode 100644 index 00000000..f936416e --- /dev/null +++ b/deps/libuv/include/tree.h @@ -0,0 +1,768 @@ +/*- + * Copyright 2002 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef UV_TREE_H_ +#define UV_TREE_H_ + +#ifndef UV__UNUSED +# if __GNUC__ +# define UV__UNUSED __attribute__((unused)) +# else +# define UV__UNUSED +# endif +#endif + +/* + * This file defines data structures for different types of trees: + * splay trees and red-black trees. + * + * A splay tree is a self-organizing data structure. Every operation + * on the tree causes a splay to happen. The splay moves the requested + * node to the root of the tree and partly rebalances it. + * + * This has the benefit that request locality causes faster lookups as + * the requested nodes move to the top of the tree. On the other hand, + * every lookup causes memory writes. + * + * The Balance Theorem bounds the total access time for m operations + * and n inserts on an initially empty tree as O((m + n)lg n). The + * amortized cost for a sequence of m accesses to a splay tree is O(lg n); + * + * A red-black tree is a binary search tree with the node color as an + * extra attribute. It fulfills a set of conditions: + * - every search path from the root to a leaf consists of the + * same number of black nodes, + * - each red node (except for the root) has a black parent, + * - each leaf node is black. + * + * Every operation on a red-black tree is bounded as O(lg n). + * The maximum height of a red-black tree is 2lg (n+1). 
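+ *
+ * Illustrative sketch (an editorial addition, not part of the original
+ * header): a minimal red-black tree declared with the macros below, using
+ * the hypothetical names example_node, example_tree and example_cmp. The
+ * comparison callback only needs to return a value that is <0, ==0 or >0;
+ * the generated functions test nothing but its sign.
+ *
+ *   struct example_node {
+ *     RB_ENTRY(example_node) entry;
+ *     int key;
+ *   };
+ *
+ *   static int example_cmp(const struct example_node* a,
+ *                          const struct example_node* b) {
+ *     return a->key < b->key ? -1 : a->key > b->key;
+ *   }
+ *
+ *   RB_HEAD(example_tree, example_node);
+ *   RB_GENERATE_STATIC(example_tree, example_node, entry, example_cmp)
+ *
+ * With a struct example_tree head = RB_INITIALIZER(&head) and a node
+ * pointer n, RB_INSERT(example_tree, &head, n) returns NULL on success or
+ * the already-present node with an equal key, RB_FIND returns the matching
+ * node or NULL, and RB_REMOVE unlinks and returns the removed node.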
+ */ + +#define SPLAY_HEAD(name, type) \ +struct name { \ + struct type *sph_root; /* root of the tree */ \ +} + +#define SPLAY_INITIALIZER(root) \ + { NULL } + +#define SPLAY_INIT(root) do { \ + (root)->sph_root = NULL; \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_ENTRY(type) \ +struct { \ + struct type *spe_left; /* left element */ \ + struct type *spe_right; /* right element */ \ +} + +#define SPLAY_LEFT(elm, field) (elm)->field.spe_left +#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right +#define SPLAY_ROOT(head) (head)->sph_root +#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) + +/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ +#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_LINKLEFT(head, tmp, field) do { \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_LINKRIGHT(head, tmp, field) do { \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ +} while (/*CONSTCOND*/ 0) + +#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ + SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ + SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ +} while (/*CONSTCOND*/ 0) + +/* Generates prototypes and inline functions */ + +#define SPLAY_PROTOTYPE(name, type, field, cmp) \ +void name##_SPLAY(struct name *, struct type *); \ +void name##_SPLAY_MINMAX(struct name *, int); \ +struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ +struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ + \ +/* Finds the node with the same key as elm */ \ +static __inline struct type * \ +name##_SPLAY_FIND(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) \ + return(NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) \ + return (head->sph_root); \ + return (NULL); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_NEXT(struct name *head, struct type *elm) \ +{ \ + name##_SPLAY(head, elm); \ + if (SPLAY_RIGHT(elm, field) != NULL) { \ + elm = SPLAY_RIGHT(elm, field); \ + while (SPLAY_LEFT(elm, field) != NULL) { \ + elm = SPLAY_LEFT(elm, field); \ + } \ + } else \ + elm = NULL; \ + return (elm); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_MIN_MAX(struct name *head, int val) \ +{ \ + name##_SPLAY_MINMAX(head, val); \ + return (SPLAY_ROOT(head)); \ +} + +/* Main splay operation. 
+ * Moves node close to the key of elm to top + */ +#define SPLAY_GENERATE(name, type, field, cmp) \ +struct type * \ +name##_SPLAY_INSERT(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) { \ + SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ + } else { \ + int __comp; \ + name##_SPLAY(head, elm); \ + __comp = (cmp)(elm, (head)->sph_root); \ + if(__comp < 0) { \ + SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \ + SPLAY_RIGHT(elm, field) = (head)->sph_root; \ + SPLAY_LEFT((head)->sph_root, field) = NULL; \ + } else if (__comp > 0) { \ + SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \ + SPLAY_LEFT(elm, field) = (head)->sph_root; \ + SPLAY_RIGHT((head)->sph_root, field) = NULL; \ + } else \ + return ((head)->sph_root); \ + } \ + (head)->sph_root = (elm); \ + return (NULL); \ +} \ + \ +struct type * \ +name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *__tmp; \ + if (SPLAY_EMPTY(head)) \ + return (NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) { \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ + } else { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ + name##_SPLAY(head, elm); \ + SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ + } \ + return (elm); \ + } \ + return (NULL); \ +} \ + \ +void \ +name##_SPLAY(struct name *head, struct type *elm) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ + int __comp; \ + \ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ + __left = __right = &__node; \ + \ + while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) > 0){ \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} \ + \ +/* Splay with either the minimum or the maximum element \ + * Used to find minimum or maximum element in tree. 
\ + */ \ +void name##_SPLAY_MINMAX(struct name *head, int __comp) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ + \ + SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \ + __left = __right = &__node; \ + \ + while (1) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp > 0) { \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} + +#define SPLAY_NEGINF -1 +#define SPLAY_INF 1 + +#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) +#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) +#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) +#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) +#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) +#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) + +#define SPLAY_FOREACH(x, name, head) \ + for ((x) = SPLAY_MIN(name, head); \ + (x) != NULL; \ + (x) = SPLAY_NEXT(name, head, x)) + +/* Macros that define a red-black tree */ +#define RB_HEAD(name, type) \ +struct name { \ + struct type *rbh_root; /* root of the tree */ \ +} + +#define RB_INITIALIZER(root) \ + { NULL } + +#define RB_INIT(root) do { \ + (root)->rbh_root = NULL; \ +} while (/*CONSTCOND*/ 0) + +#define RB_BLACK 0 +#define RB_RED 1 +#define RB_ENTRY(type) \ +struct { \ + struct type *rbe_left; /* left element */ \ + struct type *rbe_right; /* right element */ \ + struct type *rbe_parent; /* parent element */ \ + int rbe_color; /* node color */ \ +} + +#define RB_LEFT(elm, field) (elm)->field.rbe_left +#define RB_RIGHT(elm, field) (elm)->field.rbe_right +#define RB_PARENT(elm, field) (elm)->field.rbe_parent +#define RB_COLOR(elm, field) (elm)->field.rbe_color +#define RB_ROOT(head) (head)->rbh_root +#define RB_EMPTY(head) (RB_ROOT(head) == NULL) + +#define RB_SET(elm, parent, field) do { \ + RB_PARENT(elm, field) = parent; \ + RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ + RB_COLOR(elm, field) = RB_RED; \ +} while (/*CONSTCOND*/ 0) + +#define RB_SET_BLACKRED(black, red, field) do { \ + RB_COLOR(black, field) = RB_BLACK; \ + RB_COLOR(red, field) = RB_RED; \ +} while (/*CONSTCOND*/ 0) + +#ifndef RB_AUGMENT +#define RB_AUGMENT(x) do {} while (0) +#endif + +#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \ + (tmp) = RB_RIGHT(elm, field); \ + if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ + RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_LEFT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ +} while (/*CONSTCOND*/ 0) + +#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \ + (tmp) = RB_LEFT(elm, field); \ + if 
((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \ + RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \ + } \ + RB_AUGMENT(elm); \ + if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \ + if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ + RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ + else \ + RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_RIGHT(tmp, field) = (elm); \ + RB_PARENT(elm, field) = (tmp); \ + RB_AUGMENT(tmp); \ + if ((RB_PARENT(tmp, field))) \ + RB_AUGMENT(RB_PARENT(tmp, field)); \ +} while (/*CONSTCOND*/ 0) + +/* Generates prototypes and inline functions */ +#define RB_PROTOTYPE(name, type, field, cmp) \ + RB_PROTOTYPE_INTERNAL(name, type, field, cmp,) +#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ + RB_PROTOTYPE_INTERNAL(name, type, field, cmp, UV__UNUSED static) +#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ +attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \ +attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\ +attr struct type *name##_RB_REMOVE(struct name *, struct type *); \ +attr struct type *name##_RB_INSERT(struct name *, struct type *); \ +attr struct type *name##_RB_FIND(struct name *, struct type *); \ +attr struct type *name##_RB_NFIND(struct name *, struct type *); \ +attr struct type *name##_RB_NEXT(struct type *); \ +attr struct type *name##_RB_PREV(struct type *); \ +attr struct type *name##_RB_MINMAX(struct name *, int); \ + \ + +/* Main rb operation. + * Moves node close to the key of elm to top + */ +#define RB_GENERATE(name, type, field, cmp) \ + RB_GENERATE_INTERNAL(name, type, field, cmp,) +#define RB_GENERATE_STATIC(name, type, field, cmp) \ + RB_GENERATE_INTERNAL(name, type, field, cmp, UV__UNUSED static) +#define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \ +attr void \ +name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ +{ \ + struct type *parent, *gparent, *tmp; \ + while ((parent = RB_PARENT(elm, field)) != NULL && \ + RB_COLOR(parent, field) == RB_RED) { \ + gparent = RB_PARENT(parent, field); \ + if (parent == RB_LEFT(gparent, field)) { \ + tmp = RB_RIGHT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field); \ + elm = gparent; \ + continue; \ + } \ + if (RB_RIGHT(parent, field) == elm) { \ + RB_ROTATE_LEFT(head, parent, tmp, field); \ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_RIGHT(head, gparent, tmp, field); \ + } else { \ + tmp = RB_LEFT(gparent, field); \ + if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ + RB_COLOR(tmp, field) = RB_BLACK; \ + RB_SET_BLACKRED(parent, gparent, field); \ + elm = gparent; \ + continue; \ + } \ + if (RB_LEFT(parent, field) == elm) { \ + RB_ROTATE_RIGHT(head, parent, tmp, field); \ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(parent, gparent, field); \ + RB_ROTATE_LEFT(head, gparent, tmp, field); \ + } \ + } \ + RB_COLOR(head->rbh_root, field) = RB_BLACK; \ +} \ + \ +attr void \ +name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, \ + struct type *elm) \ +{ \ + struct type *tmp; \ + while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \ + elm != RB_ROOT(head)) { \ + if (RB_LEFT(parent, field) == elm) { \ + tmp = RB_RIGHT(parent, field); \ + if (RB_COLOR(tmp, field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_LEFT(head, 
parent, tmp, field); \ + tmp = RB_RIGHT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \ + struct type *oleft; \ + if ((oleft = RB_LEFT(tmp, field)) \ + != NULL) \ + RB_COLOR(oleft, field) = RB_BLACK; \ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_RIGHT(head, tmp, oleft, field); \ + tmp = RB_RIGHT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field); \ + RB_COLOR(parent, field) = RB_BLACK; \ + if (RB_RIGHT(tmp, field)) \ + RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \ + RB_ROTATE_LEFT(head, parent, tmp, field); \ + elm = RB_ROOT(head); \ + break; \ + } \ + } else { \ + tmp = RB_LEFT(parent, field); \ + if (RB_COLOR(tmp, field) == RB_RED) { \ + RB_SET_BLACKRED(tmp, parent, field); \ + RB_ROTATE_RIGHT(head, parent, tmp, field); \ + tmp = RB_LEFT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \ + (RB_RIGHT(tmp, field) == NULL || \ + RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \ + RB_COLOR(tmp, field) = RB_RED; \ + elm = parent; \ + parent = RB_PARENT(elm, field); \ + } else { \ + if (RB_LEFT(tmp, field) == NULL || \ + RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \ + struct type *oright; \ + if ((oright = RB_RIGHT(tmp, field)) \ + != NULL) \ + RB_COLOR(oright, field) = RB_BLACK; \ + RB_COLOR(tmp, field) = RB_RED; \ + RB_ROTATE_LEFT(head, tmp, oright, field); \ + tmp = RB_LEFT(parent, field); \ + } \ + RB_COLOR(tmp, field) = RB_COLOR(parent, field); \ + RB_COLOR(parent, field) = RB_BLACK; \ + if (RB_LEFT(tmp, field)) \ + RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \ + RB_ROTATE_RIGHT(head, parent, tmp, field); \ + elm = RB_ROOT(head); \ + break; \ + } \ + } \ + } \ + if (elm) \ + RB_COLOR(elm, field) = RB_BLACK; \ +} \ + \ +attr struct type * \ +name##_RB_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *child, *parent, *old = elm; \ + int color; \ + if (RB_LEFT(elm, field) == NULL) \ + child = RB_RIGHT(elm, field); \ + else if (RB_RIGHT(elm, field) == NULL) \ + child = RB_LEFT(elm, field); \ + else { \ + struct type *left; \ + elm = RB_RIGHT(elm, field); \ + while ((left = RB_LEFT(elm, field)) != NULL) \ + elm = left; \ + child = RB_RIGHT(elm, field); \ + parent = RB_PARENT(elm, field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ + if (RB_PARENT(elm, field) == old) \ + parent = elm; \ + (elm)->field = (old)->field; \ + if (RB_PARENT(old, field)) { \ + if (RB_LEFT(RB_PARENT(old, field), field) == old) \ + RB_LEFT(RB_PARENT(old, field), field) = elm; \ + else \ + RB_RIGHT(RB_PARENT(old, field), field) = elm; \ + RB_AUGMENT(RB_PARENT(old, field)); \ + } else \ + RB_ROOT(head) = elm; \ + RB_PARENT(RB_LEFT(old, field), field) = elm; \ + if (RB_RIGHT(old, field)) \ + RB_PARENT(RB_RIGHT(old, field), field) = elm; \ + if (parent) { \ + left = parent; \ + do { \ + RB_AUGMENT(left); \ + } while ((left = RB_PARENT(left, field)) != NULL); \ + } \ + goto color; \ + } \ + 
parent = RB_PARENT(elm, field); \ + color = RB_COLOR(elm, field); \ + if (child) \ + RB_PARENT(child, field) = parent; \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ +color: \ + if (color == RB_BLACK) \ + name##_RB_REMOVE_COLOR(head, parent, child); \ + return (old); \ +} \ + \ +/* Inserts a node into the RB tree */ \ +attr struct type * \ +name##_RB_INSERT(struct name *head, struct type *elm) \ +{ \ + struct type *tmp; \ + struct type *parent = NULL; \ + int comp = 0; \ + tmp = RB_ROOT(head); \ + while (tmp) { \ + parent = tmp; \ + comp = (cmp)(elm, parent); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + RB_SET(elm, parent, field); \ + if (parent != NULL) { \ + if (comp < 0) \ + RB_LEFT(parent, field) = elm; \ + else \ + RB_RIGHT(parent, field) = elm; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = elm; \ + name##_RB_INSERT_COLOR(head, elm); \ + return (NULL); \ +} \ + \ +/* Finds the node with the same key as elm */ \ +attr struct type * \ +name##_RB_FIND(struct name *head, struct type *elm) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (NULL); \ +} \ + \ +/* Finds the first node greater than or equal to the search key */ \ +attr struct type * \ +name##_RB_NFIND(struct name *head, struct type *elm) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + struct type *res = NULL; \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) { \ + res = tmp; \ + tmp = RB_LEFT(tmp, field); \ + } \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (res); \ +} \ + \ +/* ARGSUSED */ \ +attr struct type * \ +name##_RB_NEXT(struct type *elm) \ +{ \ + if (RB_RIGHT(elm, field)) { \ + elm = RB_RIGHT(elm, field); \ + while (RB_LEFT(elm, field)) \ + elm = RB_LEFT(elm, field); \ + } else { \ + if (RB_PARENT(elm, field) && \ + (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + else { \ + while (RB_PARENT(elm, field) && \ + (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + elm = RB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ +} \ + \ +/* ARGSUSED */ \ +attr struct type * \ +name##_RB_PREV(struct type *elm) \ +{ \ + if (RB_LEFT(elm, field)) { \ + elm = RB_LEFT(elm, field); \ + while (RB_RIGHT(elm, field)) \ + elm = RB_RIGHT(elm, field); \ + } else { \ + if (RB_PARENT(elm, field) && \ + (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + else { \ + while (RB_PARENT(elm, field) && \ + (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ + elm = RB_PARENT(elm, field); \ + elm = RB_PARENT(elm, field); \ + } \ + } \ + return (elm); \ +} \ + \ +attr struct type * \ +name##_RB_MINMAX(struct name *head, int val) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + struct type *parent = NULL; \ + while (tmp) { \ + parent = tmp; \ + if (val < 0) \ + tmp = RB_LEFT(tmp, field); \ + else \ + tmp = RB_RIGHT(tmp, field); \ + } \ + return (parent); \ +} + +#define RB_NEGINF -1 +#define RB_INF 1 + +#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) +#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) +#define 
RB_FIND(name, x, y) name##_RB_FIND(x, y) +#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) +#define RB_NEXT(name, x, y) name##_RB_NEXT(y) +#define RB_PREV(name, x, y) name##_RB_PREV(y) +#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) +#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) + +#define RB_FOREACH(x, name, head) \ + for ((x) = RB_MIN(name, head); \ + (x) != NULL; \ + (x) = name##_RB_NEXT(x)) + +#define RB_FOREACH_FROM(x, name, y) \ + for ((x) = (y); \ + ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ + (x) = (y)) + +#define RB_FOREACH_SAFE(x, name, head, y) \ + for ((x) = RB_MIN(name, head); \ + ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ + (x) = (y)) + +#define RB_FOREACH_REVERSE(x, name, head) \ + for ((x) = RB_MAX(name, head); \ + (x) != NULL; \ + (x) = name##_RB_PREV(x)) + +#define RB_FOREACH_REVERSE_FROM(x, name, y) \ + for ((x) = (y); \ + ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ + (x) = (y)) + +#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ + for ((x) = RB_MAX(name, head); \ + ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ + (x) = (y)) + +#endif /* UV_TREE_H_ */ diff --git a/deps/libuv/include/uv-aix.h b/deps/libuv/include/uv-aix.h new file mode 100644 index 00000000..7dc992fa --- /dev/null +++ b/deps/libuv/include/uv-aix.h @@ -0,0 +1,32 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_AIX_H +#define UV_AIX_H + +#define UV_PLATFORM_LOOP_FIELDS \ + int fs_fd; \ + +#define UV_PLATFORM_FS_EVENT_FIELDS \ + uv__io_t event_watcher; \ + char *dir_filename; \ + +#endif /* UV_AIX_H */ diff --git a/deps/libuv/include/uv-bsd.h b/deps/libuv/include/uv-bsd.h new file mode 100644 index 00000000..2d72b3d7 --- /dev/null +++ b/deps/libuv/include/uv-bsd.h @@ -0,0 +1,34 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
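/* [Editorial aside -- an illustrative sketch, not part of the vendored libuv
 * diff.] The RB_* macros above (tree.h) generate a complete intrusive
 * red-black tree for a caller-supplied node type; the generator macros
 * (RB_ENTRY, RB_HEAD, RB_GENERATE_STATIC, RB_INIT) come from the earlier part
 * of the same header. The node type, tree name and comparator below
 * (my_node, my_tree_s, my_cmp) are hypothetical.
 */
#include "tree.h"

struct my_node {
  RB_ENTRY(my_node) entry;   /* embeds the left/right/parent/color links */
  int key;
};

static int my_cmp(const struct my_node* a, const struct my_node* b) {
  return a->key < b->key ? -1 : a->key > b->key ? 1 : 0;
}

RB_HEAD(my_tree_s, my_node);                           /* declares the head type */
RB_GENERATE_STATIC(my_tree_s, my_node, entry, my_cmp)  /* emits the functions */

static void my_tree_demo(void) {
  struct my_tree_s tree;
  struct my_node a, key, *it;

  RB_INIT(&tree);
  a.key = 42;
  RB_INSERT(my_tree_s, &tree, &a);       /* expands to my_tree_s_RB_INSERT(&tree, &a) */
  key.key = 42;
  it = RB_FIND(my_tree_s, &tree, &key);  /* returns &a */
  RB_FOREACH(it, my_tree_s, &tree) {
    /* visits nodes in ascending my_cmp() order */
  }
}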
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_BSD_H +#define UV_BSD_H + +#define UV_PLATFORM_FS_EVENT_FIELDS \ + uv__io_t event_watcher; \ + +#define UV_IO_PRIVATE_PLATFORM_FIELDS \ + int rcount; \ + int wcount; \ + +#define UV_HAVE_KQUEUE 1 + +#endif /* UV_BSD_H */ diff --git a/deps/libuv/include/uv-darwin.h b/deps/libuv/include/uv-darwin.h new file mode 100644 index 00000000..d2264158 --- /dev/null +++ b/deps/libuv/include/uv-darwin.h @@ -0,0 +1,61 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef UV_DARWIN_H +#define UV_DARWIN_H + +#if defined(__APPLE__) && defined(__MACH__) +# include <mach/mach.h> +# include <mach/task.h> +# include <mach/semaphore.h> +# include <TargetConditionals.h> +# define UV_PLATFORM_SEM_T semaphore_t +#endif + +#define UV_IO_PRIVATE_PLATFORM_FIELDS \ + int rcount; \ + int wcount; \ + +#define UV_PLATFORM_LOOP_FIELDS \ + uv_thread_t cf_thread; \ + void* _cf_reserved; \ + void* cf_state; \ + uv_mutex_t cf_mutex; \ + uv_sem_t cf_sem; \ + void* cf_signals[2]; \ + +#define UV_PLATFORM_FS_EVENT_FIELDS \ + uv__io_t event_watcher; \ + char* realpath; \ + int realpath_len; \ + int cf_flags; \ + uv_async_t* cf_cb; \ + void* cf_events[2]; \ + void* cf_member[2]; \ + int cf_error; \ + uv_mutex_t cf_mutex; \ + +#define UV_STREAM_PRIVATE_PLATFORM_FIELDS \ + void* select; \ + +#define UV_HAVE_KQUEUE 1 + +#endif /* UV_DARWIN_H */ diff --git a/deps/libuv/include/uv-errno.h b/deps/libuv/include/uv-errno.h new file mode 100644 index 00000000..f1371517 --- /dev/null +++ b/deps/libuv/include/uv-errno.h @@ -0,0 +1,419 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_ERRNO_H_ +#define UV_ERRNO_H_ + +#include <errno.h> + +#define UV__EOF (-4095) +#define UV__UNKNOWN (-4094) + +#define UV__EAI_ADDRFAMILY (-3000) +#define UV__EAI_AGAIN (-3001) +#define UV__EAI_BADFLAGS (-3002) +#define UV__EAI_CANCELED (-3003) +#define UV__EAI_FAIL (-3004) +#define UV__EAI_FAMILY (-3005) +#define UV__EAI_MEMORY (-3006) +#define UV__EAI_NODATA (-3007) +#define UV__EAI_NONAME (-3008) +#define UV__EAI_OVERFLOW (-3009) +#define UV__EAI_SERVICE (-3010) +#define UV__EAI_SOCKTYPE (-3011) +#define UV__EAI_BADHINTS (-3013) +#define UV__EAI_PROTOCOL (-3014) + +/* Only map to the system errno on non-Windows platforms. It's apparently + * a fairly common practice for Windows programmers to redefine errno codes.
+ */ +#if defined(E2BIG) && !defined(_WIN32) +# define UV__E2BIG (-E2BIG) +#else +# define UV__E2BIG (-4093) +#endif + +#if defined(EACCES) && !defined(_WIN32) +# define UV__EACCES (-EACCES) +#else +# define UV__EACCES (-4092) +#endif + +#if defined(EADDRINUSE) && !defined(_WIN32) +# define UV__EADDRINUSE (-EADDRINUSE) +#else +# define UV__EADDRINUSE (-4091) +#endif + +#if defined(EADDRNOTAVAIL) && !defined(_WIN32) +# define UV__EADDRNOTAVAIL (-EADDRNOTAVAIL) +#else +# define UV__EADDRNOTAVAIL (-4090) +#endif + +#if defined(EAFNOSUPPORT) && !defined(_WIN32) +# define UV__EAFNOSUPPORT (-EAFNOSUPPORT) +#else +# define UV__EAFNOSUPPORT (-4089) +#endif + +#if defined(EAGAIN) && !defined(_WIN32) +# define UV__EAGAIN (-EAGAIN) +#else +# define UV__EAGAIN (-4088) +#endif + +#if defined(EALREADY) && !defined(_WIN32) +# define UV__EALREADY (-EALREADY) +#else +# define UV__EALREADY (-4084) +#endif + +#if defined(EBADF) && !defined(_WIN32) +# define UV__EBADF (-EBADF) +#else +# define UV__EBADF (-4083) +#endif + +#if defined(EBUSY) && !defined(_WIN32) +# define UV__EBUSY (-EBUSY) +#else +# define UV__EBUSY (-4082) +#endif + +#if defined(ECANCELED) && !defined(_WIN32) +# define UV__ECANCELED (-ECANCELED) +#else +# define UV__ECANCELED (-4081) +#endif + +#if defined(ECHARSET) && !defined(_WIN32) +# define UV__ECHARSET (-ECHARSET) +#else +# define UV__ECHARSET (-4080) +#endif + +#if defined(ECONNABORTED) && !defined(_WIN32) +# define UV__ECONNABORTED (-ECONNABORTED) +#else +# define UV__ECONNABORTED (-4079) +#endif + +#if defined(ECONNREFUSED) && !defined(_WIN32) +# define UV__ECONNREFUSED (-ECONNREFUSED) +#else +# define UV__ECONNREFUSED (-4078) +#endif + +#if defined(ECONNRESET) && !defined(_WIN32) +# define UV__ECONNRESET (-ECONNRESET) +#else +# define UV__ECONNRESET (-4077) +#endif + +#if defined(EDESTADDRREQ) && !defined(_WIN32) +# define UV__EDESTADDRREQ (-EDESTADDRREQ) +#else +# define UV__EDESTADDRREQ (-4076) +#endif + +#if defined(EEXIST) && !defined(_WIN32) +# define UV__EEXIST (-EEXIST) +#else +# define UV__EEXIST (-4075) +#endif + +#if defined(EFAULT) && !defined(_WIN32) +# define UV__EFAULT (-EFAULT) +#else +# define UV__EFAULT (-4074) +#endif + +#if defined(EHOSTUNREACH) && !defined(_WIN32) +# define UV__EHOSTUNREACH (-EHOSTUNREACH) +#else +# define UV__EHOSTUNREACH (-4073) +#endif + +#if defined(EINTR) && !defined(_WIN32) +# define UV__EINTR (-EINTR) +#else +# define UV__EINTR (-4072) +#endif + +#if defined(EINVAL) && !defined(_WIN32) +# define UV__EINVAL (-EINVAL) +#else +# define UV__EINVAL (-4071) +#endif + +#if defined(EIO) && !defined(_WIN32) +# define UV__EIO (-EIO) +#else +# define UV__EIO (-4070) +#endif + +#if defined(EISCONN) && !defined(_WIN32) +# define UV__EISCONN (-EISCONN) +#else +# define UV__EISCONN (-4069) +#endif + +#if defined(EISDIR) && !defined(_WIN32) +# define UV__EISDIR (-EISDIR) +#else +# define UV__EISDIR (-4068) +#endif + +#if defined(ELOOP) && !defined(_WIN32) +# define UV__ELOOP (-ELOOP) +#else +# define UV__ELOOP (-4067) +#endif + +#if defined(EMFILE) && !defined(_WIN32) +# define UV__EMFILE (-EMFILE) +#else +# define UV__EMFILE (-4066) +#endif + +#if defined(EMSGSIZE) && !defined(_WIN32) +# define UV__EMSGSIZE (-EMSGSIZE) +#else +# define UV__EMSGSIZE (-4065) +#endif + +#if defined(ENAMETOOLONG) && !defined(_WIN32) +# define UV__ENAMETOOLONG (-ENAMETOOLONG) +#else +# define UV__ENAMETOOLONG (-4064) +#endif + +#if defined(ENETDOWN) && !defined(_WIN32) +# define UV__ENETDOWN (-ENETDOWN) +#else +# define UV__ENETDOWN (-4063) +#endif + +#if defined(ENETUNREACH) 
&& !defined(_WIN32) +# define UV__ENETUNREACH (-ENETUNREACH) +#else +# define UV__ENETUNREACH (-4062) +#endif + +#if defined(ENFILE) && !defined(_WIN32) +# define UV__ENFILE (-ENFILE) +#else +# define UV__ENFILE (-4061) +#endif + +#if defined(ENOBUFS) && !defined(_WIN32) +# define UV__ENOBUFS (-ENOBUFS) +#else +# define UV__ENOBUFS (-4060) +#endif + +#if defined(ENODEV) && !defined(_WIN32) +# define UV__ENODEV (-ENODEV) +#else +# define UV__ENODEV (-4059) +#endif + +#if defined(ENOENT) && !defined(_WIN32) +# define UV__ENOENT (-ENOENT) +#else +# define UV__ENOENT (-4058) +#endif + +#if defined(ENOMEM) && !defined(_WIN32) +# define UV__ENOMEM (-ENOMEM) +#else +# define UV__ENOMEM (-4057) +#endif + +#if defined(ENONET) && !defined(_WIN32) +# define UV__ENONET (-ENONET) +#else +# define UV__ENONET (-4056) +#endif + +#if defined(ENOSPC) && !defined(_WIN32) +# define UV__ENOSPC (-ENOSPC) +#else +# define UV__ENOSPC (-4055) +#endif + +#if defined(ENOSYS) && !defined(_WIN32) +# define UV__ENOSYS (-ENOSYS) +#else +# define UV__ENOSYS (-4054) +#endif + +#if defined(ENOTCONN) && !defined(_WIN32) +# define UV__ENOTCONN (-ENOTCONN) +#else +# define UV__ENOTCONN (-4053) +#endif + +#if defined(ENOTDIR) && !defined(_WIN32) +# define UV__ENOTDIR (-ENOTDIR) +#else +# define UV__ENOTDIR (-4052) +#endif + +#if defined(ENOTEMPTY) && !defined(_WIN32) +# define UV__ENOTEMPTY (-ENOTEMPTY) +#else +# define UV__ENOTEMPTY (-4051) +#endif + +#if defined(ENOTSOCK) && !defined(_WIN32) +# define UV__ENOTSOCK (-ENOTSOCK) +#else +# define UV__ENOTSOCK (-4050) +#endif + +#if defined(ENOTSUP) && !defined(_WIN32) +# define UV__ENOTSUP (-ENOTSUP) +#else +# define UV__ENOTSUP (-4049) +#endif + +#if defined(EPERM) && !defined(_WIN32) +# define UV__EPERM (-EPERM) +#else +# define UV__EPERM (-4048) +#endif + +#if defined(EPIPE) && !defined(_WIN32) +# define UV__EPIPE (-EPIPE) +#else +# define UV__EPIPE (-4047) +#endif + +#if defined(EPROTO) && !defined(_WIN32) +# define UV__EPROTO (-EPROTO) +#else +# define UV__EPROTO (-4046) +#endif + +#if defined(EPROTONOSUPPORT) && !defined(_WIN32) +# define UV__EPROTONOSUPPORT (-EPROTONOSUPPORT) +#else +# define UV__EPROTONOSUPPORT (-4045) +#endif + +#if defined(EPROTOTYPE) && !defined(_WIN32) +# define UV__EPROTOTYPE (-EPROTOTYPE) +#else +# define UV__EPROTOTYPE (-4044) +#endif + +#if defined(EROFS) && !defined(_WIN32) +# define UV__EROFS (-EROFS) +#else +# define UV__EROFS (-4043) +#endif + +#if defined(ESHUTDOWN) && !defined(_WIN32) +# define UV__ESHUTDOWN (-ESHUTDOWN) +#else +# define UV__ESHUTDOWN (-4042) +#endif + +#if defined(ESPIPE) && !defined(_WIN32) +# define UV__ESPIPE (-ESPIPE) +#else +# define UV__ESPIPE (-4041) +#endif + +#if defined(ESRCH) && !defined(_WIN32) +# define UV__ESRCH (-ESRCH) +#else +# define UV__ESRCH (-4040) +#endif + +#if defined(ETIMEDOUT) && !defined(_WIN32) +# define UV__ETIMEDOUT (-ETIMEDOUT) +#else +# define UV__ETIMEDOUT (-4039) +#endif + +#if defined(ETXTBSY) && !defined(_WIN32) +# define UV__ETXTBSY (-ETXTBSY) +#else +# define UV__ETXTBSY (-4038) +#endif + +#if defined(EXDEV) && !defined(_WIN32) +# define UV__EXDEV (-EXDEV) +#else +# define UV__EXDEV (-4037) +#endif + +#if defined(EFBIG) && !defined(_WIN32) +# define UV__EFBIG (-EFBIG) +#else +# define UV__EFBIG (-4036) +#endif + +#if defined(ENOPROTOOPT) && !defined(_WIN32) +# define UV__ENOPROTOOPT (-ENOPROTOOPT) +#else +# define UV__ENOPROTOOPT (-4035) +#endif + +#if defined(ERANGE) && !defined(_WIN32) +# define UV__ERANGE (-ERANGE) +#else +# define UV__ERANGE (-4034) +#endif + +#if defined(ENXIO) && 
!defined(_WIN32) +# define UV__ENXIO (-ENXIO) +#else +# define UV__ENXIO (-4033) +#endif + +#if defined(EMLINK) && !defined(_WIN32) +# define UV__EMLINK (-EMLINK) +#else +# define UV__EMLINK (-4032) +#endif + +/* EHOSTDOWN is not visible on BSD-like systems when _POSIX_C_SOURCE is + * defined. Fortunately, its value is always 64 so it's possible albeit + * icky to hard-code it. + */ +#if defined(EHOSTDOWN) && !defined(_WIN32) +# define UV__EHOSTDOWN (-EHOSTDOWN) +#elif defined(__APPLE__) || \ + defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) || \ + defined(__NetBSD__) || \ + defined(__OpenBSD__) +# define UV__EHOSTDOWN (-64) +#else +# define UV__EHOSTDOWN (-4031) +#endif + +#endif /* UV_ERRNO_H_ */ diff --git a/deps/libuv/include/uv-linux.h b/deps/libuv/include/uv-linux.h new file mode 100644 index 00000000..9b38405a --- /dev/null +++ b/deps/libuv/include/uv-linux.h @@ -0,0 +1,34 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_LINUX_H +#define UV_LINUX_H + +#define UV_PLATFORM_LOOP_FIELDS \ + uv__io_t inotify_read_watcher; \ + void* inotify_watchers; \ + int inotify_fd; \ + +#define UV_PLATFORM_FS_EVENT_FIELDS \ + void* watchers[2]; \ + int wd; \ + +#endif /* UV_LINUX_H */ diff --git a/deps/libuv/include/uv-os390.h b/deps/libuv/include/uv-os390.h new file mode 100644 index 00000000..b0b068f4 --- /dev/null +++ b/deps/libuv/include/uv-os390.h @@ -0,0 +1,27 @@ +/* Copyright libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_MVS_H +#define UV_MVS_H + +#define UV_PLATFORM_SEM_T int + +#endif /* UV_MVS_H */ diff --git a/deps/libuv/include/uv-sunos.h b/deps/libuv/include/uv-sunos.h new file mode 100644 index 00000000..04216642 --- /dev/null +++ b/deps/libuv/include/uv-sunos.h @@ -0,0 +1,44 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_SUNOS_H +#define UV_SUNOS_H + +#include <sys/port.h> +#include <port.h> + +/* For the sake of convenience and reduced #ifdef-ery in src/unix/sunos.c, + * add the fs_event fields even when this version of SunOS doesn't support + * file watching. + */ +#define UV_PLATFORM_LOOP_FIELDS \ + uv__io_t fs_event_watcher; \ + int fs_fd; \ + +#if defined(PORT_SOURCE_FILE) + +# define UV_PLATFORM_FS_EVENT_FIELDS \ + file_obj_t fo; \ + int fd; \ + +#endif /* defined(PORT_SOURCE_FILE) */ + +#endif /* UV_SUNOS_H */ diff --git a/deps/libuv/include/uv-threadpool.h b/deps/libuv/include/uv-threadpool.h new file mode 100644 index 00000000..9708ebdd --- /dev/null +++ b/deps/libuv/include/uv-threadpool.h @@ -0,0 +1,37 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* + * This file is private to libuv. It provides common functionality to both + * Windows and Unix backends. + */ + +#ifndef UV_THREADPOOL_H_ +#define UV_THREADPOOL_H_ + +struct uv__work { + void (*work)(struct uv__work *w); + void (*done)(struct uv__work *w, int status); + struct uv_loop_s* loop; + void* wq[2]; +}; + +#endif /* UV_THREADPOOL_H_ */ diff --git a/deps/libuv/include/uv-unix.h b/deps/libuv/include/uv-unix.h new file mode 100644 index 00000000..bca27144 --- /dev/null +++ b/deps/libuv/include/uv-unix.h @@ -0,0 +1,371 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_UNIX_H +#define UV_UNIX_H + +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <dirent.h> + +#include <sys/socket.h> +#include <netinet/in.h> +#include <netinet/tcp.h> +#include <arpa/inet.h> +#include <netdb.h> + +#include <termios.h> +#include <pwd.h> + +#include <semaphore.h> +#include <pthread.h> +#include <signal.h> + +#include "uv-threadpool.h" + +#if defined(__linux__) +# include "uv-linux.h" +#elif defined(_AIX) +# include "uv-aix.h" +#elif defined(__sun) +# include "uv-sunos.h" +#elif defined(__APPLE__) +# include "uv-darwin.h" +#elif defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) || \ + defined(__OpenBSD__) || \ + defined(__NetBSD__) +# include "uv-bsd.h" +#endif + +#ifndef PTHREAD_BARRIER_SERIAL_THREAD +# include "pthread-barrier.h" +#endif + +#ifndef NI_MAXHOST +# define NI_MAXHOST 1025 +#endif + +#ifndef NI_MAXSERV +# define NI_MAXSERV 32 +#endif + +#ifndef UV_IO_PRIVATE_PLATFORM_FIELDS +# define UV_IO_PRIVATE_PLATFORM_FIELDS /* empty */ +#endif + +struct uv__io_s; +struct uv__async; +struct uv_loop_s; + +typedef void (*uv__io_cb)(struct uv_loop_s* loop, + struct uv__io_s* w, + unsigned int events); +typedef struct uv__io_s uv__io_t; + +struct uv__io_s { + uv__io_cb cb; + void* pending_queue[2]; + void* watcher_queue[2]; + unsigned int pevents; /* Pending event mask i.e. mask at next tick. */ + unsigned int events; /* Current event mask.
*/ + int fd; + UV_IO_PRIVATE_PLATFORM_FIELDS +}; + +typedef void (*uv__async_cb)(struct uv_loop_s* loop, + struct uv__async* w, + unsigned int nevents); + +struct uv__async { + uv__async_cb cb; + uv__io_t io_watcher; + int wfd; +}; + +#ifndef UV_PLATFORM_SEM_T +# define UV_PLATFORM_SEM_T sem_t +#endif + +#ifndef UV_PLATFORM_LOOP_FIELDS +# define UV_PLATFORM_LOOP_FIELDS /* empty */ +#endif + +#ifndef UV_PLATFORM_FS_EVENT_FIELDS +# define UV_PLATFORM_FS_EVENT_FIELDS /* empty */ +#endif + +#ifndef UV_STREAM_PRIVATE_PLATFORM_FIELDS +# define UV_STREAM_PRIVATE_PLATFORM_FIELDS /* empty */ +#endif + +/* Note: May be cast to struct iovec. See writev(2). */ +typedef struct uv_buf_t { + char* base; + size_t len; +} uv_buf_t; + +typedef int uv_file; +typedef int uv_os_sock_t; +typedef int uv_os_fd_t; + +#define UV_ONCE_INIT PTHREAD_ONCE_INIT + +typedef pthread_once_t uv_once_t; +typedef pthread_t uv_thread_t; +typedef pthread_mutex_t uv_mutex_t; +typedef pthread_rwlock_t uv_rwlock_t; +typedef UV_PLATFORM_SEM_T uv_sem_t; +typedef pthread_cond_t uv_cond_t; +typedef pthread_key_t uv_key_t; +typedef pthread_barrier_t uv_barrier_t; + + +/* Platform-specific definitions for uv_spawn support. */ +typedef gid_t uv_gid_t; +typedef uid_t uv_uid_t; + +typedef struct dirent uv__dirent_t; + +#if defined(DT_UNKNOWN) +# define HAVE_DIRENT_TYPES +# if defined(DT_REG) +# define UV__DT_FILE DT_REG +# else +# define UV__DT_FILE -1 +# endif +# if defined(DT_DIR) +# define UV__DT_DIR DT_DIR +# else +# define UV__DT_DIR -2 +# endif +# if defined(DT_LNK) +# define UV__DT_LINK DT_LNK +# else +# define UV__DT_LINK -3 +# endif +# if defined(DT_FIFO) +# define UV__DT_FIFO DT_FIFO +# else +# define UV__DT_FIFO -4 +# endif +# if defined(DT_SOCK) +# define UV__DT_SOCKET DT_SOCK +# else +# define UV__DT_SOCKET -5 +# endif +# if defined(DT_CHR) +# define UV__DT_CHAR DT_CHR +# else +# define UV__DT_CHAR -6 +# endif +# if defined(DT_BLK) +# define UV__DT_BLOCK DT_BLK +# else +# define UV__DT_BLOCK -7 +# endif +#endif + +/* Platform-specific definitions for uv_dlopen support. 
*/ +#define UV_DYNAMIC /* empty */ + +typedef struct { + void* handle; + char* errmsg; +} uv_lib_t; + +#define UV_LOOP_PRIVATE_FIELDS \ + unsigned long flags; \ + int backend_fd; \ + void* pending_queue[2]; \ + void* watcher_queue[2]; \ + uv__io_t** watchers; \ + unsigned int nwatchers; \ + unsigned int nfds; \ + void* wq[2]; \ + uv_mutex_t wq_mutex; \ + uv_async_t wq_async; \ + uv_rwlock_t cloexec_lock; \ + uv_handle_t* closing_handles; \ + void* process_handles[2]; \ + void* prepare_handles[2]; \ + void* check_handles[2]; \ + void* idle_handles[2]; \ + void* async_handles[2]; \ + struct uv__async async_watcher; \ + struct { \ + void* min; \ + unsigned int nelts; \ + } timer_heap; \ + uint64_t timer_counter; \ + uint64_t time; \ + int signal_pipefd[2]; \ + uv__io_t signal_io_watcher; \ + uv_signal_t child_watcher; \ + int emfile_fd; \ + UV_PLATFORM_LOOP_FIELDS \ + +#define UV_REQ_TYPE_PRIVATE /* empty */ + +#define UV_REQ_PRIVATE_FIELDS /* empty */ + +#define UV_PRIVATE_REQ_TYPES /* empty */ + +#define UV_WRITE_PRIVATE_FIELDS \ + void* queue[2]; \ + unsigned int write_index; \ + uv_buf_t* bufs; \ + unsigned int nbufs; \ + int error; \ + uv_buf_t bufsml[4]; \ + +#define UV_CONNECT_PRIVATE_FIELDS \ + void* queue[2]; \ + +#define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */ + +#define UV_UDP_SEND_PRIVATE_FIELDS \ + void* queue[2]; \ + struct sockaddr_storage addr; \ + unsigned int nbufs; \ + uv_buf_t* bufs; \ + ssize_t status; \ + uv_udp_send_cb send_cb; \ + uv_buf_t bufsml[4]; \ + +#define UV_HANDLE_PRIVATE_FIELDS \ + uv_handle_t* next_closing; \ + unsigned int flags; \ + +#define UV_STREAM_PRIVATE_FIELDS \ + uv_connect_t *connect_req; \ + uv_shutdown_t *shutdown_req; \ + uv__io_t io_watcher; \ + void* write_queue[2]; \ + void* write_completed_queue[2]; \ + uv_connection_cb connection_cb; \ + int delayed_error; \ + int accepted_fd; \ + void* queued_fds; \ + UV_STREAM_PRIVATE_PLATFORM_FIELDS \ + +#define UV_TCP_PRIVATE_FIELDS /* empty */ + +#define UV_UDP_PRIVATE_FIELDS \ + uv_alloc_cb alloc_cb; \ + uv_udp_recv_cb recv_cb; \ + uv__io_t io_watcher; \ + void* write_queue[2]; \ + void* write_completed_queue[2]; \ + +#define UV_PIPE_PRIVATE_FIELDS \ + const char* pipe_fname; /* strdup'ed */ + +#define UV_POLL_PRIVATE_FIELDS \ + uv__io_t io_watcher; + +#define UV_PREPARE_PRIVATE_FIELDS \ + uv_prepare_cb prepare_cb; \ + void* queue[2]; \ + +#define UV_CHECK_PRIVATE_FIELDS \ + uv_check_cb check_cb; \ + void* queue[2]; \ + +#define UV_IDLE_PRIVATE_FIELDS \ + uv_idle_cb idle_cb; \ + void* queue[2]; \ + +#define UV_ASYNC_PRIVATE_FIELDS \ + uv_async_cb async_cb; \ + void* queue[2]; \ + int pending; \ + +#define UV_TIMER_PRIVATE_FIELDS \ + uv_timer_cb timer_cb; \ + void* heap_node[3]; \ + uint64_t timeout; \ + uint64_t repeat; \ + uint64_t start_id; + +#define UV_GETADDRINFO_PRIVATE_FIELDS \ + struct uv__work work_req; \ + uv_getaddrinfo_cb cb; \ + struct addrinfo* hints; \ + char* hostname; \ + char* service; \ + struct addrinfo* addrinfo; \ + int retcode; + +#define UV_GETNAMEINFO_PRIVATE_FIELDS \ + struct uv__work work_req; \ + uv_getnameinfo_cb getnameinfo_cb; \ + struct sockaddr_storage storage; \ + int flags; \ + char host[NI_MAXHOST]; \ + char service[NI_MAXSERV]; \ + int retcode; + +#define UV_PROCESS_PRIVATE_FIELDS \ + void* queue[2]; \ + int status; \ + +#define UV_FS_PRIVATE_FIELDS \ + const char *new_path; \ + uv_file file; \ + int flags; \ + mode_t mode; \ + unsigned int nbufs; \ + uv_buf_t* bufs; \ + off_t off; \ + uv_uid_t uid; \ + uv_gid_t gid; \ + double atime; \ + double mtime; \ + struct 
uv__work work_req; \ + uv_buf_t bufsml[4]; \ + +#define UV_WORK_PRIVATE_FIELDS \ + struct uv__work work_req; + +#define UV_TTY_PRIVATE_FIELDS \ + struct termios orig_termios; \ + int mode; + +#define UV_SIGNAL_PRIVATE_FIELDS \ + /* RB_ENTRY(uv_signal_s) tree_entry; */ \ + struct { \ + struct uv_signal_s* rbe_left; \ + struct uv_signal_s* rbe_right; \ + struct uv_signal_s* rbe_parent; \ + int rbe_color; \ + } tree_entry; \ + /* Use two counters here so we don have to fiddle with atomics. */ \ + unsigned int caught_signals; \ + unsigned int dispatched_signals; + +#define UV_FS_EVENT_PRIVATE_FIELDS \ + uv_fs_event_cb cb; \ + UV_PLATFORM_FS_EVENT_FIELDS \ + +#endif /* UV_UNIX_H */ diff --git a/deps/libuv/include/uv-version.h b/deps/libuv/include/uv-version.h new file mode 100644 index 00000000..a5631e91 --- /dev/null +++ b/deps/libuv/include/uv-version.h @@ -0,0 +1,43 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_VERSION_H +#define UV_VERSION_H + + /* + * Versions with the same major number are ABI stable. API is allowed to + * evolve between minor releases, but only in a backwards compatible way. + * Make sure you update the -soname directives in configure.ac + * and uv.gyp whenever you bump UV_VERSION_MAJOR or UV_VERSION_MINOR (but + * not UV_VERSION_PATCH.) + */ + +#define UV_VERSION_MAJOR 1 +#define UV_VERSION_MINOR 10 +#define UV_VERSION_PATCH 0 +#define UV_VERSION_IS_RELEASE 1 +#define UV_VERSION_SUFFIX "" + +#define UV_VERSION_HEX ((UV_VERSION_MAJOR << 16) | \ + (UV_VERSION_MINOR << 8) | \ + (UV_VERSION_PATCH)) + +#endif /* UV_VERSION_H */ diff --git a/deps/libuv/include/uv-win.h b/deps/libuv/include/uv-win.h new file mode 100644 index 00000000..e8b9b15e --- /dev/null +++ b/deps/libuv/include/uv-win.h @@ -0,0 +1,649 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
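/* [Editorial aside -- an illustrative sketch, not part of the vendored libuv
 * diff.] uv-version.h above packs major/minor/patch into the single
 * comparable constant UV_VERSION_HEX. For the 1.10.0 headers vendored here
 * that works out to (1 << 16) | (10 << 8) | 0, i.e. 0x010a00, so a
 * compile-time version gate reduces to one comparison:
 */
#include "uv-version.h"

#if UV_VERSION_HEX >= 0x010a00
/* Safe to rely on APIs available as of libuv 1.10.0. */
#endif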
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _WIN32_WINNT +# define _WIN32_WINNT 0x0502 +#endif + +#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED) +typedef intptr_t ssize_t; +# define _SSIZE_T_ +# define _SSIZE_T_DEFINED +#endif + +#include <winsock2.h> + +#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) +typedef struct pollfd { + SOCKET fd; + short events; + short revents; +} WSAPOLLFD, *PWSAPOLLFD, *LPWSAPOLLFD; +#endif + +#ifndef LOCALE_INVARIANT +# define LOCALE_INVARIANT 0x007f +#endif + +#include <mswsock.h> +#include <ws2tcpip.h> +#include <windows.h> + +#include <process.h> +#include <signal.h> +#include <sys/stat.h> + +#if defined(_MSC_VER) && _MSC_VER < 1600 +# include "stdint-msvc2008.h" +#else +# include <stdint.h> +#endif + +#include "tree.h" +#include "uv-threadpool.h" + +#define MAX_PIPENAME_LEN 256 + +#ifndef S_IFLNK +# define S_IFLNK 0xA000 +#endif + +/* Additional signals supported by uv_signal and or uv_kill. The CRT defines + * the following signals already: + * + * #define SIGINT 2 + * #define SIGILL 4 + * #define SIGABRT_COMPAT 6 + * #define SIGFPE 8 + * #define SIGSEGV 11 + * #define SIGTERM 15 + * #define SIGBREAK 21 + * #define SIGABRT 22 + * + * The additional signals have values that are common on other Unix + * variants (Linux and Darwin) + */ +#define SIGHUP 1 +#define SIGKILL 9 +#define SIGWINCH 28 + +/* The CRT defines SIGABRT_COMPAT as 6, which equals SIGABRT on many */ +/* unix-like platforms. However MinGW doesn't define it, so we do.
*/ +#ifndef SIGABRT_COMPAT +# define SIGABRT_COMPAT 6 +#endif + +/* + * Guids and typedefs for winsock extension functions + * Mingw32 doesn't have these :-( + */ +#ifndef WSAID_ACCEPTEX +# define WSAID_ACCEPTEX \ + {0xb5367df1, 0xcbac, 0x11cf, \ + {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}} + +# define WSAID_CONNECTEX \ + {0x25a207b9, 0xddf3, 0x4660, \ + {0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}} + +# define WSAID_GETACCEPTEXSOCKADDRS \ + {0xb5367df2, 0xcbac, 0x11cf, \ + {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}} + +# define WSAID_DISCONNECTEX \ + {0x7fda2e11, 0x8630, 0x436f, \ + {0xa0, 0x31, 0xf5, 0x36, 0xa6, 0xee, 0xc1, 0x57}} + +# define WSAID_TRANSMITFILE \ + {0xb5367df0, 0xcbac, 0x11cf, \ + {0x95, 0xca, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}} + + typedef BOOL (PASCAL *LPFN_ACCEPTEX) + (SOCKET sListenSocket, + SOCKET sAcceptSocket, + PVOID lpOutputBuffer, + DWORD dwReceiveDataLength, + DWORD dwLocalAddressLength, + DWORD dwRemoteAddressLength, + LPDWORD lpdwBytesReceived, + LPOVERLAPPED lpOverlapped); + + typedef BOOL (PASCAL *LPFN_CONNECTEX) + (SOCKET s, + const struct sockaddr* name, + int namelen, + PVOID lpSendBuffer, + DWORD dwSendDataLength, + LPDWORD lpdwBytesSent, + LPOVERLAPPED lpOverlapped); + + typedef void (PASCAL *LPFN_GETACCEPTEXSOCKADDRS) + (PVOID lpOutputBuffer, + DWORD dwReceiveDataLength, + DWORD dwLocalAddressLength, + DWORD dwRemoteAddressLength, + LPSOCKADDR* LocalSockaddr, + LPINT LocalSockaddrLength, + LPSOCKADDR* RemoteSockaddr, + LPINT RemoteSockaddrLength); + + typedef BOOL (PASCAL *LPFN_DISCONNECTEX) + (SOCKET hSocket, + LPOVERLAPPED lpOverlapped, + DWORD dwFlags, + DWORD reserved); + + typedef BOOL (PASCAL *LPFN_TRANSMITFILE) + (SOCKET hSocket, + HANDLE hFile, + DWORD nNumberOfBytesToWrite, + DWORD nNumberOfBytesPerSend, + LPOVERLAPPED lpOverlapped, + LPTRANSMIT_FILE_BUFFERS lpTransmitBuffers, + DWORD dwFlags); + + typedef PVOID RTL_SRWLOCK; + typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK; +#endif + +typedef int (WSAAPI* LPFN_WSARECV) + (SOCKET socket, + LPWSABUF buffers, + DWORD buffer_count, + LPDWORD bytes, + LPDWORD flags, + LPWSAOVERLAPPED overlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine); + +typedef int (WSAAPI* LPFN_WSARECVFROM) + (SOCKET socket, + LPWSABUF buffers, + DWORD buffer_count, + LPDWORD bytes, + LPDWORD flags, + struct sockaddr* addr, + LPINT addr_len, + LPWSAOVERLAPPED overlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine); + +#ifndef _NTDEF_ + typedef LONG NTSTATUS; + typedef NTSTATUS *PNTSTATUS; +#endif + +#ifndef RTL_CONDITION_VARIABLE_INIT + typedef PVOID CONDITION_VARIABLE, *PCONDITION_VARIABLE; +#endif + +typedef struct _AFD_POLL_HANDLE_INFO { + HANDLE Handle; + ULONG Events; + NTSTATUS Status; +} AFD_POLL_HANDLE_INFO, *PAFD_POLL_HANDLE_INFO; + +typedef struct _AFD_POLL_INFO { + LARGE_INTEGER Timeout; + ULONG NumberOfHandles; + ULONG Exclusive; + AFD_POLL_HANDLE_INFO Handles[1]; +} AFD_POLL_INFO, *PAFD_POLL_INFO; + +#define UV_MSAFD_PROVIDER_COUNT 3 + + +/** + * It should be possible to cast uv_buf_t[] to WSABUF[] + * see http://msdn.microsoft.com/en-us/library/ms741542(v=vs.85).aspx + */ +typedef struct uv_buf_t { + ULONG len; + char* base; +} uv_buf_t; + +typedef int uv_file; +typedef SOCKET uv_os_sock_t; +typedef HANDLE uv_os_fd_t; + +typedef HANDLE uv_thread_t; + +typedef HANDLE uv_sem_t; + +typedef CRITICAL_SECTION uv_mutex_t; + +/* This condition variable implementation is based on the SetEvent solution + * (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html + * We could 
not use the SignalObjectAndWait solution (section 3.4) because + * it want the 2nd argument (type uv_mutex_t) of uv_cond_wait() and + * uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs. + */ + +typedef union { + CONDITION_VARIABLE cond_var; + struct { + unsigned int waiters_count; + CRITICAL_SECTION waiters_count_lock; + HANDLE signal_event; + HANDLE broadcast_event; + } fallback; +} uv_cond_t; + +typedef union { + struct { + unsigned int num_readers_; + CRITICAL_SECTION num_readers_lock_; + HANDLE write_semaphore_; + } state_; + /* TODO: remove me in v2.x. */ + struct { + SRWLOCK unused_; + } unused1_; + /* TODO: remove me in v2.x. */ + struct { + uv_mutex_t unused1_; + uv_mutex_t unused2_; + } unused2_; +} uv_rwlock_t; + +typedef struct { + unsigned int n; + unsigned int count; + uv_mutex_t mutex; + uv_sem_t turnstile1; + uv_sem_t turnstile2; +} uv_barrier_t; + +typedef struct { + DWORD tls_index; +} uv_key_t; + +#define UV_ONCE_INIT { 0, NULL } + +typedef struct uv_once_s { + unsigned char ran; + HANDLE event; +} uv_once_t; + +/* Platform-specific definitions for uv_spawn support. */ +typedef unsigned char uv_uid_t; +typedef unsigned char uv_gid_t; + +typedef struct uv__dirent_s { + int d_type; + char d_name[1]; +} uv__dirent_t; + +#define HAVE_DIRENT_TYPES +#define UV__DT_DIR UV_DIRENT_DIR +#define UV__DT_FILE UV_DIRENT_FILE +#define UV__DT_LINK UV_DIRENT_LINK +#define UV__DT_FIFO UV_DIRENT_FIFO +#define UV__DT_SOCKET UV_DIRENT_SOCKET +#define UV__DT_CHAR UV_DIRENT_CHAR +#define UV__DT_BLOCK UV_DIRENT_BLOCK + +/* Platform-specific definitions for uv_dlopen support. */ +#define UV_DYNAMIC FAR WINAPI +typedef struct { + HMODULE handle; + char* errmsg; +} uv_lib_t; + +RB_HEAD(uv_timer_tree_s, uv_timer_s); + +#define UV_LOOP_PRIVATE_FIELDS \ + /* The loop's I/O completion port */ \ + HANDLE iocp; \ + /* The current time according to the event loop. in msecs. */ \ + uint64_t time; \ + /* Tail of a single-linked circular queue of pending reqs. If the queue */ \ + /* is empty, tail_ is NULL. If there is only one item, */ \ + /* tail_->next_req == tail_ */ \ + uv_req_t* pending_reqs_tail; \ + /* Head of a single-linked list of closed handles */ \ + uv_handle_t* endgame_handles; \ + /* The head of the timers tree */ \ + struct uv_timer_tree_s timers; \ + /* Lists of active loop (prepare / check / idle) watchers */ \ + uv_prepare_t* prepare_handles; \ + uv_check_t* check_handles; \ + uv_idle_t* idle_handles; \ + /* This pointer will refer to the prepare/check/idle handle whose */ \ + /* callback is scheduled to be called next. This is needed to allow */ \ + /* safe removal from one of the lists above while that list being */ \ + /* iterated over. 
*/ \ + uv_prepare_t* next_prepare_handle; \ + uv_check_t* next_check_handle; \ + uv_idle_t* next_idle_handle; \ + /* This handle holds the peer sockets for the fast variant of uv_poll_t */ \ + SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \ + /* Counter to keep track of active tcp streams */ \ + unsigned int active_tcp_streams; \ + /* Counter to keep track of active udp streams */ \ + unsigned int active_udp_streams; \ + /* Counter to started timer */ \ + uint64_t timer_counter; \ + /* Threadpool */ \ + void* wq[2]; \ + uv_mutex_t wq_mutex; \ + uv_async_t wq_async; + +#define UV_REQ_TYPE_PRIVATE \ + /* TODO: remove the req suffix */ \ + UV_ACCEPT, \ + UV_FS_EVENT_REQ, \ + UV_POLL_REQ, \ + UV_PROCESS_EXIT, \ + UV_READ, \ + UV_UDP_RECV, \ + UV_WAKEUP, \ + UV_SIGNAL_REQ, + +#define UV_REQ_PRIVATE_FIELDS \ + union { \ + /* Used by I/O operations */ \ + struct { \ + OVERLAPPED overlapped; \ + size_t queued_bytes; \ + } io; \ + } u; \ + struct uv_req_s* next_req; + +#define UV_WRITE_PRIVATE_FIELDS \ + int ipc_header; \ + uv_buf_t write_buffer; \ + HANDLE event_handle; \ + HANDLE wait_handle; + +#define UV_CONNECT_PRIVATE_FIELDS \ + /* empty */ + +#define UV_SHUTDOWN_PRIVATE_FIELDS \ + /* empty */ + +#define UV_UDP_SEND_PRIVATE_FIELDS \ + /* empty */ + +#define UV_PRIVATE_REQ_TYPES \ + typedef struct uv_pipe_accept_s { \ + UV_REQ_FIELDS \ + HANDLE pipeHandle; \ + struct uv_pipe_accept_s* next_pending; \ + } uv_pipe_accept_t; \ + \ + typedef struct uv_tcp_accept_s { \ + UV_REQ_FIELDS \ + SOCKET accept_socket; \ + char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32]; \ + HANDLE event_handle; \ + HANDLE wait_handle; \ + struct uv_tcp_accept_s* next_pending; \ + } uv_tcp_accept_t; \ + \ + typedef struct uv_read_s { \ + UV_REQ_FIELDS \ + HANDLE event_handle; \ + HANDLE wait_handle; \ + } uv_read_t; + +#define uv_stream_connection_fields \ + unsigned int write_reqs_pending; \ + uv_shutdown_t* shutdown_req; + +#define uv_stream_server_fields \ + uv_connection_cb connection_cb; + +#define UV_STREAM_PRIVATE_FIELDS \ + unsigned int reqs_pending; \ + int activecnt; \ + uv_read_t read_req; \ + union { \ + struct { uv_stream_connection_fields } conn; \ + struct { uv_stream_server_fields } serv; \ + } stream; + +#define uv_tcp_server_fields \ + uv_tcp_accept_t* accept_reqs; \ + unsigned int processed_accepts; \ + uv_tcp_accept_t* pending_accepts; \ + LPFN_ACCEPTEX func_acceptex; + +#define uv_tcp_connection_fields \ + uv_buf_t read_buffer; \ + LPFN_CONNECTEX func_connectex; + +#define UV_TCP_PRIVATE_FIELDS \ + SOCKET socket; \ + int delayed_error; \ + union { \ + struct { uv_tcp_server_fields } serv; \ + struct { uv_tcp_connection_fields } conn; \ + } tcp; + +#define UV_UDP_PRIVATE_FIELDS \ + SOCKET socket; \ + unsigned int reqs_pending; \ + int activecnt; \ + uv_req_t recv_req; \ + uv_buf_t recv_buffer; \ + struct sockaddr_storage recv_from; \ + int recv_from_len; \ + uv_udp_recv_cb recv_cb; \ + uv_alloc_cb alloc_cb; \ + LPFN_WSARECV func_wsarecv; \ + LPFN_WSARECVFROM func_wsarecvfrom; + +#define uv_pipe_server_fields \ + int pending_instances; \ + uv_pipe_accept_t* accept_reqs; \ + uv_pipe_accept_t* pending_accepts; + +#define uv_pipe_connection_fields \ + uv_timer_t* eof_timer; \ + uv_write_t ipc_header_write_req; \ + int ipc_pid; \ + uint64_t remaining_ipc_rawdata_bytes; \ + struct { \ + void* queue[2]; \ + int queue_len; \ + } pending_ipc_info; \ + uv_write_t* non_overlapped_writes_tail; \ + uv_mutex_t readfile_mutex; \ + volatile HANDLE readfile_thread; + +#define UV_PIPE_PRIVATE_FIELDS \ + 
HANDLE handle; \ + WCHAR* name; \ + union { \ + struct { uv_pipe_server_fields } serv; \ + struct { uv_pipe_connection_fields } conn; \ + } pipe; + +/* TODO: put the parser states in an union - TTY handles are always */ +/* half-duplex so read-state can safely overlap write-state. */ +#define UV_TTY_PRIVATE_FIELDS \ + HANDLE handle; \ + union { \ + struct { \ + /* Used for readable TTY handles */ \ + /* TODO: remove me in v2.x. */ \ + HANDLE unused_; \ + uv_buf_t read_line_buffer; \ + HANDLE read_raw_wait; \ + /* Fields used for translating win keystrokes into vt100 characters */ \ + char last_key[8]; \ + unsigned char last_key_offset; \ + unsigned char last_key_len; \ + WCHAR last_utf16_high_surrogate; \ + INPUT_RECORD last_input_record; \ + } rd; \ + struct { \ + /* Used for writable TTY handles */ \ + /* utf8-to-utf16 conversion state */ \ + unsigned int utf8_codepoint; \ + unsigned char utf8_bytes_left; \ + /* eol conversion state */ \ + unsigned char previous_eol; \ + /* ansi parser state */ \ + unsigned char ansi_parser_state; \ + unsigned char ansi_csi_argc; \ + unsigned short ansi_csi_argv[4]; \ + COORD saved_position; \ + WORD saved_attributes; \ + } wr; \ + } tty; + +#define UV_POLL_PRIVATE_FIELDS \ + SOCKET socket; \ + /* Used in fast mode */ \ + SOCKET peer_socket; \ + AFD_POLL_INFO afd_poll_info_1; \ + AFD_POLL_INFO afd_poll_info_2; \ + /* Used in fast and slow mode. */ \ + uv_req_t poll_req_1; \ + uv_req_t poll_req_2; \ + unsigned char submitted_events_1; \ + unsigned char submitted_events_2; \ + unsigned char mask_events_1; \ + unsigned char mask_events_2; \ + unsigned char events; + +#define UV_TIMER_PRIVATE_FIELDS \ + RB_ENTRY(uv_timer_s) tree_entry; \ + uint64_t due; \ + uint64_t repeat; \ + uint64_t start_id; \ + uv_timer_cb timer_cb; + +#define UV_ASYNC_PRIVATE_FIELDS \ + struct uv_req_s async_req; \ + uv_async_cb async_cb; \ + /* char to avoid alignment issues */ \ + char volatile async_sent; + +#define UV_PREPARE_PRIVATE_FIELDS \ + uv_prepare_t* prepare_prev; \ + uv_prepare_t* prepare_next; \ + uv_prepare_cb prepare_cb; + +#define UV_CHECK_PRIVATE_FIELDS \ + uv_check_t* check_prev; \ + uv_check_t* check_next; \ + uv_check_cb check_cb; + +#define UV_IDLE_PRIVATE_FIELDS \ + uv_idle_t* idle_prev; \ + uv_idle_t* idle_next; \ + uv_idle_cb idle_cb; + +#define UV_HANDLE_PRIVATE_FIELDS \ + uv_handle_t* endgame_next; \ + unsigned int flags; + +#define UV_GETADDRINFO_PRIVATE_FIELDS \ + struct uv__work work_req; \ + uv_getaddrinfo_cb getaddrinfo_cb; \ + void* alloc; \ + WCHAR* node; \ + WCHAR* service; \ + /* The addrinfoW field is used to store a pointer to the hints, and */ \ + /* later on to store the result of GetAddrInfoW. The final result will */ \ + /* be converted to struct addrinfo* and stored in the addrinfo field. */ \ + struct addrinfoW* addrinfow; \ + struct addrinfo* addrinfo; \ + int retcode; + +#define UV_GETNAMEINFO_PRIVATE_FIELDS \ + struct uv__work work_req; \ + uv_getnameinfo_cb getnameinfo_cb; \ + struct sockaddr_storage storage; \ + int flags; \ + char host[NI_MAXHOST]; \ + char service[NI_MAXSERV]; \ + int retcode; + +#define UV_PROCESS_PRIVATE_FIELDS \ + struct uv_process_exit_s { \ + UV_REQ_FIELDS \ + } exit_req; \ + BYTE* child_stdio_buffer; \ + int exit_signal; \ + HANDLE wait_handle; \ + HANDLE process_handle; \ + volatile char exit_cb_pending; + +#define UV_FS_PRIVATE_FIELDS \ + struct uv__work work_req; \ + int flags; \ + DWORD sys_errno_; \ + union { \ + /* TODO: remove me in 0.9. 
*/ \ + WCHAR* pathw; \ + int fd; \ + } file; \ + union { \ + struct { \ + int mode; \ + WCHAR* new_pathw; \ + int file_flags; \ + int fd_out; \ + unsigned int nbufs; \ + uv_buf_t* bufs; \ + int64_t offset; \ + uv_buf_t bufsml[4]; \ + } info; \ + struct { \ + double atime; \ + double mtime; \ + } time; \ + } fs; + +#define UV_WORK_PRIVATE_FIELDS \ + struct uv__work work_req; + +#define UV_FS_EVENT_PRIVATE_FIELDS \ + struct uv_fs_event_req_s { \ + UV_REQ_FIELDS \ + } req; \ + HANDLE dir_handle; \ + int req_pending; \ + uv_fs_event_cb cb; \ + WCHAR* filew; \ + WCHAR* short_filew; \ + WCHAR* dirw; \ + char* buffer; + +#define UV_SIGNAL_PRIVATE_FIELDS \ + RB_ENTRY(uv_signal_s) tree_entry; \ + struct uv_req_s signal_req; \ + unsigned long pending_signum; + +#ifndef F_OK +#define F_OK 0 +#endif +#ifndef R_OK +#define R_OK 4 +#endif +#ifndef W_OK +#define W_OK 2 +#endif +#ifndef X_OK +#define X_OK 1 +#endif diff --git a/deps/libuv/include/uv.h b/deps/libuv/include/uv.h new file mode 100644 index 00000000..31f09f0f --- /dev/null +++ b/deps/libuv/include/uv.h @@ -0,0 +1,1497 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* See https://github.com/libuv/libuv#documentation for documentation. */ + +#ifndef UV_H +#define UV_H +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _WIN32 + /* Windows - set up dll import/export decorators. */ +# if defined(BUILDING_UV_SHARED) + /* Building shared library. */ +# define UV_EXTERN __declspec(dllexport) +# elif defined(USING_UV_SHARED) + /* Using shared library. */ +# define UV_EXTERN __declspec(dllimport) +# else + /* Building static library. */ +# define UV_EXTERN /* nothing */ +# endif +#elif __GNUC__ >= 4 +# define UV_EXTERN __attribute__((visibility("default"))) +#else +# define UV_EXTERN /* nothing */ +#endif + +#include "uv-errno.h" +#include "uv-version.h" +#include <stddef.h> +#include <stdio.h> + +#if defined(_MSC_VER) && _MSC_VER < 1600 +# include "stdint-msvc2008.h" +#else +# include <stdint.h> +#endif + +#if defined(_WIN32) +# include "uv-win.h" +#else +# include "uv-unix.h" +#endif + +/* Expand this list if necessary.
*/ +#define UV_ERRNO_MAP(XX) \ + XX(E2BIG, "argument list too long") \ + XX(EACCES, "permission denied") \ + XX(EADDRINUSE, "address already in use") \ + XX(EADDRNOTAVAIL, "address not available") \ + XX(EAFNOSUPPORT, "address family not supported") \ + XX(EAGAIN, "resource temporarily unavailable") \ + XX(EAI_ADDRFAMILY, "address family not supported") \ + XX(EAI_AGAIN, "temporary failure") \ + XX(EAI_BADFLAGS, "bad ai_flags value") \ + XX(EAI_BADHINTS, "invalid value for hints") \ + XX(EAI_CANCELED, "request canceled") \ + XX(EAI_FAIL, "permanent failure") \ + XX(EAI_FAMILY, "ai_family not supported") \ + XX(EAI_MEMORY, "out of memory") \ + XX(EAI_NODATA, "no address") \ + XX(EAI_NONAME, "unknown node or service") \ + XX(EAI_OVERFLOW, "argument buffer overflow") \ + XX(EAI_PROTOCOL, "resolved protocol is unknown") \ + XX(EAI_SERVICE, "service not available for socket type") \ + XX(EAI_SOCKTYPE, "socket type not supported") \ + XX(EALREADY, "connection already in progress") \ + XX(EBADF, "bad file descriptor") \ + XX(EBUSY, "resource busy or locked") \ + XX(ECANCELED, "operation canceled") \ + XX(ECHARSET, "invalid Unicode character") \ + XX(ECONNABORTED, "software caused connection abort") \ + XX(ECONNREFUSED, "connection refused") \ + XX(ECONNRESET, "connection reset by peer") \ + XX(EDESTADDRREQ, "destination address required") \ + XX(EEXIST, "file already exists") \ + XX(EFAULT, "bad address in system call argument") \ + XX(EFBIG, "file too large") \ + XX(EHOSTUNREACH, "host is unreachable") \ + XX(EINTR, "interrupted system call") \ + XX(EINVAL, "invalid argument") \ + XX(EIO, "i/o error") \ + XX(EISCONN, "socket is already connected") \ + XX(EISDIR, "illegal operation on a directory") \ + XX(ELOOP, "too many symbolic links encountered") \ + XX(EMFILE, "too many open files") \ + XX(EMSGSIZE, "message too long") \ + XX(ENAMETOOLONG, "name too long") \ + XX(ENETDOWN, "network is down") \ + XX(ENETUNREACH, "network is unreachable") \ + XX(ENFILE, "file table overflow") \ + XX(ENOBUFS, "no buffer space available") \ + XX(ENODEV, "no such device") \ + XX(ENOENT, "no such file or directory") \ + XX(ENOMEM, "not enough memory") \ + XX(ENONET, "machine is not on the network") \ + XX(ENOPROTOOPT, "protocol not available") \ + XX(ENOSPC, "no space left on device") \ + XX(ENOSYS, "function not implemented") \ + XX(ENOTCONN, "socket is not connected") \ + XX(ENOTDIR, "not a directory") \ + XX(ENOTEMPTY, "directory not empty") \ + XX(ENOTSOCK, "socket operation on non-socket") \ + XX(ENOTSUP, "operation not supported on socket") \ + XX(EPERM, "operation not permitted") \ + XX(EPIPE, "broken pipe") \ + XX(EPROTO, "protocol error") \ + XX(EPROTONOSUPPORT, "protocol not supported") \ + XX(EPROTOTYPE, "protocol wrong type for socket") \ + XX(ERANGE, "result too large") \ + XX(EROFS, "read-only file system") \ + XX(ESHUTDOWN, "cannot send after transport endpoint shutdown") \ + XX(ESPIPE, "invalid seek") \ + XX(ESRCH, "no such process") \ + XX(ETIMEDOUT, "connection timed out") \ + XX(ETXTBSY, "text file is busy") \ + XX(EXDEV, "cross-device link not permitted") \ + XX(UNKNOWN, "unknown error") \ + XX(EOF, "end of file") \ + XX(ENXIO, "no such device or address") \ + XX(EMLINK, "too many links") \ + XX(EHOSTDOWN, "host is down") \ + +#define UV_HANDLE_TYPE_MAP(XX) \ + XX(ASYNC, async) \ + XX(CHECK, check) \ + XX(FS_EVENT, fs_event) \ + XX(FS_POLL, fs_poll) \ + XX(HANDLE, handle) \ + XX(IDLE, idle) \ + XX(NAMED_PIPE, pipe) \ + XX(POLL, poll) \ + XX(PREPARE, prepare) \ + XX(PROCESS, process) \ + 
XX(STREAM, stream) \ + XX(TCP, tcp) \ + XX(TIMER, timer) \ + XX(TTY, tty) \ + XX(UDP, udp) \ + XX(SIGNAL, signal) \ + +#define UV_REQ_TYPE_MAP(XX) \ + XX(REQ, req) \ + XX(CONNECT, connect) \ + XX(WRITE, write) \ + XX(SHUTDOWN, shutdown) \ + XX(UDP_SEND, udp_send) \ + XX(FS, fs) \ + XX(WORK, work) \ + XX(GETADDRINFO, getaddrinfo) \ + XX(GETNAMEINFO, getnameinfo) \ + +typedef enum { +#define XX(code, _) UV_ ## code = UV__ ## code, + UV_ERRNO_MAP(XX) +#undef XX + UV_ERRNO_MAX = UV__EOF - 1 +} uv_errno_t; + +typedef enum { + UV_UNKNOWN_HANDLE = 0, +#define XX(uc, lc) UV_##uc, + UV_HANDLE_TYPE_MAP(XX) +#undef XX + UV_FILE, + UV_HANDLE_TYPE_MAX +} uv_handle_type; + +typedef enum { + UV_UNKNOWN_REQ = 0, +#define XX(uc, lc) UV_##uc, + UV_REQ_TYPE_MAP(XX) +#undef XX + UV_REQ_TYPE_PRIVATE + UV_REQ_TYPE_MAX +} uv_req_type; + + +/* Handle types. */ +typedef struct uv_loop_s uv_loop_t; +typedef struct uv_handle_s uv_handle_t; +typedef struct uv_stream_s uv_stream_t; +typedef struct uv_tcp_s uv_tcp_t; +typedef struct uv_udp_s uv_udp_t; +typedef struct uv_pipe_s uv_pipe_t; +typedef struct uv_tty_s uv_tty_t; +typedef struct uv_poll_s uv_poll_t; +typedef struct uv_timer_s uv_timer_t; +typedef struct uv_prepare_s uv_prepare_t; +typedef struct uv_check_s uv_check_t; +typedef struct uv_idle_s uv_idle_t; +typedef struct uv_async_s uv_async_t; +typedef struct uv_process_s uv_process_t; +typedef struct uv_fs_event_s uv_fs_event_t; +typedef struct uv_fs_poll_s uv_fs_poll_t; +typedef struct uv_signal_s uv_signal_t; + +/* Request types. */ +typedef struct uv_req_s uv_req_t; +typedef struct uv_getaddrinfo_s uv_getaddrinfo_t; +typedef struct uv_getnameinfo_s uv_getnameinfo_t; +typedef struct uv_shutdown_s uv_shutdown_t; +typedef struct uv_write_s uv_write_t; +typedef struct uv_connect_s uv_connect_t; +typedef struct uv_udp_send_s uv_udp_send_t; +typedef struct uv_fs_s uv_fs_t; +typedef struct uv_work_s uv_work_t; + +/* None of the above. */ +typedef struct uv_cpu_info_s uv_cpu_info_t; +typedef struct uv_interface_address_s uv_interface_address_t; +typedef struct uv_dirent_s uv_dirent_t; +typedef struct uv_passwd_s uv_passwd_t; + +typedef enum { + UV_LOOP_BLOCK_SIGNAL +} uv_loop_option; + +typedef enum { + UV_RUN_DEFAULT = 0, + UV_RUN_ONCE, + UV_RUN_NOWAIT +} uv_run_mode; + + +UV_EXTERN unsigned int uv_version(void); +UV_EXTERN const char* uv_version_string(void); + +typedef void* (*uv_malloc_func)(size_t size); +typedef void* (*uv_realloc_func)(void* ptr, size_t size); +typedef void* (*uv_calloc_func)(size_t count, size_t size); +typedef void (*uv_free_func)(void* ptr); + +UV_EXTERN int uv_replace_allocator(uv_malloc_func malloc_func, + uv_realloc_func realloc_func, + uv_calloc_func calloc_func, + uv_free_func free_func); + +UV_EXTERN uv_loop_t* uv_default_loop(void); +UV_EXTERN int uv_loop_init(uv_loop_t* loop); +UV_EXTERN int uv_loop_close(uv_loop_t* loop); +/* + * NOTE: + * This function is DEPRECATED (to be removed after 0.12), users should + * allocate the loop manually and use uv_loop_init instead. + */ +UV_EXTERN uv_loop_t* uv_loop_new(void); +/* + * NOTE: + * This function is DEPRECATED (to be removed after 0.12). Users should use + * uv_loop_close and free the memory manually instead. 
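As an aside, the non-deprecated lifecycle that the two notes above describe — allocate the loop yourself, initialize it, run it, close it, then free it — can be sketched as follows (illustrative only; return codes unchecked):

    #include <stdlib.h>
    #include "uv.h"

    int main(void) {
      uv_loop_t* loop = malloc(sizeof(uv_loop_t));
      if (loop == NULL) return 1;
      uv_loop_init(loop);            /* replaces the deprecated uv_loop_new() */
      uv_run(loop, UV_RUN_DEFAULT);  /* returns once no active handles or requests remain */
      uv_loop_close(loop);           /* replaces uv_loop_delete(); the caller frees the memory */
      free(loop);
      return 0;
    }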
+ */ +UV_EXTERN void uv_loop_delete(uv_loop_t*); +UV_EXTERN size_t uv_loop_size(void); +UV_EXTERN int uv_loop_alive(const uv_loop_t* loop); +UV_EXTERN int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...); + +UV_EXTERN int uv_run(uv_loop_t*, uv_run_mode mode); +UV_EXTERN void uv_stop(uv_loop_t*); + +UV_EXTERN void uv_ref(uv_handle_t*); +UV_EXTERN void uv_unref(uv_handle_t*); +UV_EXTERN int uv_has_ref(const uv_handle_t*); + +UV_EXTERN void uv_update_time(uv_loop_t*); +UV_EXTERN uint64_t uv_now(const uv_loop_t*); + +UV_EXTERN int uv_backend_fd(const uv_loop_t*); +UV_EXTERN int uv_backend_timeout(const uv_loop_t*); + +typedef void (*uv_alloc_cb)(uv_handle_t* handle, + size_t suggested_size, + uv_buf_t* buf); +typedef void (*uv_read_cb)(uv_stream_t* stream, + ssize_t nread, + const uv_buf_t* buf); +typedef void (*uv_write_cb)(uv_write_t* req, int status); +typedef void (*uv_connect_cb)(uv_connect_t* req, int status); +typedef void (*uv_shutdown_cb)(uv_shutdown_t* req, int status); +typedef void (*uv_connection_cb)(uv_stream_t* server, int status); +typedef void (*uv_close_cb)(uv_handle_t* handle); +typedef void (*uv_poll_cb)(uv_poll_t* handle, int status, int events); +typedef void (*uv_timer_cb)(uv_timer_t* handle); +typedef void (*uv_async_cb)(uv_async_t* handle); +typedef void (*uv_prepare_cb)(uv_prepare_t* handle); +typedef void (*uv_check_cb)(uv_check_t* handle); +typedef void (*uv_idle_cb)(uv_idle_t* handle); +typedef void (*uv_exit_cb)(uv_process_t*, int64_t exit_status, int term_signal); +typedef void (*uv_walk_cb)(uv_handle_t* handle, void* arg); +typedef void (*uv_fs_cb)(uv_fs_t* req); +typedef void (*uv_work_cb)(uv_work_t* req); +typedef void (*uv_after_work_cb)(uv_work_t* req, int status); +typedef void (*uv_getaddrinfo_cb)(uv_getaddrinfo_t* req, + int status, + struct addrinfo* res); +typedef void (*uv_getnameinfo_cb)(uv_getnameinfo_t* req, + int status, + const char* hostname, + const char* service); + +typedef struct { + long tv_sec; + long tv_nsec; +} uv_timespec_t; + + +typedef struct { + uint64_t st_dev; + uint64_t st_mode; + uint64_t st_nlink; + uint64_t st_uid; + uint64_t st_gid; + uint64_t st_rdev; + uint64_t st_ino; + uint64_t st_size; + uint64_t st_blksize; + uint64_t st_blocks; + uint64_t st_flags; + uint64_t st_gen; + uv_timespec_t st_atim; + uv_timespec_t st_mtim; + uv_timespec_t st_ctim; + uv_timespec_t st_birthtim; +} uv_stat_t; + + +typedef void (*uv_fs_event_cb)(uv_fs_event_t* handle, + const char* filename, + int events, + int status); + +typedef void (*uv_fs_poll_cb)(uv_fs_poll_t* handle, + int status, + const uv_stat_t* prev, + const uv_stat_t* curr); + +typedef void (*uv_signal_cb)(uv_signal_t* handle, int signum); + + +typedef enum { + UV_LEAVE_GROUP = 0, + UV_JOIN_GROUP +} uv_membership; + + +UV_EXTERN int uv_translate_sys_error(int sys_errno); + +UV_EXTERN const char* uv_strerror(int err); +UV_EXTERN const char* uv_err_name(int err); + + +#define UV_REQ_FIELDS \ + /* public */ \ + void* data; \ + /* read-only */ \ + uv_req_type type; \ + /* private */ \ + void* active_queue[2]; \ + void* reserved[4]; \ + UV_REQ_PRIVATE_FIELDS \ + +/* Abstract base class of all requests. */ +struct uv_req_s { + UV_REQ_FIELDS +}; + + +/* Platform-specific request types. 
*/ +UV_PRIVATE_REQ_TYPES + + +UV_EXTERN int uv_shutdown(uv_shutdown_t* req, + uv_stream_t* handle, + uv_shutdown_cb cb); + +struct uv_shutdown_s { + UV_REQ_FIELDS + uv_stream_t* handle; + uv_shutdown_cb cb; + UV_SHUTDOWN_PRIVATE_FIELDS +}; + + +#define UV_HANDLE_FIELDS \ + /* public */ \ + void* data; \ + /* read-only */ \ + uv_loop_t* loop; \ + uv_handle_type type; \ + /* private */ \ + uv_close_cb close_cb; \ + void* handle_queue[2]; \ + union { \ + int fd; \ + void* reserved[4]; \ + } u; \ + UV_HANDLE_PRIVATE_FIELDS \ + +/* The abstract base class of all handles. */ +struct uv_handle_s { + UV_HANDLE_FIELDS +}; + +UV_EXTERN size_t uv_handle_size(uv_handle_type type); +UV_EXTERN size_t uv_req_size(uv_req_type type); + +UV_EXTERN int uv_is_active(const uv_handle_t* handle); + +UV_EXTERN void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg); + +/* Helpers for ad hoc debugging, no API/ABI stability guaranteed. */ +UV_EXTERN void uv_print_all_handles(uv_loop_t* loop, FILE* stream); +UV_EXTERN void uv_print_active_handles(uv_loop_t* loop, FILE* stream); + +UV_EXTERN void uv_close(uv_handle_t* handle, uv_close_cb close_cb); + +UV_EXTERN int uv_send_buffer_size(uv_handle_t* handle, int* value); +UV_EXTERN int uv_recv_buffer_size(uv_handle_t* handle, int* value); + +UV_EXTERN int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd); + +UV_EXTERN uv_buf_t uv_buf_init(char* base, unsigned int len); + + +#define UV_STREAM_FIELDS \ + /* number of bytes queued for writing */ \ + size_t write_queue_size; \ + uv_alloc_cb alloc_cb; \ + uv_read_cb read_cb; \ + /* private */ \ + UV_STREAM_PRIVATE_FIELDS + +/* + * uv_stream_t is a subclass of uv_handle_t. + * + * uv_stream is an abstract class. + * + * uv_stream_t is the parent class of uv_tcp_t, uv_pipe_t and uv_tty_t. + */ +struct uv_stream_s { + UV_HANDLE_FIELDS + UV_STREAM_FIELDS +}; + +UV_EXTERN int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb); +UV_EXTERN int uv_accept(uv_stream_t* server, uv_stream_t* client); + +UV_EXTERN int uv_read_start(uv_stream_t*, + uv_alloc_cb alloc_cb, + uv_read_cb read_cb); +UV_EXTERN int uv_read_stop(uv_stream_t*); + +UV_EXTERN int uv_write(uv_write_t* req, + uv_stream_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_write_cb cb); +UV_EXTERN int uv_write2(uv_write_t* req, + uv_stream_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_stream_t* send_handle, + uv_write_cb cb); +UV_EXTERN int uv_try_write(uv_stream_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs); + +/* uv_write_t is a subclass of uv_req_t. */ +struct uv_write_s { + UV_REQ_FIELDS + uv_write_cb cb; + uv_stream_t* send_handle; + uv_stream_t* handle; + UV_WRITE_PRIVATE_FIELDS +}; + + +UV_EXTERN int uv_is_readable(const uv_stream_t* handle); +UV_EXTERN int uv_is_writable(const uv_stream_t* handle); + +UV_EXTERN int uv_stream_set_blocking(uv_stream_t* handle, int blocking); + +UV_EXTERN int uv_is_closing(const uv_handle_t* handle); + + +/* + * uv_tcp_t is a subclass of uv_stream_t. + * + * Represents a TCP stream or TCP server. 
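A rough sketch of how the stream primitives above combine — uv_write() to queue a buffer and uv_read_start() to begin reading on an already-connected stream — is shown below; echo_hello and the three callbacks are made-up names, not part of the header:

    #include <stdio.h>
    #include <stdlib.h>
    #include "uv.h"

    static void alloc_cb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
      buf->base = malloc(suggested_size);
      buf->len = suggested_size;
    }

    static void read_cb(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
      if (nread > 0)
        printf("read %zd bytes\n", nread);
      else if (nread < 0)                      /* UV_EOF or an error code */
        uv_close((uv_handle_t*)stream, NULL);
      free(buf->base);
    }

    static void write_cb(uv_write_t* req, int status) {
      free(req);                               /* the request must outlive uv_write() */
    }

    /* stream is any connected uv_tcp_t, uv_pipe_t or uv_tty_t cast to uv_stream_t*. */
    void echo_hello(uv_stream_t* stream) {
      uv_write_t* req = malloc(sizeof(*req));
      uv_buf_t buf = uv_buf_init("hello\n", 6);
      uv_write(req, stream, &buf, 1, write_cb);
      uv_read_start(stream, alloc_cb, read_cb);
    }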
+ */ +struct uv_tcp_s { + UV_HANDLE_FIELDS + UV_STREAM_FIELDS + UV_TCP_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_tcp_init(uv_loop_t*, uv_tcp_t* handle); +UV_EXTERN int uv_tcp_init_ex(uv_loop_t*, uv_tcp_t* handle, unsigned int flags); +UV_EXTERN int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock); +UV_EXTERN int uv_tcp_nodelay(uv_tcp_t* handle, int enable); +UV_EXTERN int uv_tcp_keepalive(uv_tcp_t* handle, + int enable, + unsigned int delay); +UV_EXTERN int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable); + +enum uv_tcp_flags { + /* Used with uv_tcp_bind, when an IPv6 address is used. */ + UV_TCP_IPV6ONLY = 1 +}; + +UV_EXTERN int uv_tcp_bind(uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int flags); +UV_EXTERN int uv_tcp_getsockname(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen); +UV_EXTERN int uv_tcp_getpeername(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen); +UV_EXTERN int uv_tcp_connect(uv_connect_t* req, + uv_tcp_t* handle, + const struct sockaddr* addr, + uv_connect_cb cb); + +/* uv_connect_t is a subclass of uv_req_t. */ +struct uv_connect_s { + UV_REQ_FIELDS + uv_connect_cb cb; + uv_stream_t* handle; + UV_CONNECT_PRIVATE_FIELDS +}; + + +/* + * UDP support. + */ + +enum uv_udp_flags { + /* Disables dual stack mode. */ + UV_UDP_IPV6ONLY = 1, + /* + * Indicates message was truncated because read buffer was too small. The + * remainder was discarded by the OS. Used in uv_udp_recv_cb. + */ + UV_UDP_PARTIAL = 2, + /* + * Indicates if SO_REUSEADDR will be set when binding the handle. + * This sets the SO_REUSEPORT socket flag on the BSDs and OS X. On other + * Unix platforms, it sets the SO_REUSEADDR flag. What that means is that + * multiple threads or processes can bind to the same address without error + * (provided they all set the flag) but only the last one to bind will receive + * any traffic, in effect "stealing" the port from the previous listener. + */ + UV_UDP_REUSEADDR = 4 +}; + +typedef void (*uv_udp_send_cb)(uv_udp_send_t* req, int status); +typedef void (*uv_udp_recv_cb)(uv_udp_t* handle, + ssize_t nread, + const uv_buf_t* buf, + const struct sockaddr* addr, + unsigned flags); + +/* uv_udp_t is a subclass of uv_handle_t. */ +struct uv_udp_s { + UV_HANDLE_FIELDS + /* read-only */ + /* + * Number of bytes queued for sending. This field strictly shows how much + * information is currently queued. + */ + size_t send_queue_size; + /* + * Number of send requests currently in the queue awaiting to be processed. + */ + size_t send_queue_count; + UV_UDP_PRIVATE_FIELDS +}; + +/* uv_udp_send_t is a subclass of uv_req_t. 
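To connect the UV_UDP_REUSEADDR note and the receive path above, a minimal datagram listener might be sketched like this (listen_udp, the callbacks and port 9999 are illustrative, not part of the library; uv_ip4_addr() is declared further down in this header):

    #include <stdio.h>
    #include <stdlib.h>
    #include "uv.h"

    static void udp_alloc_cb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
      buf->base = malloc(suggested_size);
      buf->len = suggested_size;
    }

    static void udp_recv_cb(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
                            const struct sockaddr* addr, unsigned flags) {
      if (nread > 0 && !(flags & UV_UDP_PARTIAL))
        printf("datagram of %zd bytes\n", nread);
      free(buf->base);                /* free(NULL) is harmless on the nread == 0 path */
    }

    int listen_udp(uv_loop_t* loop, uv_udp_t* handle) {
      struct sockaddr_in addr;
      uv_udp_init(loop, handle);
      uv_ip4_addr("0.0.0.0", 9999, &addr);
      /* UV_UDP_REUSEADDR: other sockets may bind the same port, as described above. */
      uv_udp_bind(handle, (const struct sockaddr*)&addr, UV_UDP_REUSEADDR);
      return uv_udp_recv_start(handle, udp_alloc_cb, udp_recv_cb);
    }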
*/ +struct uv_udp_send_s { + UV_REQ_FIELDS + uv_udp_t* handle; + uv_udp_send_cb cb; + UV_UDP_SEND_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_udp_init(uv_loop_t*, uv_udp_t* handle); +UV_EXTERN int uv_udp_init_ex(uv_loop_t*, uv_udp_t* handle, unsigned int flags); +UV_EXTERN int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock); +UV_EXTERN int uv_udp_bind(uv_udp_t* handle, + const struct sockaddr* addr, + unsigned int flags); + +UV_EXTERN int uv_udp_getsockname(const uv_udp_t* handle, + struct sockaddr* name, + int* namelen); +UV_EXTERN int uv_udp_set_membership(uv_udp_t* handle, + const char* multicast_addr, + const char* interface_addr, + uv_membership membership); +UV_EXTERN int uv_udp_set_multicast_loop(uv_udp_t* handle, int on); +UV_EXTERN int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl); +UV_EXTERN int uv_udp_set_multicast_interface(uv_udp_t* handle, + const char* interface_addr); +UV_EXTERN int uv_udp_set_broadcast(uv_udp_t* handle, int on); +UV_EXTERN int uv_udp_set_ttl(uv_udp_t* handle, int ttl); +UV_EXTERN int uv_udp_send(uv_udp_send_t* req, + uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + uv_udp_send_cb send_cb); +UV_EXTERN int uv_udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr); +UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle, + uv_alloc_cb alloc_cb, + uv_udp_recv_cb recv_cb); +UV_EXTERN int uv_udp_recv_stop(uv_udp_t* handle); + + +/* + * uv_tty_t is a subclass of uv_stream_t. + * + * Representing a stream for the console. + */ +struct uv_tty_s { + UV_HANDLE_FIELDS + UV_STREAM_FIELDS + UV_TTY_PRIVATE_FIELDS +}; + +typedef enum { + /* Initial/normal terminal mode */ + UV_TTY_MODE_NORMAL, + /* Raw input mode (On Windows, ENABLE_WINDOW_INPUT is also enabled) */ + UV_TTY_MODE_RAW, + /* Binary-safe I/O mode for IPC (Unix-only) */ + UV_TTY_MODE_IO +} uv_tty_mode_t; + +UV_EXTERN int uv_tty_init(uv_loop_t*, uv_tty_t*, uv_file fd, int readable); +UV_EXTERN int uv_tty_set_mode(uv_tty_t*, uv_tty_mode_t mode); +UV_EXTERN int uv_tty_reset_mode(void); +UV_EXTERN int uv_tty_get_winsize(uv_tty_t*, int* width, int* height); + +#ifdef __cplusplus +extern "C++" { + +inline int uv_tty_set_mode(uv_tty_t* handle, int mode) { + return uv_tty_set_mode(handle, static_cast<uv_tty_mode_t>(mode)); +} + +} +#endif + +UV_EXTERN uv_handle_type uv_guess_handle(uv_file file); + +/* + * uv_pipe_t is a subclass of uv_stream_t. + * + * Representing a pipe stream or pipe server. On Windows this is a Named + * Pipe. On Unix this is a Unix domain socket.
+ */ +struct uv_pipe_s { + UV_HANDLE_FIELDS + UV_STREAM_FIELDS + int ipc; /* non-zero if this pipe is used for passing handles */ + UV_PIPE_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_pipe_init(uv_loop_t*, uv_pipe_t* handle, int ipc); +UV_EXTERN int uv_pipe_open(uv_pipe_t*, uv_file file); +UV_EXTERN int uv_pipe_bind(uv_pipe_t* handle, const char* name); +UV_EXTERN void uv_pipe_connect(uv_connect_t* req, + uv_pipe_t* handle, + const char* name, + uv_connect_cb cb); +UV_EXTERN int uv_pipe_getsockname(const uv_pipe_t* handle, + char* buffer, + size_t* size); +UV_EXTERN int uv_pipe_getpeername(const uv_pipe_t* handle, + char* buffer, + size_t* size); +UV_EXTERN void uv_pipe_pending_instances(uv_pipe_t* handle, int count); +UV_EXTERN int uv_pipe_pending_count(uv_pipe_t* handle); +UV_EXTERN uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle); + + +struct uv_poll_s { + UV_HANDLE_FIELDS + uv_poll_cb poll_cb; + UV_POLL_PRIVATE_FIELDS +}; + +enum uv_poll_event { + UV_READABLE = 1, + UV_WRITABLE = 2, + UV_DISCONNECT = 4 +}; + +UV_EXTERN int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd); +UV_EXTERN int uv_poll_init_socket(uv_loop_t* loop, + uv_poll_t* handle, + uv_os_sock_t socket); +UV_EXTERN int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb); +UV_EXTERN int uv_poll_stop(uv_poll_t* handle); + + +struct uv_prepare_s { + UV_HANDLE_FIELDS + UV_PREPARE_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_prepare_init(uv_loop_t*, uv_prepare_t* prepare); +UV_EXTERN int uv_prepare_start(uv_prepare_t* prepare, uv_prepare_cb cb); +UV_EXTERN int uv_prepare_stop(uv_prepare_t* prepare); + + +struct uv_check_s { + UV_HANDLE_FIELDS + UV_CHECK_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_check_init(uv_loop_t*, uv_check_t* check); +UV_EXTERN int uv_check_start(uv_check_t* check, uv_check_cb cb); +UV_EXTERN int uv_check_stop(uv_check_t* check); + + +struct uv_idle_s { + UV_HANDLE_FIELDS + UV_IDLE_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_idle_init(uv_loop_t*, uv_idle_t* idle); +UV_EXTERN int uv_idle_start(uv_idle_t* idle, uv_idle_cb cb); +UV_EXTERN int uv_idle_stop(uv_idle_t* idle); + + +struct uv_async_s { + UV_HANDLE_FIELDS + UV_ASYNC_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_async_init(uv_loop_t*, + uv_async_t* async, + uv_async_cb async_cb); +UV_EXTERN int uv_async_send(uv_async_t* async); + + +/* + * uv_timer_t is a subclass of uv_handle_t. + * + * Used to get woken up at a specified time in the future. + */ +struct uv_timer_s { + UV_HANDLE_FIELDS + UV_TIMER_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_timer_init(uv_loop_t*, uv_timer_t* handle); +UV_EXTERN int uv_timer_start(uv_timer_t* handle, + uv_timer_cb cb, + uint64_t timeout, + uint64_t repeat); +UV_EXTERN int uv_timer_stop(uv_timer_t* handle); +UV_EXTERN int uv_timer_again(uv_timer_t* handle); +UV_EXTERN void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat); +UV_EXTERN uint64_t uv_timer_get_repeat(const uv_timer_t* handle); + + +/* + * uv_getaddrinfo_t is a subclass of uv_req_t. + * + * Request object for uv_getaddrinfo. + */ +struct uv_getaddrinfo_s { + UV_REQ_FIELDS + /* read-only */ + uv_loop_t* loop; + /* struct addrinfo* addrinfo is marked as private, but it really isn't. */ + UV_GETADDRINFO_PRIVATE_FIELDS +}; + + +UV_EXTERN int uv_getaddrinfo(uv_loop_t* loop, + uv_getaddrinfo_t* req, + uv_getaddrinfo_cb getaddrinfo_cb, + const char* node, + const char* service, + const struct addrinfo* hints); +UV_EXTERN void uv_freeaddrinfo(struct addrinfo* ai); + + +/* +* uv_getnameinfo_t is a subclass of uv_req_t. +* +* Request object for uv_getnameinfo. 
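As a usage sketch for the uv_getaddrinfo() request declared above (resolve and resolved_cb are made-up names; the hints restrict the lookup to IPv4 so that uv_ip4_name(), declared later in this header, applies to the result):

    #include <stdio.h>
    #include <string.h>
    #include "uv.h"

    static void resolved_cb(uv_getaddrinfo_t* req, int status, struct addrinfo* res) {
      char ip[64];
      if (status < 0) {
        fprintf(stderr, "getaddrinfo: %s\n", uv_err_name(status));
        return;
      }
      uv_ip4_name((struct sockaddr_in*)res->ai_addr, ip, sizeof(ip));
      printf("first result: %s\n", ip);
      uv_freeaddrinfo(res);            /* the callback owns the result list */
    }

    int resolve(uv_loop_t* loop, uv_getaddrinfo_t* req) {
      struct addrinfo hints;
      memset(&hints, 0, sizeof(hints));
      hints.ai_family = AF_INET;
      hints.ai_socktype = SOCK_STREAM;
      return uv_getaddrinfo(loop, req, resolved_cb, "example.org", "80", &hints);
    }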
+*/ +struct uv_getnameinfo_s { + UV_REQ_FIELDS + /* read-only */ + uv_loop_t* loop; + /* host and service are marked as private, but they really aren't. */ + UV_GETNAMEINFO_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const struct sockaddr* addr, + int flags); + + +/* uv_spawn() options. */ +typedef enum { + UV_IGNORE = 0x00, + UV_CREATE_PIPE = 0x01, + UV_INHERIT_FD = 0x02, + UV_INHERIT_STREAM = 0x04, + + /* + * When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE + * determine the direction of flow, from the child process' perspective. Both + * flags may be specified to create a duplex data stream. + */ + UV_READABLE_PIPE = 0x10, + UV_WRITABLE_PIPE = 0x20 +} uv_stdio_flags; + +typedef struct uv_stdio_container_s { + uv_stdio_flags flags; + + union { + uv_stream_t* stream; + int fd; + } data; +} uv_stdio_container_t; + +typedef struct uv_process_options_s { + uv_exit_cb exit_cb; /* Called after the process exits. */ + const char* file; /* Path to program to execute. */ + /* + * Command line arguments. args[0] should be the path to the program. On + * Windows this uses CreateProcess which concatenates the arguments into a + * string this can cause some strange errors. See the note at + * windows_verbatim_arguments. + */ + char** args; + /* + * This will be set as the environ variable in the subprocess. If this is + * NULL then the parents environ will be used. + */ + char** env; + /* + * If non-null this represents a directory the subprocess should execute + * in. Stands for current working directory. + */ + const char* cwd; + /* + * Various flags that control how uv_spawn() behaves. See the definition of + * `enum uv_process_flags` below. + */ + unsigned int flags; + /* + * The `stdio` field points to an array of uv_stdio_container_t structs that + * describe the file descriptors that will be made available to the child + * process. The convention is that stdio[0] points to stdin, fd 1 is used for + * stdout, and fd 2 is stderr. + * + * Note that on windows file descriptors greater than 2 are available to the + * child process only if the child processes uses the MSVCRT runtime. + */ + int stdio_count; + uv_stdio_container_t* stdio; + /* + * Libuv can change the child process' user/group id. This happens only when + * the appropriate bits are set in the flags fields. This is not supported on + * windows; uv_spawn() will fail and set the error to UV_ENOTSUP. + */ + uv_uid_t uid; + uv_gid_t gid; +} uv_process_options_t; + +/* + * These are the flags that can be used for the uv_process_options.flags field. + */ +enum uv_process_flags { + /* + * Set the child process' user id. The user id is supplied in the `uid` field + * of the options struct. This does not work on windows; setting this flag + * will cause uv_spawn() to fail. + */ + UV_PROCESS_SETUID = (1 << 0), + /* + * Set the child process' group id. The user id is supplied in the `gid` + * field of the options struct. This does not work on windows; setting this + * flag will cause uv_spawn() to fail. + */ + UV_PROCESS_SETGID = (1 << 1), + /* + * Do not wrap any arguments in quotes, or perform any other escaping, when + * converting the argument list into a command line string. This option is + * only meaningful on Windows systems. On Unix it is silently ignored. 
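Before the remaining flags below, a minimal uv_spawn() sketch using the options struct just described — everything not needed is left zeroed so the child inherits the parent's environment and working directory (spawn_mkdir and exit_cb are illustrative names):

    #include <stdio.h>
    #include <string.h>
    #include "uv.h"

    static void exit_cb(uv_process_t* proc, int64_t exit_status, int term_signal) {
      printf("child exited with status %lld, signal %d\n",
             (long long)exit_status, term_signal);
      uv_close((uv_handle_t*)proc, NULL);
    }

    int spawn_mkdir(uv_loop_t* loop, uv_process_t* proc) {
      char* args[3];
      uv_process_options_t options;

      args[0] = "mkdir";               /* args[0] is the program, per the convention above */
      args[1] = "test-dir";
      args[2] = NULL;

      memset(&options, 0, sizeof(options));
      options.exit_cb = exit_cb;
      options.file = "mkdir";
      options.args = args;

      return uv_spawn(loop, proc, &options);
    }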
+ */ + UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS = (1 << 2), + /* + * Spawn the child process in a detached state - this will make it a process + * group leader, and will effectively enable the child to keep running after + * the parent exits. Note that the child process will still keep the + * parent's event loop alive unless the parent process calls uv_unref() on + * the child's process handle. + */ + UV_PROCESS_DETACHED = (1 << 3), + /* + * Hide the subprocess console window that would normally be created. This + * option is only meaningful on Windows systems. On Unix it is silently + * ignored. + */ + UV_PROCESS_WINDOWS_HIDE = (1 << 4) +}; + +/* + * uv_process_t is a subclass of uv_handle_t. + */ +struct uv_process_s { + UV_HANDLE_FIELDS + uv_exit_cb exit_cb; + int pid; + UV_PROCESS_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_spawn(uv_loop_t* loop, + uv_process_t* handle, + const uv_process_options_t* options); +UV_EXTERN int uv_process_kill(uv_process_t*, int signum); +UV_EXTERN int uv_kill(int pid, int signum); + + +/* + * uv_work_t is a subclass of uv_req_t. + */ +struct uv_work_s { + UV_REQ_FIELDS + uv_loop_t* loop; + uv_work_cb work_cb; + uv_after_work_cb after_work_cb; + UV_WORK_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_queue_work(uv_loop_t* loop, + uv_work_t* req, + uv_work_cb work_cb, + uv_after_work_cb after_work_cb); + +UV_EXTERN int uv_cancel(uv_req_t* req); + + +struct uv_cpu_info_s { + char* model; + int speed; + struct uv_cpu_times_s { + uint64_t user; + uint64_t nice; + uint64_t sys; + uint64_t idle; + uint64_t irq; + } cpu_times; +}; + +struct uv_interface_address_s { + char* name; + char phys_addr[6]; + int is_internal; + union { + struct sockaddr_in address4; + struct sockaddr_in6 address6; + } address; + union { + struct sockaddr_in netmask4; + struct sockaddr_in6 netmask6; + } netmask; +}; + +struct uv_passwd_s { + char* username; + long uid; + long gid; + char* shell; + char* homedir; +}; + +typedef enum { + UV_DIRENT_UNKNOWN, + UV_DIRENT_FILE, + UV_DIRENT_DIR, + UV_DIRENT_LINK, + UV_DIRENT_FIFO, + UV_DIRENT_SOCKET, + UV_DIRENT_CHAR, + UV_DIRENT_BLOCK +} uv_dirent_type_t; + +struct uv_dirent_s { + const char* name; + uv_dirent_type_t type; +}; + +UV_EXTERN char** uv_setup_args(int argc, char** argv); +UV_EXTERN int uv_get_process_title(char* buffer, size_t size); +UV_EXTERN int uv_set_process_title(const char* title); +UV_EXTERN int uv_resident_set_memory(size_t* rss); +UV_EXTERN int uv_uptime(double* uptime); + +typedef struct { + long tv_sec; + long tv_usec; +} uv_timeval_t; + +typedef struct { + uv_timeval_t ru_utime; /* user CPU time used */ + uv_timeval_t ru_stime; /* system CPU time used */ + uint64_t ru_maxrss; /* maximum resident set size */ + uint64_t ru_ixrss; /* integral shared memory size */ + uint64_t ru_idrss; /* integral unshared data size */ + uint64_t ru_isrss; /* integral unshared stack size */ + uint64_t ru_minflt; /* page reclaims (soft page faults) */ + uint64_t ru_majflt; /* page faults (hard page faults) */ + uint64_t ru_nswap; /* swaps */ + uint64_t ru_inblock; /* block input operations */ + uint64_t ru_oublock; /* block output operations */ + uint64_t ru_msgsnd; /* IPC messages sent */ + uint64_t ru_msgrcv; /* IPC messages received */ + uint64_t ru_nsignals; /* signals received */ + uint64_t ru_nvcsw; /* voluntary context switches */ + uint64_t ru_nivcsw; /* involuntary context switches */ +} uv_rusage_t; + +UV_EXTERN int uv_getrusage(uv_rusage_t* rusage); + +UV_EXTERN int uv_os_homedir(char* buffer, size_t* size); +UV_EXTERN int uv_os_tmpdir(char* 
buffer, size_t* size); +UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd); +UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd); + +UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count); +UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count); + +UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses, + int* count); +UV_EXTERN void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count); + + +typedef enum { + UV_FS_UNKNOWN = -1, + UV_FS_CUSTOM, + UV_FS_OPEN, + UV_FS_CLOSE, + UV_FS_READ, + UV_FS_WRITE, + UV_FS_SENDFILE, + UV_FS_STAT, + UV_FS_LSTAT, + UV_FS_FSTAT, + UV_FS_FTRUNCATE, + UV_FS_UTIME, + UV_FS_FUTIME, + UV_FS_ACCESS, + UV_FS_CHMOD, + UV_FS_FCHMOD, + UV_FS_FSYNC, + UV_FS_FDATASYNC, + UV_FS_UNLINK, + UV_FS_RMDIR, + UV_FS_MKDIR, + UV_FS_MKDTEMP, + UV_FS_RENAME, + UV_FS_SCANDIR, + UV_FS_LINK, + UV_FS_SYMLINK, + UV_FS_READLINK, + UV_FS_CHOWN, + UV_FS_FCHOWN, + UV_FS_REALPATH +} uv_fs_type; + +/* uv_fs_t is a subclass of uv_req_t. */ +struct uv_fs_s { + UV_REQ_FIELDS + uv_fs_type fs_type; + uv_loop_t* loop; + uv_fs_cb cb; + ssize_t result; + void* ptr; + const char* path; + uv_stat_t statbuf; /* Stores the result of uv_fs_stat() and uv_fs_fstat(). */ + UV_FS_PRIVATE_FIELDS +}; + +UV_EXTERN void uv_fs_req_cleanup(uv_fs_t* req); +UV_EXTERN int uv_fs_close(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_open(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_read(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t offset, + uv_fs_cb cb); +UV_EXTERN int uv_fs_unlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_write(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t offset, + uv_fs_cb cb); +UV_EXTERN int uv_fs_mkdir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_mkdtemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb); +UV_EXTERN int uv_fs_rmdir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_scandir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb); +UV_EXTERN int uv_fs_scandir_next(uv_fs_t* req, + uv_dirent_t* ent); +UV_EXTERN int uv_fs_stat(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fstat(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_rename(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fsync(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fdatasync(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_fs_cb cb); +UV_EXTERN int uv_fs_ftruncate(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int64_t offset, + uv_fs_cb cb); +UV_EXTERN int uv_fs_sendfile(uv_loop_t* loop, + uv_fs_t* req, + uv_file out_fd, + uv_file in_fd, + int64_t in_offset, + size_t length, + uv_fs_cb cb); +UV_EXTERN int uv_fs_access(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_chmod(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_utime(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + double atime, + double mtime, + uv_fs_cb cb); +UV_EXTERN int 
uv_fs_futime(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + double atime, + double mtime, + uv_fs_cb cb); +UV_EXTERN int uv_fs_lstat(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_link(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb); + +/* + * This flag can be used with uv_fs_symlink() on Windows to specify whether + * path argument points to a directory. + */ +#define UV_FS_SYMLINK_DIR 0x0001 + +/* + * This flag can be used with uv_fs_symlink() on Windows to specify whether + * the symlink is to be created using junction points. + */ +#define UV_FS_SYMLINK_JUNCTION 0x0002 + +UV_EXTERN int uv_fs_symlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + int flags, + uv_fs_cb cb); +UV_EXTERN int uv_fs_readlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_realpath(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fchmod(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int mode, + uv_fs_cb cb); +UV_EXTERN int uv_fs_chown(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb); +UV_EXTERN int uv_fs_fchown(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb); + + +enum uv_fs_event { + UV_RENAME = 1, + UV_CHANGE = 2 +}; + + +struct uv_fs_event_s { + UV_HANDLE_FIELDS + /* private */ + char* path; + UV_FS_EVENT_PRIVATE_FIELDS +}; + + +/* + * uv_fs_stat() based polling file watcher. + */ +struct uv_fs_poll_s { + UV_HANDLE_FIELDS + /* Private, don't touch. */ + void* poll_ctx; +}; + +UV_EXTERN int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle); +UV_EXTERN int uv_fs_poll_start(uv_fs_poll_t* handle, + uv_fs_poll_cb poll_cb, + const char* path, + unsigned int interval); +UV_EXTERN int uv_fs_poll_stop(uv_fs_poll_t* handle); +UV_EXTERN int uv_fs_poll_getpath(uv_fs_poll_t* handle, + char* buffer, + size_t* size); + + +struct uv_signal_s { + UV_HANDLE_FIELDS + uv_signal_cb signal_cb; + int signum; + UV_SIGNAL_PRIVATE_FIELDS +}; + +UV_EXTERN int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle); +UV_EXTERN int uv_signal_start(uv_signal_t* handle, + uv_signal_cb signal_cb, + int signum); +UV_EXTERN int uv_signal_stop(uv_signal_t* handle); + +UV_EXTERN void uv_loadavg(double avg[3]); + + +/* + * Flags to be passed to uv_fs_event_start(). + */ +enum uv_fs_event_flags { + /* + * By default, if the fs event watcher is given a directory name, we will + * watch for all events in that directory. This flags overrides this behavior + * and makes fs_event report only changes to the directory entry itself. This + * flag does not affect individual files watched. + * This flag is currently not implemented yet on any backend. + */ + UV_FS_EVENT_WATCH_ENTRY = 1, + + /* + * By default uv_fs_event will try to use a kernel interface such as inotify + * or kqueue to detect events. This may not work on remote filesystems such + * as NFS mounts. This flag makes fs_event fall back to calling stat() on a + * regular interval. + * This flag is currently not implemented yet on any backend. + */ + UV_FS_EVENT_STAT = 2, + + /* + * By default, event watcher, when watching directory, is not registering + * (is ignoring) changes in it's subdirectories. + * This flag will override this behaviour on platforms that support it. 
+ */ + UV_FS_EVENT_RECURSIVE = 4 +}; + + +UV_EXTERN int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle); +UV_EXTERN int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags); +UV_EXTERN int uv_fs_event_stop(uv_fs_event_t* handle); +UV_EXTERN int uv_fs_event_getpath(uv_fs_event_t* handle, + char* buffer, + size_t* size); + +UV_EXTERN int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr); +UV_EXTERN int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr); + +UV_EXTERN int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size); +UV_EXTERN int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size); + +UV_EXTERN int uv_inet_ntop(int af, const void* src, char* dst, size_t size); +UV_EXTERN int uv_inet_pton(int af, const char* src, void* dst); + +UV_EXTERN int uv_exepath(char* buffer, size_t* size); + +UV_EXTERN int uv_cwd(char* buffer, size_t* size); + +UV_EXTERN int uv_chdir(const char* dir); + +UV_EXTERN uint64_t uv_get_free_memory(void); +UV_EXTERN uint64_t uv_get_total_memory(void); + +UV_EXTERN uint64_t uv_hrtime(void); + +UV_EXTERN void uv_disable_stdio_inheritance(void); + +UV_EXTERN int uv_dlopen(const char* filename, uv_lib_t* lib); +UV_EXTERN void uv_dlclose(uv_lib_t* lib); +UV_EXTERN int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr); +UV_EXTERN const char* uv_dlerror(const uv_lib_t* lib); + +UV_EXTERN int uv_mutex_init(uv_mutex_t* handle); +UV_EXTERN void uv_mutex_destroy(uv_mutex_t* handle); +UV_EXTERN void uv_mutex_lock(uv_mutex_t* handle); +UV_EXTERN int uv_mutex_trylock(uv_mutex_t* handle); +UV_EXTERN void uv_mutex_unlock(uv_mutex_t* handle); + +UV_EXTERN int uv_rwlock_init(uv_rwlock_t* rwlock); +UV_EXTERN void uv_rwlock_destroy(uv_rwlock_t* rwlock); +UV_EXTERN void uv_rwlock_rdlock(uv_rwlock_t* rwlock); +UV_EXTERN int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock); +UV_EXTERN void uv_rwlock_rdunlock(uv_rwlock_t* rwlock); +UV_EXTERN void uv_rwlock_wrlock(uv_rwlock_t* rwlock); +UV_EXTERN int uv_rwlock_trywrlock(uv_rwlock_t* rwlock); +UV_EXTERN void uv_rwlock_wrunlock(uv_rwlock_t* rwlock); + +UV_EXTERN int uv_sem_init(uv_sem_t* sem, unsigned int value); +UV_EXTERN void uv_sem_destroy(uv_sem_t* sem); +UV_EXTERN void uv_sem_post(uv_sem_t* sem); +UV_EXTERN void uv_sem_wait(uv_sem_t* sem); +UV_EXTERN int uv_sem_trywait(uv_sem_t* sem); + +UV_EXTERN int uv_cond_init(uv_cond_t* cond); +UV_EXTERN void uv_cond_destroy(uv_cond_t* cond); +UV_EXTERN void uv_cond_signal(uv_cond_t* cond); +UV_EXTERN void uv_cond_broadcast(uv_cond_t* cond); + +UV_EXTERN int uv_barrier_init(uv_barrier_t* barrier, unsigned int count); +UV_EXTERN void uv_barrier_destroy(uv_barrier_t* barrier); +UV_EXTERN int uv_barrier_wait(uv_barrier_t* barrier); + +UV_EXTERN void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex); +UV_EXTERN int uv_cond_timedwait(uv_cond_t* cond, + uv_mutex_t* mutex, + uint64_t timeout); + +UV_EXTERN void uv_once(uv_once_t* guard, void (*callback)(void)); + +UV_EXTERN int uv_key_create(uv_key_t* key); +UV_EXTERN void uv_key_delete(uv_key_t* key); +UV_EXTERN void* uv_key_get(uv_key_t* key); +UV_EXTERN void uv_key_set(uv_key_t* key, void* value); + +typedef void (*uv_thread_cb)(void* arg); + +UV_EXTERN int uv_thread_create(uv_thread_t* tid, uv_thread_cb entry, void* arg); +UV_EXTERN uv_thread_t uv_thread_self(void); +UV_EXTERN int uv_thread_join(uv_thread_t *tid); +UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2); + +/* The presence of these unions force similar 
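Tying the fs-event flags above to uv_fs_event_start(), a directory watcher might be sketched as follows (watch_dir and fs_event_cb are illustrative names; UV_FS_EVENT_RECURSIVE is honoured only on platforms that support it, per the comment above):

    #include <stdio.h>
    #include "uv.h"

    static void fs_event_cb(uv_fs_event_t* handle, const char* filename,
                            int events, int status) {
      if (status < 0)
        return;
      printf("%s: %s%s\n",
             filename != NULL ? filename : "(unknown)",
             (events & UV_RENAME) ? "rename " : "",
             (events & UV_CHANGE) ? "change" : "");
    }

    int watch_dir(uv_loop_t* loop, uv_fs_event_t* handle, const char* dir) {
      uv_fs_event_init(loop, handle);
      return uv_fs_event_start(handle, fs_event_cb, dir, UV_FS_EVENT_RECURSIVE);
    }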
struct layout. */ +#define XX(_, name) uv_ ## name ## _t name; +union uv_any_handle { + UV_HANDLE_TYPE_MAP(XX) +}; + +union uv_any_req { + UV_REQ_TYPE_MAP(XX) +}; +#undef XX + + +struct uv_loop_s { + /* User data - use this for whatever. */ + void* data; + /* Loop reference counting. */ + unsigned int active_handles; + void* handle_queue[2]; + void* active_reqs[2]; + /* Internal flag to signal loop stop. */ + unsigned int stop_flag; + UV_LOOP_PRIVATE_FIELDS +}; + + +/* Don't export the private CPP symbols. */ +#undef UV_HANDLE_TYPE_PRIVATE +#undef UV_REQ_TYPE_PRIVATE +#undef UV_REQ_PRIVATE_FIELDS +#undef UV_STREAM_PRIVATE_FIELDS +#undef UV_TCP_PRIVATE_FIELDS +#undef UV_PREPARE_PRIVATE_FIELDS +#undef UV_CHECK_PRIVATE_FIELDS +#undef UV_IDLE_PRIVATE_FIELDS +#undef UV_ASYNC_PRIVATE_FIELDS +#undef UV_TIMER_PRIVATE_FIELDS +#undef UV_GETADDRINFO_PRIVATE_FIELDS +#undef UV_GETNAMEINFO_PRIVATE_FIELDS +#undef UV_FS_REQ_PRIVATE_FIELDS +#undef UV_WORK_PRIVATE_FIELDS +#undef UV_FS_EVENT_PRIVATE_FIELDS +#undef UV_SIGNAL_PRIVATE_FIELDS +#undef UV_LOOP_PRIVATE_FIELDS +#undef UV_LOOP_PRIVATE_PLATFORM_FIELDS + +#ifdef __cplusplus +} +#endif +#endif /* UV_H */ diff --git a/deps/libuv/libuv.nsi b/deps/libuv/libuv.nsi new file mode 100644 index 00000000..159756e1 --- /dev/null +++ b/deps/libuv/libuv.nsi @@ -0,0 +1,86 @@ +; NSIS installer script for libuv + +!include "MUI2.nsh" + +Name "libuv" +OutFile "libuv-${ARCH}-${VERSION}.exe" + +!include "x64.nsh" +# Default install location, for 32-bit files +InstallDir "$PROGRAMFILES\libuv" + +# Override install and registry locations if this is a 64-bit install. +function .onInit + ${If} ${ARCH} == "x64" + SetRegView 64 + StrCpy $INSTDIR "$PROGRAMFILES64\libuv" + ${EndIf} +functionEnd + +;-------------------------------- +; Installer pages +!insertmacro MUI_PAGE_WELCOME +!insertmacro MUI_PAGE_DIRECTORY +!insertmacro MUI_PAGE_INSTFILES +!insertmacro MUI_PAGE_FINISH + + +;-------------------------------- +; Uninstaller pages +!insertmacro MUI_UNPAGE_WELCOME +!insertmacro MUI_UNPAGE_CONFIRM +!insertmacro MUI_UNPAGE_INSTFILES +!insertmacro MUI_UNPAGE_FINISH + +;-------------------------------- +; Languages +!insertmacro MUI_LANGUAGE "English" + +;-------------------------------- +; Installer sections + +Section "Files" SecInstall + SectionIn RO + SetOutPath "$INSTDIR" + File "Release\*.dll" + File "Release\*.lib" + File "LICENSE" + File "README.md" + + SetOutPath "$INSTDIR\include" + File "include\uv.h" + File "include\uv-errno.h" + File "include\uv-threadpool.h" + File "include\uv-version.h" + File "include\uv-win.h" + File "include\tree.h" + + WriteUninstaller "$INSTDIR\Uninstall.exe" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "DisplayName" "libuv-${ARCH}-${VERSION}" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "UninstallString" "$\"$INSTDIR\Uninstall.exe$\"" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "QuietUninstallString" "$\"$INSTDIR\Uninstall.exe$\" /S" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "HelpLink" "http://libuv.org/" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "URLInfoAbout" "http://libuv.org/" + WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "DisplayVersion" "${VERSION}" + WriteRegDWORD HKLM 
"Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "NoModify" "1" + WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "NoRepair" "1" +SectionEnd + +Section "Uninstall" + Delete "$INSTDIR\libuv.dll" + Delete "$INSTDIR\libuv.lib" + Delete "$INSTDIR\LICENSE" + Delete "$INSTDIR\README.md" + + Delete "$INSTDIR\include\uv.h" + Delete "$INSTDIR\include\uv-errno.h" + Delete "$INSTDIR\include\uv-threadpool.h" + Delete "$INSTDIR\include\uv-version.h" + Delete "$INSTDIR\include\uv-win.h" + Delete "$INSTDIR\include\tree.h" + + Delete "$INSTDIR\Uninstall.exe" + RMDir "$INSTDIR" + DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" +SectionEnd + diff --git a/deps/libuv/libuv.pc.in b/deps/libuv/libuv.pc.in new file mode 100644 index 00000000..9174fe15 --- /dev/null +++ b/deps/libuv/libuv.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=${prefix} +libdir=@libdir@ +includedir=@includedir@ + +Name: @PACKAGE_NAME@ +Version: @PACKAGE_VERSION@ +Description: multi-platform support library with a focus on asynchronous I/O. + +Libs: -L${libdir} -luv @LIBS@ +Cflags: -I${includedir} diff --git a/deps/libuv/m4/.gitignore b/deps/libuv/m4/.gitignore new file mode 100644 index 00000000..c44e4c29 --- /dev/null +++ b/deps/libuv/m4/.gitignore @@ -0,0 +1,4 @@ +# Ignore libtoolize-generated files. +*.m4 +!as_case.m4 +!libuv-check-flags.m4 diff --git a/deps/libuv/m4/as_case.m4 b/deps/libuv/m4/as_case.m4 new file mode 100644 index 00000000..c7ae0f0f --- /dev/null +++ b/deps/libuv/m4/as_case.m4 @@ -0,0 +1,21 @@ +# AS_CASE(WORD, [PATTERN1], [IF-MATCHED1]...[DEFAULT]) +# ---------------------------------------------------- +# Expand into +# | case WORD in +# | PATTERN1) IF-MATCHED1 ;; +# | ... +# | *) DEFAULT ;; +# | esac +m4_define([_AS_CASE], +[m4_if([$#], 0, [m4_fatal([$0: too few arguments: $#])], + [$#], 1, [ *) $1 ;;], + [$#], 2, [ $1) m4_default([$2], [:]) ;;], + [ $1) m4_default([$2], [:]) ;; +$0(m4_shiftn(2, $@))])dnl +]) +m4_defun([AS_CASE], +[m4_ifval([$2$3], +[case $1 in +_AS_CASE(m4_shift($@)) +esac])]) + diff --git a/deps/libuv/m4/libuv-check-flags.m4 b/deps/libuv/m4/libuv-check-flags.m4 new file mode 100644 index 00000000..59c30635 --- /dev/null +++ b/deps/libuv/m4/libuv-check-flags.m4 @@ -0,0 +1,319 @@ +dnl Macros to check the presence of generic (non-typed) symbols. +dnl Copyright (c) 2006-2008 Diego Pettenà +dnl Copyright (c) 2006-2008 xine project +dnl +dnl This program is free software; you can redistribute it and/or modify +dnl it under the terms of the GNU General Public License as published by +dnl the Free Software Foundation; either version 3, or (at your option) +dnl any later version. +dnl +dnl This program is distributed in the hope that it will be useful, +dnl but WITHOUT ANY WARRANTY; without even the implied warranty of +dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +dnl GNU General Public License for more details. +dnl +dnl You should have received a copy of the GNU General Public License +dnl along with this program; if not, write to the Free Software +dnl Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +dnl 02110-1301, USA. +dnl +dnl As a special exception, the copyright owners of the +dnl macro gives unlimited permission to copy, distribute and modify the +dnl configure scripts that are the output of Autoconf when processing the +dnl Macro. 
You need not follow the terms of the GNU General Public +dnl License when using or distributing such scripts, even though portions +dnl of the text of the Macro appear in them. The GNU General Public +dnl License (GPL) does govern all other use of the material that +dnl constitutes the Autoconf Macro. +dnl +dnl This special exception to the GPL applies to versions of the +dnl Autoconf Macro released by this project. When you make and +dnl distribute a modified version of the Autoconf Macro, you may extend +dnl this special exception to the GPL to apply to your modified version as +dnl well. + +dnl Check if the flag is supported by compiler +dnl CC_CHECK_CFLAGS_SILENT([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) + +AC_DEFUN([CC_CHECK_CFLAGS_SILENT], [ + AC_CACHE_VAL(AS_TR_SH([cc_cv_cflags_$1]), + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $1" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([int a;])], + [eval "AS_TR_SH([cc_cv_cflags_$1])='yes'"], + [eval "AS_TR_SH([cc_cv_cflags_$1])='no'"]) + CFLAGS="$ac_save_CFLAGS" + ]) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes], + [$2], [$3]) +]) + +dnl Check if the flag is supported by compiler (cacheable) +dnl CC_CHECK_CFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) + +AC_DEFUN([CC_CHECK_CFLAGS], [ + AC_CACHE_CHECK([if $CC supports $1 flag], + AS_TR_SH([cc_cv_cflags_$1]), + CC_CHECK_CFLAGS_SILENT([$1]) dnl Don't execute actions here! + ) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes], + [$2], [$3]) +]) + +dnl CC_CHECK_CFLAG_APPEND(FLAG, [action-if-found], [action-if-not-found]) +dnl Check for CFLAG and appends them to CFLAGS if supported +AC_DEFUN([CC_CHECK_CFLAG_APPEND], [ + AC_CACHE_CHECK([if $CC supports $1 flag], + AS_TR_SH([cc_cv_cflags_$1]), + CC_CHECK_CFLAGS_SILENT([$1]) dnl Don't execute actions here! + ) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes], + [CFLAGS="$CFLAGS $1"; DEBUG_CFLAGS="$DEBUG_CFLAGS $1"; $2], [$3]) +]) + +dnl CC_CHECK_CFLAGS_APPEND([FLAG1 FLAG2], [action-if-found], [action-if-not]) +AC_DEFUN([CC_CHECK_CFLAGS_APPEND], [ + for flag in $1; do + CC_CHECK_CFLAG_APPEND($flag, [$2], [$3]) + done +]) + +dnl Check if the flag is supported by linker (cacheable) +dnl CC_CHECK_LDFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND]) + +AC_DEFUN([CC_CHECK_LDFLAGS], [ + AC_CACHE_CHECK([if $CC supports $1 flag], + AS_TR_SH([cc_cv_ldflags_$1]), + [ac_save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $1" + AC_LANG_PUSH([C]) + AC_LINK_IFELSE([AC_LANG_SOURCE([int main() { return 1; }])], + [eval "AS_TR_SH([cc_cv_ldflags_$1])='yes'"], + [eval "AS_TR_SH([cc_cv_ldflags_$1])="]) + AC_LANG_POP([C]) + LDFLAGS="$ac_save_LDFLAGS" + ]) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_ldflags_$1])[ = xyes], + [$2], [$3]) +]) + +dnl define the LDFLAGS_NOUNDEFINED variable with the correct value for +dnl the current linker to avoid undefined references in a shared object. +AC_DEFUN([CC_NOUNDEFINED], [ + dnl We check $host for which systems to enable this for. + AC_REQUIRE([AC_CANONICAL_HOST]) + + case $host in + dnl FreeBSD (et al.) does not complete linking for shared objects when pthreads + dnl are requested, as different implementations are present; to avoid problems + dnl use -Wl,-z,defs only for those platform not behaving this way. + *-freebsd* | *-openbsd*) ;; + *) + dnl First of all check for the --no-undefined variant of GNU ld. This allows + dnl for a much more readable commandline, so that people can understand what + dnl it does without going to look for what the heck -z defs does. 
+ for possible_flags in "-Wl,--no-undefined" "-Wl,-z,defs"; do + CC_CHECK_LDFLAGS([$possible_flags], [LDFLAGS_NOUNDEFINED="$possible_flags"]) + break + done + ;; + esac + + AC_SUBST([LDFLAGS_NOUNDEFINED]) +]) + +dnl Check for a -Werror flag or equivalent. -Werror is the GCC +dnl and ICC flag that tells the compiler to treat all the warnings +dnl as fatal. We usually need this option to make sure that some +dnl constructs (like attributes) are not simply ignored. +dnl +dnl Other compilers don't support -Werror per se, but they support +dnl an equivalent flag: +dnl - Sun Studio compiler supports -errwarn=%all +AC_DEFUN([CC_CHECK_WERROR], [ + AC_CACHE_CHECK( + [for $CC way to treat warnings as errors], + [cc_cv_werror], + [CC_CHECK_CFLAGS_SILENT([-Werror], [cc_cv_werror=-Werror], + [CC_CHECK_CFLAGS_SILENT([-errwarn=%all], [cc_cv_werror=-errwarn=%all])]) + ]) +]) + +AC_DEFUN([CC_CHECK_ATTRIBUTE], [ + AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([if $CC supports __attribute__(( ifelse([$2], , [$1], [$2]) ))], + AS_TR_SH([cc_cv_attribute_$1]), + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + AC_LANG_PUSH([C]) + AC_COMPILE_IFELSE([AC_LANG_SOURCE([$3])], + [eval "AS_TR_SH([cc_cv_attribute_$1])='yes'"], + [eval "AS_TR_SH([cc_cv_attribute_$1])='no'"]) + AC_LANG_POP([C]) + CFLAGS="$ac_save_CFLAGS" + ]) + + AS_IF([eval test x$]AS_TR_SH([cc_cv_attribute_$1])[ = xyes], + [AC_DEFINE( + AS_TR_CPP([SUPPORT_ATTRIBUTE_$1]), 1, + [Define this if the compiler supports __attribute__(( ifelse([$2], , [$1], [$2]) ))] + ) + $4], + [$5]) +]) + +AC_DEFUN([CC_ATTRIBUTE_CONSTRUCTOR], [ + CC_CHECK_ATTRIBUTE( + [constructor],, + [void __attribute__((constructor)) ctor() { int a; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_FORMAT], [ + CC_CHECK_ATTRIBUTE( + [format], [format(printf, n, n)], + [void __attribute__((format(printf, 1, 2))) printflike(const char *fmt, ...) { fmt = (void *)0; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_FORMAT_ARG], [ + CC_CHECK_ATTRIBUTE( + [format_arg], [format_arg(printf)], + [char *__attribute__((format_arg(1))) gettextlike(const char *fmt) { fmt = (void *)0; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_VISIBILITY], [ + CC_CHECK_ATTRIBUTE( + [visibility_$1], [visibility("$1")], + [void __attribute__((visibility("$1"))) $1_function() { }], + [$2], [$3]) +]) + +AC_DEFUN([CC_ATTRIBUTE_NONNULL], [ + CC_CHECK_ATTRIBUTE( + [nonnull], [nonnull()], + [void __attribute__((nonnull())) some_function(void *foo, void *bar) { foo = (void*)0; bar = (void*)0; }], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_UNUSED], [ + CC_CHECK_ATTRIBUTE( + [unused], , + [void some_function(void *foo, __attribute__((unused)) void *bar);], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_SENTINEL], [ + CC_CHECK_ATTRIBUTE( + [sentinel], , + [void some_function(void *foo, ...) __attribute__((sentinel));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_DEPRECATED], [ + CC_CHECK_ATTRIBUTE( + [deprecated], , + [void some_function(void *foo, ...) 
__attribute__((deprecated));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_ALIAS], [ + CC_CHECK_ATTRIBUTE( + [alias], [weak, alias], + [void other_function(void *foo) { } + void some_function(void *foo) __attribute__((weak, alias("other_function")));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_MALLOC], [ + CC_CHECK_ATTRIBUTE( + [malloc], , + [void * __attribute__((malloc)) my_alloc(int n);], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_PACKED], [ + CC_CHECK_ATTRIBUTE( + [packed], , + [struct astructure { char a; int b; long c; void *d; } __attribute__((packed));], + [$1], [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_CONST], [ + CC_CHECK_ATTRIBUTE( + [const], , + [int __attribute__((const)) twopow(int n) { return 1 << n; } ], + [$1], [$2]) +]) + +AC_DEFUN([CC_FLAG_VISIBILITY], [ + AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([if $CC supports -fvisibility=hidden], + [cc_cv_flag_visibility], + [cc_flag_visibility_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + CC_CHECK_CFLAGS_SILENT([-fvisibility=hidden], + cc_cv_flag_visibility='yes', + cc_cv_flag_visibility='no') + CFLAGS="$cc_flag_visibility_save_CFLAGS"]) + + AS_IF([test "x$cc_cv_flag_visibility" = "xyes"], + [AC_DEFINE([SUPPORT_FLAG_VISIBILITY], 1, + [Define this if the compiler supports the -fvisibility flag]) + $1], + [$2]) +]) + +AC_DEFUN([CC_FUNC_EXPECT], [ + AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([if compiler has __builtin_expect function], + [cc_cv_func_expect], + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + AC_LANG_PUSH([C]) + AC_COMPILE_IFELSE([AC_LANG_SOURCE( + [int some_function() { + int a = 3; + return (int)__builtin_expect(a, 3); + }])], + [cc_cv_func_expect=yes], + [cc_cv_func_expect=no]) + AC_LANG_POP([C]) + CFLAGS="$ac_save_CFLAGS" + ]) + + AS_IF([test "x$cc_cv_func_expect" = "xyes"], + [AC_DEFINE([SUPPORT__BUILTIN_EXPECT], 1, + [Define this if the compiler supports __builtin_expect() function]) + $1], + [$2]) +]) + +AC_DEFUN([CC_ATTRIBUTE_ALIGNED], [ + AC_REQUIRE([CC_CHECK_WERROR]) + AC_CACHE_CHECK([highest __attribute__ ((aligned ())) supported], + [cc_cv_attribute_aligned], + [ac_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $cc_cv_werror" + AC_LANG_PUSH([C]) + for cc_attribute_align_try in 64 32 16 8 4 2; do + AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + int main() { + static char c __attribute__ ((aligned($cc_attribute_align_try))) = 0; + return c; + }])], [cc_cv_attribute_aligned=$cc_attribute_align_try; break]) + done + AC_LANG_POP([C]) + CFLAGS="$ac_save_CFLAGS" + ]) + + if test "x$cc_cv_attribute_aligned" != "x"; then + AC_DEFINE_UNQUOTED([ATTRIBUTE_ALIGNED_MAX], [$cc_cv_attribute_aligned], + [Define the highest alignment supported]) + fi +]) \ No newline at end of file diff --git a/deps/libuv/src/fs-poll.c b/deps/libuv/src/fs-poll.c new file mode 100644 index 00000000..ee73d5a2 --- /dev/null +++ b/deps/libuv/src/fs-poll.c @@ -0,0 +1,256 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "uv-common.h" + +#include <assert.h> +#include <stdlib.h> +#include <string.h> + +struct poll_ctx { + uv_fs_poll_t* parent_handle; /* NULL if parent has been stopped or closed */ + int busy_polling; + unsigned int interval; + uint64_t start_time; + uv_loop_t* loop; + uv_fs_poll_cb poll_cb; + uv_timer_t timer_handle; + uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */ + uv_stat_t statbuf; + char path[1]; /* variable length */ +}; + +static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b); +static void poll_cb(uv_fs_t* req); +static void timer_cb(uv_timer_t* timer); +static void timer_close_cb(uv_handle_t* handle); + +static uv_stat_t zero_statbuf; + + +int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL); + return 0; +} + + +int uv_fs_poll_start(uv_fs_poll_t* handle, + uv_fs_poll_cb cb, + const char* path, + unsigned int interval) { + struct poll_ctx* ctx; + uv_loop_t* loop; + size_t len; + int err; + + if (uv__is_active(handle)) + return 0; + + loop = handle->loop; + len = strlen(path); + ctx = uv__calloc(1, sizeof(*ctx) + len); + + if (ctx == NULL) + return UV_ENOMEM; + + ctx->loop = loop; + ctx->poll_cb = cb; + ctx->interval = interval ? interval : 1; + ctx->start_time = uv_now(loop); + ctx->parent_handle = handle; + memcpy(ctx->path, path, len + 1); + + err = uv_timer_init(loop, &ctx->timer_handle); + if (err < 0) + goto error; + + ctx->timer_handle.flags |= UV__HANDLE_INTERNAL; + uv__handle_unref(&ctx->timer_handle); + + err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb); + if (err < 0) + goto error; + + handle->poll_ctx = ctx; + uv__handle_start(handle); + + return 0; + +error: + uv__free(ctx); + return err; +} + + +int uv_fs_poll_stop(uv_fs_poll_t* handle) { + struct poll_ctx* ctx; + + if (!uv__is_active(handle)) + return 0; + + ctx = handle->poll_ctx; + assert(ctx != NULL); + assert(ctx->parent_handle != NULL); + ctx->parent_handle = NULL; + handle->poll_ctx = NULL; + + /* Close the timer if it's active. If it's inactive, there's a stat request + * in progress and poll_cb will take care of the cleanup.
+ */ + if (uv__is_active(&ctx->timer_handle)) + uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb); + + uv__handle_stop(handle); + + return 0; +} + + +int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) { + struct poll_ctx* ctx; + size_t required_len; + + if (!uv__is_active(handle)) { + *size = 0; + return UV_EINVAL; + } + + ctx = handle->poll_ctx; + assert(ctx != NULL); + + required_len = strlen(ctx->path); + if (required_len >= *size) { + *size = required_len + 1; + return UV_ENOBUFS; + } + + memcpy(buffer, ctx->path, required_len); + *size = required_len; + buffer[required_len] = '\0'; + + return 0; +} + + +void uv__fs_poll_close(uv_fs_poll_t* handle) { + uv_fs_poll_stop(handle); +} + + +static void timer_cb(uv_timer_t* timer) { + struct poll_ctx* ctx; + + ctx = container_of(timer, struct poll_ctx, timer_handle); + assert(ctx->parent_handle != NULL); + assert(ctx->parent_handle->poll_ctx == ctx); + ctx->start_time = uv_now(ctx->loop); + + if (uv_fs_stat(ctx->loop, &ctx->fs_req, ctx->path, poll_cb)) + abort(); +} + + +static void poll_cb(uv_fs_t* req) { + uv_stat_t* statbuf; + struct poll_ctx* ctx; + uint64_t interval; + + ctx = container_of(req, struct poll_ctx, fs_req); + + if (ctx->parent_handle == NULL) { /* handle has been stopped or closed */ + uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb); + uv_fs_req_cleanup(req); + return; + } + + if (req->result != 0) { + if (ctx->busy_polling != req->result) { + ctx->poll_cb(ctx->parent_handle, + req->result, + &ctx->statbuf, + &zero_statbuf); + ctx->busy_polling = req->result; + } + goto out; + } + + statbuf = &req->statbuf; + + if (ctx->busy_polling != 0) + if (ctx->busy_polling < 0 || !statbuf_eq(&ctx->statbuf, statbuf)) + ctx->poll_cb(ctx->parent_handle, 0, &ctx->statbuf, statbuf); + + ctx->statbuf = *statbuf; + ctx->busy_polling = 1; + +out: + uv_fs_req_cleanup(req); + + if (ctx->parent_handle == NULL) { /* handle has been stopped by callback */ + uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb); + return; + } + + /* Reschedule timer, subtract the delay from doing the stat(). 
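To make the rescheduling below concrete (a worked example, not from the source): with interval = 1000 ms, a stat() that took 30 ms gives (uv_now - start_time) % interval = 30, so the timer is armed for 970 ms and polls stay on a 1000 ms grid; a pathological stat() that took 2300 ms gives a remainder of 300, so the next poll fires 700 ms later, skipping the periods that were already missed instead of letting the drift accumulate.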
*/ + interval = ctx->interval; + interval -= (uv_now(ctx->loop) - ctx->start_time) % interval; + + if (uv_timer_start(&ctx->timer_handle, timer_cb, interval, 0)) + abort(); +} + + +static void timer_close_cb(uv_handle_t* handle) { + uv__free(container_of(handle, struct poll_ctx, timer_handle)); +} + + +static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) { + return a->st_ctim.tv_nsec == b->st_ctim.tv_nsec + && a->st_mtim.tv_nsec == b->st_mtim.tv_nsec + && a->st_birthtim.tv_nsec == b->st_birthtim.tv_nsec + && a->st_ctim.tv_sec == b->st_ctim.tv_sec + && a->st_mtim.tv_sec == b->st_mtim.tv_sec + && a->st_birthtim.tv_sec == b->st_birthtim.tv_sec + && a->st_size == b->st_size + && a->st_mode == b->st_mode + && a->st_uid == b->st_uid + && a->st_gid == b->st_gid + && a->st_ino == b->st_ino + && a->st_dev == b->st_dev + && a->st_flags == b->st_flags + && a->st_gen == b->st_gen; +} + + +#if defined(_WIN32) + +#include "win/internal.h" +#include "win/handle-inl.h" + +void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) { + assert(handle->flags & UV__HANDLE_CLOSING); + assert(!(handle->flags & UV_HANDLE_CLOSED)); + uv__handle_close(handle); +} + +#endif /* _WIN32 */ diff --git a/deps/libuv/src/heap-inl.h b/deps/libuv/src/heap-inl.h new file mode 100644 index 00000000..1e2ed60e --- /dev/null +++ b/deps/libuv/src/heap-inl.h @@ -0,0 +1,245 @@ +/* Copyright (c) 2013, Ben Noordhuis + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef UV_SRC_HEAP_H_ +#define UV_SRC_HEAP_H_ + +#include /* NULL */ + +#if defined(__GNUC__) +# define HEAP_EXPORT(declaration) __attribute__((unused)) static declaration +#else +# define HEAP_EXPORT(declaration) static declaration +#endif + +struct heap_node { + struct heap_node* left; + struct heap_node* right; + struct heap_node* parent; +}; + +/* A binary min heap. The usual properties hold: the root is the lowest + * element in the set, the height of the tree is at most log2(nodes) and + * it's always a complete binary tree. + * + * The heap function try hard to detect corrupted tree nodes at the cost + * of a minor reduction in performance. Compile with -DNDEBUG to disable. + */ +struct heap { + struct heap_node* min; + unsigned int nelts; +}; + +/* Return non-zero if a < b. */ +typedef int (*heap_compare_fn)(const struct heap_node* a, + const struct heap_node* b); + +/* Public functions. */ +HEAP_EXPORT(void heap_init(struct heap* heap)); +HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap)); +HEAP_EXPORT(void heap_insert(struct heap* heap, + struct heap_node* newnode, + heap_compare_fn less_than)); +HEAP_EXPORT(void heap_remove(struct heap* heap, + struct heap_node* node, + heap_compare_fn less_than)); +HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than)); + +/* Implementation follows. 
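+ *
+ * Illustrative usage sketch, not part of upstream libuv: "struct item" and
+ * item_less_than() are made-up names, the heap_node is embedded directly in
+ * the user's struct (an intrusive heap), and container_of() is assumed to
+ * be the offsetof()-based macro libuv defines in its internal headers.
+ *
+ *   struct item {
+ *     uint64_t key;
+ *     struct heap_node node;
+ *   };
+ *
+ *   static int item_less_than(const struct heap_node* a,
+ *                             const struct heap_node* b) {
+ *     const struct item* x = container_of(a, struct item, node);
+ *     const struct item* y = container_of(b, struct item, node);
+ *     return x->key < y->key;
+ *   }
+ *
+ *   struct heap h;
+ *   struct item it = { 42, { NULL, NULL, NULL } };
+ *   heap_init(&h);
+ *   heap_insert(&h, &it.node, item_less_than);
+ *   heap_min(&h) now returns &it.node; heap_dequeue(&h, item_less_than)
+ *   removes it again.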
*/ + +HEAP_EXPORT(void heap_init(struct heap* heap)) { + heap->min = NULL; + heap->nelts = 0; +} + +HEAP_EXPORT(struct heap_node* heap_min(const struct heap* heap)) { + return heap->min; +} + +/* Swap parent with child. Child moves closer to the root, parent moves away. */ +static void heap_node_swap(struct heap* heap, + struct heap_node* parent, + struct heap_node* child) { + struct heap_node* sibling; + struct heap_node t; + + t = *parent; + *parent = *child; + *child = t; + + parent->parent = child; + if (child->left == child) { + child->left = parent; + sibling = child->right; + } else { + child->right = parent; + sibling = child->left; + } + if (sibling != NULL) + sibling->parent = child; + + if (parent->left != NULL) + parent->left->parent = parent; + if (parent->right != NULL) + parent->right->parent = parent; + + if (child->parent == NULL) + heap->min = child; + else if (child->parent->left == parent) + child->parent->left = child; + else + child->parent->right = child; +} + +HEAP_EXPORT(void heap_insert(struct heap* heap, + struct heap_node* newnode, + heap_compare_fn less_than)) { + struct heap_node** parent; + struct heap_node** child; + unsigned int path; + unsigned int n; + unsigned int k; + + newnode->left = NULL; + newnode->right = NULL; + newnode->parent = NULL; + + /* Calculate the path from the root to the insertion point. This is a min + * heap so we always insert at the left-most free node of the bottom row. + */ + path = 0; + for (k = 0, n = 1 + heap->nelts; n >= 2; k += 1, n /= 2) + path = (path << 1) | (n & 1); + + /* Now traverse the heap using the path we calculated in the previous step. */ + parent = child = &heap->min; + while (k > 0) { + parent = child; + if (path & 1) + child = &(*child)->right; + else + child = &(*child)->left; + path >>= 1; + k -= 1; + } + + /* Insert the new node. */ + newnode->parent = *parent; + *child = newnode; + heap->nelts += 1; + + /* Walk up the tree and check at each node if the heap property holds. + * It's a min heap so parent < child must be true. + */ + while (newnode->parent != NULL && less_than(newnode, newnode->parent)) + heap_node_swap(heap, newnode->parent, newnode); +} + +HEAP_EXPORT(void heap_remove(struct heap* heap, + struct heap_node* node, + heap_compare_fn less_than)) { + struct heap_node* smallest; + struct heap_node** max; + struct heap_node* child; + unsigned int path; + unsigned int k; + unsigned int n; + + if (heap->nelts == 0) + return; + + /* Calculate the path from the min (the root) to the max, the left-most node + * of the bottom row. + */ + path = 0; + for (k = 0, n = heap->nelts; n >= 2; k += 1, n /= 2) + path = (path << 1) | (n & 1); + + /* Now traverse the heap using the path we calculated in the previous step. */ + max = &heap->min; + while (k > 0) { + if (path & 1) + max = &(*max)->right; + else + max = &(*max)->left; + path >>= 1; + k -= 1; + } + + heap->nelts -= 1; + + /* Unlink the max node. */ + child = *max; + *max = NULL; + + if (child == node) { + /* We're removing either the max or the last node in the tree. */ + if (child == heap->min) { + heap->min = NULL; + } + return; + } + + /* Replace the to be deleted node with the max node. 
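+   * (Here 'child' is the node that was just unlinked above, i.e. the last
+   * node in the tree; it takes over the position of 'node', and the loops
+   * below then sift it down and up again to restore the heap property.)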
*/ + child->left = node->left; + child->right = node->right; + child->parent = node->parent; + + if (child->left != NULL) { + child->left->parent = child; + } + + if (child->right != NULL) { + child->right->parent = child; + } + + if (node->parent == NULL) { + heap->min = child; + } else if (node->parent->left == node) { + node->parent->left = child; + } else { + node->parent->right = child; + } + + /* Walk down the subtree and check at each node if the heap property holds. + * It's a min heap so parent < child must be true. If the parent is bigger, + * swap it with the smallest child. + */ + for (;;) { + smallest = child; + if (child->left != NULL && less_than(child->left, smallest)) + smallest = child->left; + if (child->right != NULL && less_than(child->right, smallest)) + smallest = child->right; + if (smallest == child) + break; + heap_node_swap(heap, child, smallest); + } + + /* Walk up the subtree and check that each parent is less than the node + * this is required, because `max` node is not guaranteed to be the + * actual maximum in tree + */ + while (child->parent != NULL && less_than(child, child->parent)) + heap_node_swap(heap, child->parent, child); +} + +HEAP_EXPORT(void heap_dequeue(struct heap* heap, heap_compare_fn less_than)) { + heap_remove(heap, heap->min, less_than); +} + +#undef HEAP_EXPORT + +#endif /* UV_SRC_HEAP_H_ */ diff --git a/deps/libuv/src/inet.c b/deps/libuv/src/inet.c new file mode 100644 index 00000000..da63a688 --- /dev/null +++ b/deps/libuv/src/inet.c @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC") + * Copyright (c) 1996-1999 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include + +#if defined(_MSC_VER) && _MSC_VER < 1600 +# include "stdint-msvc2008.h" +#else +# include +#endif + +#include "uv.h" +#include "uv-common.h" + +#define UV__INET_ADDRSTRLEN 16 +#define UV__INET6_ADDRSTRLEN 46 + + +static int inet_ntop4(const unsigned char *src, char *dst, size_t size); +static int inet_ntop6(const unsigned char *src, char *dst, size_t size); +static int inet_pton4(const char *src, unsigned char *dst); +static int inet_pton6(const char *src, unsigned char *dst); + + +int uv_inet_ntop(int af, const void* src, char* dst, size_t size) { + switch (af) { + case AF_INET: + return (inet_ntop4(src, dst, size)); + case AF_INET6: + return (inet_ntop6(src, dst, size)); + default: + return UV_EAFNOSUPPORT; + } + /* NOTREACHED */ +} + + +static int inet_ntop4(const unsigned char *src, char *dst, size_t size) { + static const char fmt[] = "%u.%u.%u.%u"; + char tmp[UV__INET_ADDRSTRLEN]; + int l; + + l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]); + if (l <= 0 || (size_t) l >= size) { + return UV_ENOSPC; + } + strncpy(dst, tmp, size); + dst[size - 1] = '\0'; + return 0; +} + + +static int inet_ntop6(const unsigned char *src, char *dst, size_t size) { + /* + * Note that int32_t and int16_t need only be "at least" large enough + * to contain a value of the specified size. On some systems, like + * Crays, there is no such thing as an integer variable with 16 bits. + * Keep this in mind if you think this function should have been coded + * to use pointer overlays. All the world's not a VAX. + */ + char tmp[UV__INET6_ADDRSTRLEN], *tp; + struct { int base, len; } best, cur; + unsigned int words[sizeof(struct in6_addr) / sizeof(uint16_t)]; + int i; + + /* + * Preprocess: + * Copy the input (bytewise) array into a wordwise array. + * Find the longest run of 0x00's in src[] for :: shorthanding. + */ + memset(words, '\0', sizeof words); + for (i = 0; i < (int) sizeof(struct in6_addr); i++) + words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); + best.base = -1; + best.len = 0; + cur.base = -1; + cur.len = 0; + for (i = 0; i < (int) ARRAY_SIZE(words); i++) { + if (words[i] == 0) { + if (cur.base == -1) + cur.base = i, cur.len = 1; + else + cur.len++; + } else { + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + cur.base = -1; + } + } + } + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + } + if (best.base != -1 && best.len < 2) + best.base = -1; + + /* + * Format the result. + */ + tp = tmp; + for (i = 0; i < (int) ARRAY_SIZE(words); i++) { + /* Are we inside the best run of 0x00's? */ + if (best.base != -1 && i >= best.base && + i < (best.base + best.len)) { + if (i == best.base) + *tp++ = ':'; + continue; + } + /* Are we following an initial run of 0x00s or any real hex? */ + if (i != 0) + *tp++ = ':'; + /* Is this address an encapsulated IPv4? */ + if (i == 6 && best.base == 0 && (best.len == 6 || + (best.len == 7 && words[7] != 0x0001) || + (best.len == 5 && words[5] == 0xffff))) { + int err = inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp)); + if (err) + return err; + tp += strlen(tp); + break; + } + tp += sprintf(tp, "%x", words[i]); + } + /* Was it a trailing run of 0x00's? */ + if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words)) + *tp++ = ':'; + *tp++ = '\0'; + + /* + * Check for overflow, copy, and we're done. 
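+   * (For example, the address 2001:0db8:0000:0000:0000:0000:0000:0001 comes
+   * out of the loop above as "2001:db8::1": the longest run of zero words
+   * was collapsed to "::" and the %x format drops each group's leading
+   * zeros.)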
+ */ + if ((size_t)(tp - tmp) > size) { + return UV_ENOSPC; + } + strcpy(dst, tmp); + return 0; +} + + +int uv_inet_pton(int af, const char* src, void* dst) { + if (src == NULL || dst == NULL) + return UV_EINVAL; + + switch (af) { + case AF_INET: + return (inet_pton4(src, dst)); + case AF_INET6: { + int len; + char tmp[UV__INET6_ADDRSTRLEN], *s, *p; + s = (char*) src; + p = strchr(src, '%'); + if (p != NULL) { + s = tmp; + len = p - src; + if (len > UV__INET6_ADDRSTRLEN-1) + return UV_EINVAL; + memcpy(s, src, len); + s[len] = '\0'; + } + return inet_pton6(s, dst); + } + default: + return UV_EAFNOSUPPORT; + } + /* NOTREACHED */ +} + + +static int inet_pton4(const char *src, unsigned char *dst) { + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[sizeof(struct in_addr)], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr(digits, ch)) != NULL) { + unsigned int nw = *tp * 10 + (pch - digits); + + if (saw_digit && *tp == 0) + return UV_EINVAL; + if (nw > 255) + return UV_EINVAL; + *tp = nw; + if (!saw_digit) { + if (++octets > 4) + return UV_EINVAL; + saw_digit = 1; + } + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return UV_EINVAL; + *++tp = 0; + saw_digit = 0; + } else + return UV_EINVAL; + } + if (octets < 4) + return UV_EINVAL; + memcpy(dst, tmp, sizeof(struct in_addr)); + return 0; +} + + +static int inet_pton6(const char *src, unsigned char *dst) { + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[sizeof(struct in6_addr)], *tp, *endp, *colonp; + const char *xdigits, *curtok; + int ch, seen_xdigits; + unsigned int val; + + memset((tp = tmp), '\0', sizeof tmp); + endp = tp + sizeof tmp; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return UV_EINVAL; + curtok = src; + seen_xdigits = 0; + val = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + val <<= 4; + val |= (pch - xdigits); + if (++seen_xdigits > 4) + return UV_EINVAL; + continue; + } + if (ch == ':') { + curtok = src; + if (!seen_xdigits) { + if (colonp) + return UV_EINVAL; + colonp = tp; + continue; + } else if (*src == '\0') { + return UV_EINVAL; + } + if (tp + sizeof(uint16_t) > endp) + return UV_EINVAL; + *tp++ = (unsigned char) (val >> 8) & 0xff; + *tp++ = (unsigned char) val & 0xff; + seen_xdigits = 0; + val = 0; + continue; + } + if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) { + int err = inet_pton4(curtok, tp); + if (err == 0) { + tp += sizeof(struct in_addr); + seen_xdigits = 0; + break; /*%< '\\0' was seen by inet_pton4(). */ + } + } + return UV_EINVAL; + } + if (seen_xdigits) { + if (tp + sizeof(uint16_t) > endp) + return UV_EINVAL; + *tp++ = (unsigned char) (val >> 8) & 0xff; + *tp++ = (unsigned char) val & 0xff; + } + if (colonp != NULL) { + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. 
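+   * (E.g. after parsing "1::2", colonp marks where the "::" appeared and the
+   * bytes 00 02 of the trailing group sit right there; the loop below moves
+   * them to the end of tmp and zero-fills the gap in between, yielding the
+   * full 1:0:0:0:0:0:0:2 address.)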
+ */ + const int n = tp - colonp; + int i; + + if (tp == endp) + return UV_EINVAL; + for (i = 1; i <= n; i++) { + endp[- i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return UV_EINVAL; + memcpy(dst, tmp, sizeof tmp); + return 0; +} diff --git a/deps/libuv/src/queue.h b/deps/libuv/src/queue.h new file mode 100644 index 00000000..ff3540a0 --- /dev/null +++ b/deps/libuv/src/queue.h @@ -0,0 +1,108 @@ +/* Copyright (c) 2013, Ben Noordhuis + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef QUEUE_H_ +#define QUEUE_H_ + +#include + +typedef void *QUEUE[2]; + +/* Private macros. */ +#define QUEUE_NEXT(q) (*(QUEUE **) &((*(q))[0])) +#define QUEUE_PREV(q) (*(QUEUE **) &((*(q))[1])) +#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q))) +#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q))) + +/* Public macros. */ +#define QUEUE_DATA(ptr, type, field) \ + ((type *) ((char *) (ptr) - offsetof(type, field))) + +/* Important note: mutating the list while QUEUE_FOREACH is + * iterating over its elements results in undefined behavior. + */ +#define QUEUE_FOREACH(q, h) \ + for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q)) + +#define QUEUE_EMPTY(q) \ + ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q)) + +#define QUEUE_HEAD(q) \ + (QUEUE_NEXT(q)) + +#define QUEUE_INIT(q) \ + do { \ + QUEUE_NEXT(q) = (q); \ + QUEUE_PREV(q) = (q); \ + } \ + while (0) + +#define QUEUE_ADD(h, n) \ + do { \ + QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n); \ + QUEUE_NEXT_PREV(n) = QUEUE_PREV(h); \ + QUEUE_PREV(h) = QUEUE_PREV(n); \ + QUEUE_PREV_NEXT(h) = (h); \ + } \ + while (0) + +#define QUEUE_SPLIT(h, q, n) \ + do { \ + QUEUE_PREV(n) = QUEUE_PREV(h); \ + QUEUE_PREV_NEXT(n) = (n); \ + QUEUE_NEXT(n) = (q); \ + QUEUE_PREV(h) = QUEUE_PREV(q); \ + QUEUE_PREV_NEXT(h) = (h); \ + QUEUE_PREV(q) = (n); \ + } \ + while (0) + +#define QUEUE_MOVE(h, n) \ + do { \ + if (QUEUE_EMPTY(h)) \ + QUEUE_INIT(n); \ + else { \ + QUEUE* q = QUEUE_HEAD(h); \ + QUEUE_SPLIT(h, q, n); \ + } \ + } \ + while (0) + +#define QUEUE_INSERT_HEAD(h, q) \ + do { \ + QUEUE_NEXT(q) = QUEUE_NEXT(h); \ + QUEUE_PREV(q) = (h); \ + QUEUE_NEXT_PREV(q) = (q); \ + QUEUE_NEXT(h) = (q); \ + } \ + while (0) + +#define QUEUE_INSERT_TAIL(h, q) \ + do { \ + QUEUE_NEXT(q) = (h); \ + QUEUE_PREV(q) = QUEUE_PREV(h); \ + QUEUE_PREV_NEXT(q) = (q); \ + QUEUE_PREV(h) = (q); \ + } \ + while (0) + +#define QUEUE_REMOVE(q) \ + do { \ + QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q); \ + QUEUE_NEXT_PREV(q) = QUEUE_PREV(q); \ + } \ + while (0) + +#endif /* QUEUE_H_ */ diff --git a/deps/libuv/src/threadpool.c b/deps/libuv/src/threadpool.c new file mode 100644 index 00000000..2c5152b4 --- /dev/null +++ b/deps/libuv/src/threadpool.c @@ -0,0 +1,303 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv-common.h" + +#if !defined(_WIN32) +# include "unix/internal.h" +#else +# include "win/req-inl.h" +/* TODO(saghul): unify internal req functions */ +static void uv__req_init(uv_loop_t* loop, + uv_req_t* req, + uv_req_type type) { + uv_req_init(loop, req); + req->type = type; + uv__req_register(loop, req); +} +# define uv__req_init(loop, req, type) \ + uv__req_init((loop), (uv_req_t*)(req), (type)) +#endif + +#include + +#define MAX_THREADPOOL_SIZE 128 + +static uv_once_t once = UV_ONCE_INIT; +static uv_cond_t cond; +static uv_mutex_t mutex; +static unsigned int idle_threads; +static unsigned int nthreads; +static uv_thread_t* threads; +static uv_thread_t default_threads[4]; +static QUEUE exit_message; +static QUEUE wq; +static volatile int initialized; + + +static void uv__cancelled(struct uv__work* w) { + abort(); +} + + +/* To avoid deadlock with uv_cancel() it's crucial that the worker + * never holds the global mutex and the loop-local mutex at the same time. + */ +static void worker(void* arg) { + struct uv__work* w; + QUEUE* q; + + (void) arg; + + for (;;) { + uv_mutex_lock(&mutex); + + while (QUEUE_EMPTY(&wq)) { + idle_threads += 1; + uv_cond_wait(&cond, &mutex); + idle_threads -= 1; + } + + q = QUEUE_HEAD(&wq); + + if (q == &exit_message) + uv_cond_signal(&cond); + else { + QUEUE_REMOVE(q); + QUEUE_INIT(q); /* Signal uv_cancel() that the work req is + executing. */ + } + + uv_mutex_unlock(&mutex); + + if (q == &exit_message) + break; + + w = QUEUE_DATA(q, struct uv__work, wq); + w->work(w); + + uv_mutex_lock(&w->loop->wq_mutex); + w->work = NULL; /* Signal uv_cancel() that the work req is done + executing. 
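+     (uv__work_cancel() below only treats a request as cancellable while its
+     wq queue entry is still non-empty and its work pointer is still set, so
+     the QUEUE_INIT() above and this NULL assignment are the two points of
+     no return.)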
*/ + QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq); + uv_async_send(&w->loop->wq_async); + uv_mutex_unlock(&w->loop->wq_mutex); + } +} + + +static void post(QUEUE* q) { + uv_mutex_lock(&mutex); + QUEUE_INSERT_TAIL(&wq, q); + if (idle_threads > 0) + uv_cond_signal(&cond); + uv_mutex_unlock(&mutex); +} + + +#ifndef _WIN32 +UV_DESTRUCTOR(static void cleanup(void)) { + unsigned int i; + + if (initialized == 0) + return; + + post(&exit_message); + + for (i = 0; i < nthreads; i++) + if (uv_thread_join(threads + i)) + abort(); + + if (threads != default_threads) + uv__free(threads); + + uv_mutex_destroy(&mutex); + uv_cond_destroy(&cond); + + threads = NULL; + nthreads = 0; + initialized = 0; +} +#endif + + +static void init_once(void) { + unsigned int i; + const char* val; + + nthreads = ARRAY_SIZE(default_threads); + val = getenv("UV_THREADPOOL_SIZE"); + if (val != NULL) + nthreads = atoi(val); + if (nthreads == 0) + nthreads = 1; + if (nthreads > MAX_THREADPOOL_SIZE) + nthreads = MAX_THREADPOOL_SIZE; + + threads = default_threads; + if (nthreads > ARRAY_SIZE(default_threads)) { + threads = uv__malloc(nthreads * sizeof(threads[0])); + if (threads == NULL) { + nthreads = ARRAY_SIZE(default_threads); + threads = default_threads; + } + } + + if (uv_cond_init(&cond)) + abort(); + + if (uv_mutex_init(&mutex)) + abort(); + + QUEUE_INIT(&wq); + + for (i = 0; i < nthreads; i++) + if (uv_thread_create(threads + i, worker, NULL)) + abort(); + + initialized = 1; +} + + +void uv__work_submit(uv_loop_t* loop, + struct uv__work* w, + void (*work)(struct uv__work* w), + void (*done)(struct uv__work* w, int status)) { + uv_once(&once, init_once); + w->loop = loop; + w->work = work; + w->done = done; + post(&w->wq); +} + + +static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { + int cancelled; + + uv_mutex_lock(&mutex); + uv_mutex_lock(&w->loop->wq_mutex); + + cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL; + if (cancelled) + QUEUE_REMOVE(&w->wq); + + uv_mutex_unlock(&w->loop->wq_mutex); + uv_mutex_unlock(&mutex); + + if (!cancelled) + return UV_EBUSY; + + w->work = uv__cancelled; + uv_mutex_lock(&loop->wq_mutex); + QUEUE_INSERT_TAIL(&loop->wq, &w->wq); + uv_async_send(&loop->wq_async); + uv_mutex_unlock(&loop->wq_mutex); + + return 0; +} + + +void uv__work_done(uv_async_t* handle) { + struct uv__work* w; + uv_loop_t* loop; + QUEUE* q; + QUEUE wq; + int err; + + loop = container_of(handle, uv_loop_t, wq_async); + uv_mutex_lock(&loop->wq_mutex); + QUEUE_MOVE(&loop->wq, &wq); + uv_mutex_unlock(&loop->wq_mutex); + + while (!QUEUE_EMPTY(&wq)) { + q = QUEUE_HEAD(&wq); + QUEUE_REMOVE(q); + + w = container_of(q, struct uv__work, wq); + err = (w->work == uv__cancelled) ? 
UV_ECANCELED : 0; + w->done(w, err); + } +} + + +static void uv__queue_work(struct uv__work* w) { + uv_work_t* req = container_of(w, uv_work_t, work_req); + + req->work_cb(req); +} + + +static void uv__queue_done(struct uv__work* w, int err) { + uv_work_t* req; + + req = container_of(w, uv_work_t, work_req); + uv__req_unregister(req->loop, req); + + if (req->after_work_cb == NULL) + return; + + req->after_work_cb(req, err); +} + + +int uv_queue_work(uv_loop_t* loop, + uv_work_t* req, + uv_work_cb work_cb, + uv_after_work_cb after_work_cb) { + if (work_cb == NULL) + return UV_EINVAL; + + uv__req_init(loop, req, UV_WORK); + req->loop = loop; + req->work_cb = work_cb; + req->after_work_cb = after_work_cb; + uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done); + return 0; +} + + +int uv_cancel(uv_req_t* req) { + struct uv__work* wreq; + uv_loop_t* loop; + + switch (req->type) { + case UV_FS: + loop = ((uv_fs_t*) req)->loop; + wreq = &((uv_fs_t*) req)->work_req; + break; + case UV_GETADDRINFO: + loop = ((uv_getaddrinfo_t*) req)->loop; + wreq = &((uv_getaddrinfo_t*) req)->work_req; + break; + case UV_GETNAMEINFO: + loop = ((uv_getnameinfo_t*) req)->loop; + wreq = &((uv_getnameinfo_t*) req)->work_req; + break; + case UV_WORK: + loop = ((uv_work_t*) req)->loop; + wreq = &((uv_work_t*) req)->work_req; + break; + default: + return UV_EINVAL; + } + + return uv__work_cancel(loop, req, wreq); +} diff --git a/deps/libuv/src/unix/aix.c b/deps/libuv/src/unix/aix.c new file mode 100644 index 00000000..652cd980 --- /dev/null +++ b/deps/libuv/src/unix/aix.c @@ -0,0 +1,1154 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#ifdef HAVE_SYS_AHAFS_EVPRODS_H +#include +#endif + +#include +#include +#include +#include +#include + +#define RDWR_BUF_SIZE 4096 +#define EQ(a,b) (strcmp(a,b) == 0) + +int uv__platform_loop_init(uv_loop_t* loop) { + loop->fs_fd = -1; + + /* Passing maxfd of -1 should mean the limit is determined + * by the user's ulimit or the global limit as per the doc */ + loop->backend_fd = pollset_create(-1); + + if (loop->backend_fd == -1) + return -1; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + if (loop->fs_fd != -1) { + uv__close(loop->fs_fd); + loop->fs_fd = -1; + } + + if (loop->backend_fd != -1) { + pollset_destroy(loop->backend_fd); + loop->backend_fd = -1; + } +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct poll_ctl pc; + + pc.events = POLLIN; + pc.cmd = PS_MOD; /* Equivalent to PS_ADD if the fd is not in the pollset. */ + pc.fd = fd; + + if (pollset_ctl(loop->backend_fd, &pc, 1)) + return -errno; + + pc.cmd = PS_DELETE; + if (pollset_ctl(loop->backend_fd, &pc, 1)) + abort(); + + return 0; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + struct pollfd events[1024]; + struct pollfd pqry; + struct pollfd* pe; + struct poll_ctl pc; + QUEUE* q; + uv__io_t* w; + uint64_t base; + uint64_t diff; + int have_signals; + int nevents; + int count; + int nfds; + int i; + int rc; + int add_failed; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + assert(w->fd >= 0); + assert(w->fd < (int) loop->nwatchers); + + pc.events = w->pevents; + pc.fd = w->fd; + + add_failed = 0; + if (w->events == 0) { + pc.cmd = PS_ADD; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + if (errno != EINVAL) { + assert(0 && "Failed to add file descriptor (pc.fd) to pollset"); + abort(); + } + /* Check if the fd is already in the pollset */ + pqry.fd = pc.fd; + rc = pollset_query(loop->backend_fd, &pqry); + switch (rc) { + case -1: + assert(0 && "Failed to query pollset for file descriptor"); + abort(); + case 0: + assert(0 && "Pollset does not contain file descriptor"); + abort(); + } + /* If we got here then the pollset already contained the file descriptor even though + * we didn't think it should. This probably shouldn't happen, but we can continue. */ + add_failed = 1; + } + } + if (w->events != 0 || add_failed) { + /* Modify, potentially removing events -- need to delete then add. + * Could maybe mod if we knew for sure no events are removed, but + * content of w->events is handled above as not reliable (falls back) + * so may require a pollset_query() which would have to be pretty cheap + * compared to a PS_DELETE to be worth optimizing. Alternatively, could + * lazily remove events, squelching them in the mean time. 
*/ + pc.cmd = PS_DELETE; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + assert(0 && "Failed to delete file descriptor (pc.fd) from pollset"); + abort(); + } + pc.cmd = PS_ADD; + if (pollset_ctl(loop->backend_fd, &pc, 1)) { + assert(0 && "Failed to add file descriptor (pc.fd) to pollset"); + abort(); + } + } + + w->events = w->pevents; + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + + for (;;) { + nfds = pollset_poll(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout); + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + return; + } + + if (nfds == -1) { + if (errno != EINTR) { + abort(); + } + + if (timeout == -1) + continue; + + if (timeout == 0) + return; + + /* Interrupted by a signal. Update timeout and poll again. */ + goto update_timeout; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + + for (i = 0; i < nfds; i++) { + pe = events + i; + pc.cmd = PS_DELETE; + pc.fd = pe->fd; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (pc.fd == -1) + continue; + + assert(pc.fd >= 0); + assert((unsigned) pc.fd < loop->nwatchers); + + w = loop->watchers[pc.fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. + * + * Ignore all errors because we may be racing with another thread + * when the file descriptor is closed. + */ + pollset_ctl(loop->backend_fd, &pc, 1); + continue; + } + + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, pe->revents); + + nevents++; + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. */ + timeout = 0; + continue; + } + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + diff = loop->time - base; + if (diff >= (uint64_t) timeout) + return; + + timeout -= diff; + } +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + uint64_t G = 1000000000; + timebasestruct_t t; + read_wall_time(&t, TIMEBASE_SZ); + time_base_to_time(&t, TIMEBASE_SZ); + return (uint64_t) t.tb_high * G + t.tb_low; +} + + +/* + * We could use a static buffer for the path manipulations that we need outside + * of the function, but this function could be called by multiple consumers and + * we don't want to potentially create a race condition in the use of snprintf. + * There is no direct way of getting the exe path in AIX - either through /procfs + * or through some libc APIs. The below approach is to parse the argv[0]'s pattern + * and use it in conjunction with PATH environment variable to craft one. 
+ */ +int uv_exepath(char* buffer, size_t* size) { + int res; + char args[PATH_MAX]; + char abspath[PATH_MAX]; + size_t abspath_size; + struct procsinfo pi; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + pi.pi_pid = getpid(); + res = getargs(&pi, sizeof(pi), args, sizeof(args)); + if (res < 0) + return -EINVAL; + + /* + * Possibilities for args: + * i) an absolute path such as: /home/user/myprojects/nodejs/node + * ii) a relative path such as: ./node or ../myprojects/nodejs/node + * iii) a bare filename such as "node", after exporting PATH variable + * to its location. + */ + + /* Case i) and ii) absolute or relative paths */ + if (strchr(args, '/') != NULL) { + if (realpath(args, abspath) != abspath) + return -errno; + + abspath_size = strlen(abspath); + + *size -= 1; + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; + } else { + /* Case iii). Search PATH environment variable */ + char trypath[PATH_MAX]; + char *clonedpath = NULL; + char *token = NULL; + char *path = getenv("PATH"); + + if (path == NULL) + return -EINVAL; + + clonedpath = uv__strdup(path); + if (clonedpath == NULL) + return -ENOMEM; + + token = strtok(clonedpath, ":"); + while (token != NULL) { + snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, args); + if (realpath(trypath, abspath) == abspath) { + /* Check the match is executable */ + if (access(abspath, X_OK) == 0) { + abspath_size = strlen(abspath); + + *size -= 1; + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + uv__free(clonedpath); + return 0; + } + } + token = strtok(NULL, ":"); + } + uv__free(clonedpath); + + /* Out of tokens (path entries), and no match found */ + return -EINVAL; + } +} + + +uint64_t uv_get_free_memory(void) { + perfstat_memory_total_t mem_total; + int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1); + if (result == -1) { + return 0; + } + return mem_total.real_free * 4096; +} + + +uint64_t uv_get_total_memory(void) { + perfstat_memory_total_t mem_total; + int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1); + if (result == -1) { + return 0; + } + return mem_total.real_total * 4096; +} + + +void uv_loadavg(double avg[3]) { + perfstat_cpu_total_t ps_total; + int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1); + if (result == -1) { + avg[0] = 0.; avg[1] = 0.; avg[2] = 0.; + return; + } + avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS); + avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS); + avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS); +} + + +#ifdef HAVE_SYS_AHAFS_EVPRODS_H +static char *uv__rawname(char *cp) { + static char rawbuf[FILENAME_MAX+1]; + char *dp = rindex(cp, '/'); + + if (dp == 0) + return 0; + + *dp = 0; + strcpy(rawbuf, cp); + *dp = '/'; + strcat(rawbuf, "/r"); + strcat(rawbuf, dp+1); + return rawbuf; +} + + +/* + * Determine whether given pathname is a directory + * Returns 0 if the path is a directory, -1 if not + * + * Note: Opportunity here for more detailed error information but + * that requires changing callers of this function as well + */ +static int uv__path_is_a_directory(char* filename) { + struct stat statbuf; + + if (stat(filename, &statbuf) < 0) + return -1; /* failed: not a directory, assume it is a file */ + + if (statbuf.st_type == VDIR) + return 0; + + return -1; +} + + +/* + * Check whether AHAFS is mounted. 
+ * Returns 0 if AHAFS is mounted, or an error code < 0 on failure + */ +static int uv__is_ahafs_mounted(void){ + int rv, i = 2; + struct vmount *p; + int size_multiplier = 10; + size_t siz = sizeof(struct vmount)*size_multiplier; + struct vmount *vmt; + const char *dev = "/aha"; + char *obj, *stub; + + p = uv__malloc(siz); + if (p == NULL) + return -errno; + + /* Retrieve all mounted filesystems */ + rv = mntctl(MCTL_QUERY, siz, (char*)p); + if (rv < 0) + return -errno; + if (rv == 0) { + /* buffer was not large enough, reallocate to correct size */ + siz = *(int*)p; + uv__free(p); + p = uv__malloc(siz); + if (p == NULL) + return -errno; + rv = mntctl(MCTL_QUERY, siz, (char*)p); + if (rv < 0) + return -errno; + } + + /* Look for dev in filesystems mount info */ + for(vmt = p, i = 0; i < rv; i++) { + obj = vmt2dataptr(vmt, VMT_OBJECT); /* device */ + stub = vmt2dataptr(vmt, VMT_STUB); /* mount point */ + + if (EQ(obj, dev) || EQ(uv__rawname(obj), dev) || EQ(stub, dev)) { + uv__free(p); /* Found a match */ + return 0; + } + vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length); + } + + /* /aha is required for monitoring filesystem changes */ + return -1; +} + +/* + * Recursive call to mkdir() to create intermediate folders, if any + * Returns code from mkdir call + */ +static int uv__makedir_p(const char *dir) { + char tmp[256]; + char *p = NULL; + size_t len; + int err; + + snprintf(tmp, sizeof(tmp),"%s",dir); + len = strlen(tmp); + if (tmp[len - 1] == '/') + tmp[len - 1] = 0; + for (p = tmp + 1; *p; p++) { + if (*p == '/') { + *p = 0; + err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); + if (err != 0 && errno != EEXIST) + return err; + *p = '/'; + } + } + return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); +} + +/* + * Creates necessary subdirectories in the AIX Event Infrastructure + * file system for monitoring the object specified. + * Returns code from mkdir call + */ +static int uv__make_subdirs_p(const char *filename) { + char cmd[2048]; + char *p; + int rc = 0; + + /* Strip off the monitor file name */ + p = strrchr(filename, '/'); + + if (p == NULL) + return 0; + + if (uv__path_is_a_directory((char*)filename) == 0) { + sprintf(cmd, "/aha/fs/modDir.monFactory"); + } else { + sprintf(cmd, "/aha/fs/modFile.monFactory"); + } + + strncat(cmd, filename, (p - filename)); + rc = uv__makedir_p(cmd); + + if (rc == -1 && errno != EEXIST){ + return -errno; + } + + return rc; +} + + +/* + * Checks if /aha is mounted, then proceeds to set up the monitoring + * objects for the specified file. 
+ * Returns 0 on success, or an error code < 0 on failure + */ +static int uv__setup_ahafs(const char* filename, int *fd) { + int rc = 0; + char mon_file_write_string[RDWR_BUF_SIZE]; + char mon_file[PATH_MAX]; + int file_is_directory = 0; /* -1 == NO, 0 == YES */ + + /* Create monitor file name for object */ + file_is_directory = uv__path_is_a_directory((char*)filename); + + if (file_is_directory == 0) + sprintf(mon_file, "/aha/fs/modDir.monFactory"); + else + sprintf(mon_file, "/aha/fs/modFile.monFactory"); + + if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX) + return -ENAMETOOLONG; + + /* Make the necessary subdirectories for the monitor file */ + rc = uv__make_subdirs_p(filename); + if (rc == -1 && errno != EEXIST) + return rc; + + strcat(mon_file, filename); + strcat(mon_file, ".mon"); + + *fd = 0; errno = 0; + + /* Open the monitor file, creating it if necessary */ + *fd = open(mon_file, O_CREAT|O_RDWR); + if (*fd < 0) + return -errno; + + /* Write out the monitoring specifications. + * In this case, we are monitoring for a state change event type + * CHANGED=YES + * We will be waiting in select call, rather than a read: + * WAIT_TYPE=WAIT_IN_SELECT + * We only want minimal information for files: + * INFO_LVL=1 + * For directories, we want more information to track what file + * caused the change + * INFO_LVL=2 + */ + + if (file_is_directory == 0) + sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2"); + else + sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1"); + + rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1); + if (rc < 0) + return -errno; + + return 0; +} + +/* + * Skips a specified number of lines in the buffer passed in. + * Walks the buffer pointed to by p and attempts to skip n lines. + * Returns the total number of lines skipped + */ +static int uv__skip_lines(char **p, int n) { + int lines = 0; + + while(n > 0) { + *p = strchr(*p, '\n'); + if (!p) + return lines; + + (*p)++; + n--; + lines++; + } + return lines; +} + + +/* + * Parse the event occurrence data to figure out what event just occurred + * and take proper action. + * + * The buf is a pointer to the buffer containing the event occurrence data + * Returns 0 on success, -1 if unrecoverable error in parsing + * + */ +static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) { + int evp_rc, i; + char *p; + char filename[PATH_MAX]; /* To be used when handling directories */ + + p = buf; + *events = 0; + + /* Clean the filename buffer*/ + for(i = 0; i < PATH_MAX; i++) { + filename[i] = 0; + } + i = 0; + + /* Check for BUF_WRAP */ + if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) { + assert(0 && "Buffer wrap detected, Some event occurrences lost!"); + return 0; + } + + /* Since we are using the default buffer size (4K), and have specified + * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions. 
Applications + * should check for this keyword if they are using an INFO_LVL of 2 or + * higher, and have a buffer size of <= 4K + */ + + /* Skip to RC_FROM_EVPROD */ + if (uv__skip_lines(&p, 9) != 9) + return -1; + + if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) { + if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */ + if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) { + /* The directory is no longer available for monitoring */ + *events = UV_RENAME; + handle->dir_filename = NULL; + } else { + /* A file was added/removed inside the directory */ + *events = UV_CHANGE; + + /* Get the EVPROD_INFO */ + if (uv__skip_lines(&p, 1) != 1) + return -1; + + /* Scan out the name of the file that triggered the event*/ + if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) { + handle->dir_filename = uv__strdup((const char*)&filename); + } else + return -1; + } + } else { /* Regular File */ + if (evp_rc == AHAFS_MODFILE_RENAME) + *events = UV_RENAME; + else + *events = UV_CHANGE; + } + } + else + return -1; + + return 0; +} + + +/* This is the internal callback */ +static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) { + char result_data[RDWR_BUF_SIZE]; + int bytes, rc = 0; + uv_fs_event_t* handle; + int events = 0; + char fname[PATH_MAX]; + char *p; + + handle = container_of(event_watch, uv_fs_event_t, event_watcher); + + /* At this point, we assume that polling has been done on the + * file descriptor, so we can just read the AHAFS event occurrence + * data and parse its results without having to block anything + */ + bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0); + + assert((bytes >= 0) && "uv__ahafs_event - Error reading monitor file"); + + /* Parse the data */ + if(bytes > 0) + rc = uv__parse_data(result_data, &events, handle); + + /* Unrecoverable error */ + if (rc == -1) + return; + + /* For directory changes, the name of the files that triggered the change + * are never absolute pathnames + */ + if (uv__path_is_a_directory(handle->path) == 0) { + p = handle->dir_filename; + } else { + p = strrchr(handle->path, '/'); + if (p == NULL) + p = handle->path; + else + p++; + } + strncpy(fname, p, sizeof(fname) - 1); + /* Just in case */ + fname[sizeof(fname) - 1] = '\0'; + + handle->cb(handle, fname, events, 0); +} +#endif + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +#else + return -ENOSYS; +#endif +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* filename, + unsigned int flags) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + int fd, rc, str_offset = 0; + char cwd[PATH_MAX]; + char absolute_path[PATH_MAX]; + char readlink_cwd[PATH_MAX]; + + + /* Figure out whether filename is absolute or not */ + if (filename[0] == '/') { + /* We have absolute pathname */ + snprintf(absolute_path, sizeof(absolute_path), "%s", filename); + } else { + /* We have a relative pathname, compose the absolute pathname */ + snprintf(cwd, sizeof(cwd), "/proc/%lu/cwd", (unsigned long) getpid()); + rc = readlink(cwd, readlink_cwd, sizeof(readlink_cwd) - 1); + if (rc < 0) + return rc; + /* readlink does not null terminate our string */ + readlink_cwd[rc] = '\0'; + + if (filename[0] == '.' 
&& filename[1] == '/') + str_offset = 2; + + snprintf(absolute_path, sizeof(absolute_path), "%s%s", readlink_cwd, + filename + str_offset); + } + + if (uv__is_ahafs_mounted() < 0) /* /aha checks failed */ + return UV_ENOSYS; + + /* Setup ahafs */ + rc = uv__setup_ahafs((const char *)absolute_path, &fd); + if (rc != 0) + return rc; + + /* Setup/Initialize all the libuv routines */ + uv__handle_start(handle); + uv__io_init(&handle->event_watcher, uv__ahafs_event, fd); + handle->path = uv__strdup(filename); + handle->cb = cb; + + uv__io_start(handle->loop, &handle->event_watcher, POLLIN); + + return 0; +#else + return -ENOSYS; +#endif +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + if (!uv__is_active(handle)) + return 0; + + uv__io_close(handle->loop, &handle->event_watcher); + uv__handle_stop(handle); + + if (uv__path_is_a_directory(handle->path) == 0) { + uv__free(handle->dir_filename); + handle->dir_filename = NULL; + } + + uv__free(handle->path); + handle->path = NULL; + uv__close(handle->event_watcher.fd); + handle->event_watcher.fd = -1; + + return 0; +#else + return -ENOSYS; +#endif +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { +#ifdef HAVE_SYS_AHAFS_EVPRODS_H + uv_fs_event_stop(handle); +#else + UNREACHABLE(); +#endif +} + + +char** uv_setup_args(int argc, char** argv) { + return argv; +} + + +int uv_set_process_title(const char* title) { + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + if (buffer == NULL || size == 0) + return -EINVAL; + + buffer[0] = '\0'; + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + char pp[64]; + psinfo_t psinfo; + int err; + int fd; + + snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid()); + + fd = open(pp, O_RDONLY); + if (fd == -1) + return -errno; + + /* FIXME(bnoordhuis) Handle EINTR. 
*/ + err = -EINVAL; + if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) { + *rss = (size_t)psinfo.pr_rssize * 1024; + err = 0; + } + uv__close(fd); + + return err; +} + + +int uv_uptime(double* uptime) { + struct utmp *utmp_buf; + size_t entries = 0; + time_t boot_time; + + utmpname(UTMP_FILE); + + setutent(); + + while ((utmp_buf = getutent()) != NULL) { + if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS) + ++entries; + if (utmp_buf->ut_type == BOOT_TIME) + boot_time = utmp_buf->ut_time; + } + + endutent(); + + if (boot_time == 0) + return -ENOSYS; + + *uptime = time(NULL) - boot_time; + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + uv_cpu_info_t* cpu_info; + perfstat_cpu_total_t ps_total; + perfstat_cpu_t* ps_cpus; + perfstat_id_t cpu_id; + int result, ncpus, idx = 0; + + result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1); + if (result == -1) { + return -ENOSYS; + } + + ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0); + if (result == -1) { + return -ENOSYS; + } + + ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t)); + if (!ps_cpus) { + return -ENOMEM; + } + + strcpy(cpu_id.name, FIRST_CPU); + result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus); + if (result == -1) { + uv__free(ps_cpus); + return -ENOSYS; + } + + *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t)); + if (!*cpu_infos) { + uv__free(ps_cpus); + return -ENOMEM; + } + + *count = ncpus; + + cpu_info = *cpu_infos; + while (idx < ncpus) { + cpu_info->speed = (int)(ps_total.processorHZ / 1000000); + cpu_info->model = uv__strdup(ps_total.description); + cpu_info->cpu_times.user = ps_cpus[idx].user; + cpu_info->cpu_times.sys = ps_cpus[idx].sys; + cpu_info->cpu_times.idle = ps_cpus[idx].idle; + cpu_info->cpu_times.irq = ps_cpus[idx].wait; + cpu_info->cpu_times.nice = 0; + cpu_info++; + idx++; + } + + uv__free(ps_cpus); + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; ++i) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, + int* count) { + uv_interface_address_t* address; + int sockfd, size = 1; + struct ifconf ifc; + struct ifreq *ifr, *p, flg; + + *count = 0; + + if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) { + return -errno; + } + + if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) { + uv__close(sockfd); + return -errno; + } + + ifc.ifc_req = (struct ifreq*)uv__malloc(size); + ifc.ifc_len = size; + if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) { + uv__close(sockfd); + return -errno; + } + +#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p)) + + /* Count all up and running ipv4/ipv6 addresses */ + ifr = ifc.ifc_req; + while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) { + p = ifr; + ifr = (struct ifreq*) + ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr)); + + if (!(p->ifr_addr.sa_family == AF_INET6 || + p->ifr_addr.sa_family == AF_INET)) + continue; + + memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name)); + if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) { + uv__close(sockfd); + return -errno; + } + + if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING)) + continue; + + (*count)++; + } + + /* Alloc the return interface structs */ + *addresses = (uv_interface_address_t*) + uv__malloc(*count * sizeof(uv_interface_address_t)); + if (!(*addresses)) { + uv__close(sockfd); + return -ENOMEM; + } + address = 
*addresses; + + ifr = ifc.ifc_req; + while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) { + p = ifr; + ifr = (struct ifreq*) + ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr)); + + if (!(p->ifr_addr.sa_family == AF_INET6 || + p->ifr_addr.sa_family == AF_INET)) + continue; + + memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name)); + if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) { + uv__close(sockfd); + return -ENOSYS; + } + + if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING)) + continue; + + /* All conditions above must match count loop */ + + address->name = uv__strdup(p->ifr_name); + + if (p->ifr_addr.sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr); + } + + /* TODO: Retrieve netmask using SIOCGIFNETMASK ioctl */ + + address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0; + + address++; + } + +#undef ADDR_SIZE + + uv__close(sockfd); + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; ++i) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct pollfd* events; + uintptr_t i; + uintptr_t nfds; + struct poll_ctl pc; + + assert(loop->watchers != NULL); + + events = (struct pollfd*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + + if (events != NULL) + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].fd == fd) + events[i].fd = -1; + + /* Remove the file descriptor from the poll set */ + pc.events = 0; + pc.cmd = PS_DELETE; + pc.fd = fd; + if(loop->backend_fd >= 0) + pollset_ctl(loop->backend_fd, &pc, 1); +} diff --git a/deps/libuv/src/unix/android-ifaddrs.c b/deps/libuv/src/unix/android-ifaddrs.c new file mode 100644 index 00000000..30f681b7 --- /dev/null +++ b/deps/libuv/src/unix/android-ifaddrs.c @@ -0,0 +1,703 @@ +/* +Copyright (c) 2013, Kenneth MacKay +Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement #289016) +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include "android-ifaddrs.h" +#include "uv-common.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct NetlinkList +{ + struct NetlinkList *m_next; + struct nlmsghdr *m_data; + unsigned int m_size; +} NetlinkList; + +static int netlink_socket(void) +{ + struct sockaddr_nl l_addr; + + int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if(l_socket < 0) + { + return -1; + } + + memset(&l_addr, 0, sizeof(l_addr)); + l_addr.nl_family = AF_NETLINK; + if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0) + { + close(l_socket); + return -1; + } + + return l_socket; +} + +static int netlink_send(int p_socket, int p_request) +{ + char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))]; + + struct nlmsghdr *l_hdr; + struct rtgenmsg *l_msg; + struct sockaddr_nl l_addr; + + memset(l_buffer, 0, sizeof(l_buffer)); + + l_hdr = (struct nlmsghdr *)l_buffer; + l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr); + + l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg)); + l_hdr->nlmsg_type = p_request; + l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST; + l_hdr->nlmsg_pid = 0; + l_hdr->nlmsg_seq = p_socket; + l_msg->rtgen_family = AF_UNSPEC; + + memset(&l_addr, 0, sizeof(l_addr)); + l_addr.nl_family = AF_NETLINK; + return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr))); +} + +static int netlink_recv(int p_socket, void *p_buffer, size_t p_len) +{ + struct sockaddr_nl l_addr; + struct msghdr l_msg; + + struct iovec l_iov; + l_iov.iov_base = p_buffer; + l_iov.iov_len = p_len; + + for(;;) + { + int l_result; + l_msg.msg_name = (void *)&l_addr; + l_msg.msg_namelen = sizeof(l_addr); + l_msg.msg_iov = &l_iov; + l_msg.msg_iovlen = 1; + l_msg.msg_control = NULL; + l_msg.msg_controllen = 0; + l_msg.msg_flags = 0; + l_result = recvmsg(p_socket, &l_msg, 0); + + if(l_result < 0) + { + if(errno == EINTR) + { + continue; + } + return -2; + } + + /* Buffer was too small */ + if(l_msg.msg_flags & MSG_TRUNC) + { + return -1; + } + return l_result; + } +} + +static struct nlmsghdr *getNetlinkResponse(int p_socket, int *p_size, int *p_done) +{ + size_t l_size = 4096; + void *l_buffer = NULL; + + for(;;) + { + int l_read; + + uv__free(l_buffer); + l_buffer = uv__malloc(l_size); + if (l_buffer == NULL) + { + return NULL; + } + + l_read = netlink_recv(p_socket, l_buffer, l_size); + *p_size = l_read; + if(l_read == -2) + { + uv__free(l_buffer); + return NULL; + } + if(l_read >= 0) + { + pid_t l_pid = getpid(); + struct nlmsghdr *l_hdr; + for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read)) + { + if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket) + { + continue; + } + + if(l_hdr->nlmsg_type == NLMSG_DONE) + { + *p_done = 1; + break; + } + + if(l_hdr->nlmsg_type == NLMSG_ERROR) + { + uv__free(l_buffer); + return NULL; + } + } + return l_buffer; + } + + l_size *= 2; + } +} + +static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size) +{ + NetlinkList *l_item = uv__malloc(sizeof(NetlinkList)); + if (l_item == NULL) + { + return NULL; + } + + l_item->m_next = NULL; + l_item->m_data = p_data; + l_item->m_size = p_size; + return l_item; +} + +static void freeResultList(NetlinkList *p_list) +{ + NetlinkList *l_cur; + while(p_list) + { + l_cur = p_list; + p_list = p_list->m_next; + uv__free(l_cur->m_data); + uv__free(l_cur); + } +} + +static NetlinkList 
*getResultList(int p_socket, int p_request) +{ + int l_size; + int l_done; + NetlinkList *l_list; + NetlinkList *l_end; + + if(netlink_send(p_socket, p_request) < 0) + { + return NULL; + } + + l_list = NULL; + l_end = NULL; + + l_done = 0; + while(!l_done) + { + NetlinkList *l_item; + + struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, &l_size, &l_done); + /* Error */ + if(!l_hdr) + { + freeResultList(l_list); + return NULL; + } + + l_item = newListItem(l_hdr, l_size); + if (!l_item) + { + freeResultList(l_list); + return NULL; + } + if(!l_list) + { + l_list = l_item; + } + else + { + l_end->m_next = l_item; + } + l_end = l_item; + } + return l_list; +} + +static size_t maxSize(size_t a, size_t b) +{ + return (a > b ? a : b); +} + +static size_t calcAddrLen(sa_family_t p_family, int p_dataSize) +{ + switch(p_family) + { + case AF_INET: + return sizeof(struct sockaddr_in); + case AF_INET6: + return sizeof(struct sockaddr_in6); + case AF_PACKET: + return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize); + default: + return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize); + } +} + +static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size) +{ + switch(p_family) + { + case AF_INET: + memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size); + break; + case AF_INET6: + memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size); + break; + case AF_PACKET: + memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size); + ((struct sockaddr_ll*)p_dest)->sll_halen = p_size; + break; + default: + memcpy(p_dest->sa_data, p_data, p_size); + break; + } + p_dest->sa_family = p_family; +} + +static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry) +{ + if(!*p_resultList) + { + *p_resultList = p_entry; + } + else + { + struct ifaddrs *l_cur = *p_resultList; + while(l_cur->ifa_next) + { + l_cur = l_cur->ifa_next; + } + l_cur->ifa_next = p_entry; + } +} + +static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList) +{ + struct ifaddrs *l_entry; + + char *l_index; + char *l_name; + char *l_addr; + char *l_data; + + struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr); + + size_t l_nameSize = 0; + size_t l_addrSize = 0; + size_t l_dataSize = 0; + + size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg)); + struct rtattr *l_rta; + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + switch(l_rta->rta_type) + { + case IFLA_ADDRESS: + case IFLA_BROADCAST: + l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize)); + break; + case IFLA_IFNAME: + l_nameSize += NLMSG_ALIGN(l_rtaSize + 1); + break; + case IFLA_STATS: + l_dataSize += NLMSG_ALIGN(l_rtaSize); + break; + default: + break; + } + } + + l_entry = uv__malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize); + if (l_entry == NULL) + { + return -1; + } + memset(l_entry, 0, sizeof(struct ifaddrs)); + l_entry->ifa_name = ""; + + l_index = ((char *)l_entry) + sizeof(struct ifaddrs); + l_name = l_index + sizeof(int); + l_addr = l_name + l_nameSize; + l_data = l_addr + l_addrSize; + + /* Save the interface index so we can look it up when handling the + * addresses. 
+ */ + memcpy(l_index, &l_info->ifi_index, sizeof(int)); + + l_entry->ifa_flags = l_info->ifi_flags; + + l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg)); + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + void *l_rtaData = RTA_DATA(l_rta); + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + switch(l_rta->rta_type) + { + case IFLA_ADDRESS: + case IFLA_BROADCAST: + { + size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize); + makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize); + ((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index; + ((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type; + if(l_rta->rta_type == IFLA_ADDRESS) + { + l_entry->ifa_addr = (struct sockaddr *)l_addr; + } + else + { + l_entry->ifa_broadaddr = (struct sockaddr *)l_addr; + } + l_addr += NLMSG_ALIGN(l_addrLen); + break; + } + case IFLA_IFNAME: + strncpy(l_name, l_rtaData, l_rtaDataSize); + l_name[l_rtaDataSize] = '\0'; + l_entry->ifa_name = l_name; + break; + case IFLA_STATS: + memcpy(l_data, l_rtaData, l_rtaDataSize); + l_entry->ifa_data = l_data; + break; + default: + break; + } + } + + addToEnd(p_resultList, l_entry); + return 0; +} + +static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks) +{ + int l_num = 0; + struct ifaddrs *l_cur = *p_links; + while(l_cur && l_num < p_numLinks) + { + char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs); + int l_index; + memcpy(&l_index, l_indexPtr, sizeof(int)); + if(l_index == p_index) + { + return l_cur; + } + + l_cur = l_cur->ifa_next; + ++l_num; + } + return NULL; +} + +static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks) +{ + struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr); + struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks); + + size_t l_nameSize = 0; + size_t l_addrSize = 0; + + int l_addedNetmask = 0; + + size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg)); + struct rtattr *l_rta; + struct ifaddrs *l_entry; + + char *l_name; + char *l_addr; + + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + if(l_info->ifa_family == AF_PACKET) + { + continue; + } + + switch(l_rta->rta_type) + { + case IFA_ADDRESS: + case IFA_LOCAL: + if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask) + { + /* Make room for netmask */ + l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize)); + l_addedNetmask = 1; + } + case IFA_BROADCAST: + l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize)); + break; + case IFA_LABEL: + l_nameSize += NLMSG_ALIGN(l_rtaSize + 1); + break; + default: + break; + } + } + + l_entry = uv__malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize); + if (l_entry == NULL) + { + return -1; + } + memset(l_entry, 0, sizeof(struct ifaddrs)); + l_entry->ifa_name = (l_interface ? 
l_interface->ifa_name : ""); + + l_name = ((char *)l_entry) + sizeof(struct ifaddrs); + l_addr = l_name + l_nameSize; + + l_entry->ifa_flags = l_info->ifa_flags; + if(l_interface) + { + l_entry->ifa_flags |= l_interface->ifa_flags; + } + + l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg)); + for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize)) + { + void *l_rtaData = RTA_DATA(l_rta); + size_t l_rtaDataSize = RTA_PAYLOAD(l_rta); + switch(l_rta->rta_type) + { + case IFA_ADDRESS: + case IFA_BROADCAST: + case IFA_LOCAL: + { + size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize); + makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize); + if(l_info->ifa_family == AF_INET6) + { + if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData)) + { + ((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index; + } + } + + /* Apparently in a point-to-point network IFA_ADDRESS contains + * the dest address and IFA_LOCAL contains the local address + */ + if(l_rta->rta_type == IFA_ADDRESS) + { + if(l_entry->ifa_addr) + { + l_entry->ifa_dstaddr = (struct sockaddr *)l_addr; + } + else + { + l_entry->ifa_addr = (struct sockaddr *)l_addr; + } + } + else if(l_rta->rta_type == IFA_LOCAL) + { + if(l_entry->ifa_addr) + { + l_entry->ifa_dstaddr = l_entry->ifa_addr; + } + l_entry->ifa_addr = (struct sockaddr *)l_addr; + } + else + { + l_entry->ifa_broadaddr = (struct sockaddr *)l_addr; + } + l_addr += NLMSG_ALIGN(l_addrLen); + break; + } + case IFA_LABEL: + strncpy(l_name, l_rtaData, l_rtaDataSize); + l_name[l_rtaDataSize] = '\0'; + l_entry->ifa_name = l_name; + break; + default: + break; + } + } + + if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6)) + { + unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128); + unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? 
l_maxPrefix : l_info->ifa_prefixlen); + char l_mask[16] = {0}; + unsigned i; + for(i=0; i<(l_prefix/8); ++i) + { + l_mask[i] = 0xff; + } + if(l_prefix % 8) + { + l_mask[i] = 0xff << (8 - (l_prefix % 8)); + } + + makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8); + l_entry->ifa_netmask = (struct sockaddr *)l_addr; + } + + addToEnd(p_resultList, l_entry); + return 0; +} + +static int interpretLinks(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList) +{ + + int l_numLinks = 0; + pid_t l_pid = getpid(); + for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next) + { + unsigned int l_nlsize = p_netlinkList->m_size; + struct nlmsghdr *l_hdr; + for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize)) + { + if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket) + { + continue; + } + + if(l_hdr->nlmsg_type == NLMSG_DONE) + { + break; + } + + if(l_hdr->nlmsg_type == RTM_NEWLINK) + { + if(interpretLink(l_hdr, p_resultList) == -1) + { + return -1; + } + ++l_numLinks; + } + } + } + return l_numLinks; +} + +static int interpretAddrs(int p_socket, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks) +{ + pid_t l_pid = getpid(); + for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next) + { + unsigned int l_nlsize = p_netlinkList->m_size; + struct nlmsghdr *l_hdr; + for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize)) + { + if((pid_t)l_hdr->nlmsg_pid != l_pid || (int)l_hdr->nlmsg_seq != p_socket) + { + continue; + } + + if(l_hdr->nlmsg_type == NLMSG_DONE) + { + break; + } + + if(l_hdr->nlmsg_type == RTM_NEWADDR) + { + if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1) + { + return -1; + } + } + } + } + return 0; +} + +int getifaddrs(struct ifaddrs **ifap) +{ + int l_socket; + int l_result; + int l_numLinks; + NetlinkList *l_linkResults; + NetlinkList *l_addrResults; + + if(!ifap) + { + return -1; + } + *ifap = NULL; + + l_socket = netlink_socket(); + if(l_socket < 0) + { + return -1; + } + + l_linkResults = getResultList(l_socket, RTM_GETLINK); + if(!l_linkResults) + { + close(l_socket); + return -1; + } + + l_addrResults = getResultList(l_socket, RTM_GETADDR); + if(!l_addrResults) + { + close(l_socket); + freeResultList(l_linkResults); + return -1; + } + + l_result = 0; + l_numLinks = interpretLinks(l_socket, l_linkResults, ifap); + if(l_numLinks == -1 || interpretAddrs(l_socket, l_addrResults, ifap, l_numLinks) == -1) + { + l_result = -1; + } + + freeResultList(l_linkResults); + freeResultList(l_addrResults); + close(l_socket); + return l_result; +} + +void freeifaddrs(struct ifaddrs *ifa) +{ + struct ifaddrs *l_cur; + while(ifa) + { + l_cur = ifa; + ifa = ifa->ifa_next; + uv__free(l_cur); + } +} diff --git a/deps/libuv/src/unix/async.c b/deps/libuv/src/unix/async.c new file mode 100644 index 00000000..393cdebd --- /dev/null +++ b/deps/libuv/src/unix/async.c @@ -0,0 +1,290 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* This file contains both the uv__async internal infrastructure and the + * user-facing uv_async_t functions. + */ + +#include "uv.h" +#include "internal.h" +#include "atomic-ops.h" + +#include +#include /* snprintf() */ +#include +#include +#include +#include + +static void uv__async_event(uv_loop_t* loop, + struct uv__async* w, + unsigned int nevents); +static int uv__async_eventfd(void); + + +int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { + int err; + + err = uv__async_start(loop, &loop->async_watcher, uv__async_event); + if (err) + return err; + + uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC); + handle->async_cb = async_cb; + handle->pending = 0; + + QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue); + uv__handle_start(handle); + + return 0; +} + + +int uv_async_send(uv_async_t* handle) { + /* Do a cheap read first. */ + if (ACCESS_ONCE(int, handle->pending) != 0) + return 0; + + if (cmpxchgi(&handle->pending, 0, 1) == 0) + uv__async_send(&handle->loop->async_watcher); + + return 0; +} + + +void uv__async_close(uv_async_t* handle) { + QUEUE_REMOVE(&handle->queue); + uv__handle_stop(handle); +} + + +static void uv__async_event(uv_loop_t* loop, + struct uv__async* w, + unsigned int nevents) { + QUEUE queue; + QUEUE* q; + uv_async_t* h; + + QUEUE_MOVE(&loop->async_handles, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_async_t, queue); + + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&loop->async_handles, q); + + if (cmpxchgi(&h->pending, 1, 0) == 0) + continue; + + if (h->async_cb == NULL) + continue; + h->async_cb(h); + } +} + + +static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + struct uv__async* wa; + char buf[1024]; + unsigned n; + ssize_t r; + + n = 0; + for (;;) { + r = read(w->fd, buf, sizeof(buf)); + + if (r > 0) + n += r; + + if (r == sizeof(buf)) + continue; + + if (r != -1) + break; + + if (errno == EAGAIN || errno == EWOULDBLOCK) + break; + + if (errno == EINTR) + continue; + + abort(); + } + + wa = container_of(w, struct uv__async, io_watcher); + +#if defined(__linux__) + if (wa->wfd == -1) { + uint64_t val; + assert(n == sizeof(val)); + memcpy(&val, buf, sizeof(val)); /* Avoid alignment issues. 
*/ + wa->cb(loop, wa, val); + return; + } +#endif + + wa->cb(loop, wa, n); +} + + +void uv__async_send(struct uv__async* wa) { + const void* buf; + ssize_t len; + int fd; + int r; + + buf = ""; + len = 1; + fd = wa->wfd; + +#if defined(__linux__) + if (fd == -1) { + static const uint64_t val = 1; + buf = &val; + len = sizeof(val); + fd = wa->io_watcher.fd; /* eventfd */ + } +#endif + + do + r = write(fd, buf, len); + while (r == -1 && errno == EINTR); + + if (r == len) + return; + + if (r == -1) + if (errno == EAGAIN || errno == EWOULDBLOCK) + return; + + abort(); +} + + +void uv__async_init(struct uv__async* wa) { + wa->io_watcher.fd = -1; + wa->wfd = -1; +} + + +int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb) { + int pipefd[2]; + int err; + + if (wa->io_watcher.fd != -1) + return 0; + + err = uv__async_eventfd(); + if (err >= 0) { + pipefd[0] = err; + pipefd[1] = -1; + } + else if (err == -ENOSYS) { + err = uv__make_pipe(pipefd, UV__F_NONBLOCK); +#if defined(__linux__) + /* Save a file descriptor by opening one of the pipe descriptors as + * read/write through the procfs. That file descriptor can then + * function as both ends of the pipe. + */ + if (err == 0) { + char buf[32]; + int fd; + + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", pipefd[0]); + fd = uv__open_cloexec(buf, O_RDWR); + if (fd >= 0) { + uv__close(pipefd[0]); + uv__close(pipefd[1]); + pipefd[0] = fd; + pipefd[1] = fd; + } + } +#endif + } + + if (err < 0) + return err; + + uv__io_init(&wa->io_watcher, uv__async_io, pipefd[0]); + uv__io_start(loop, &wa->io_watcher, POLLIN); + wa->wfd = pipefd[1]; + wa->cb = cb; + + return 0; +} + + +void uv__async_stop(uv_loop_t* loop, struct uv__async* wa) { + if (wa->io_watcher.fd == -1) + return; + + if (wa->wfd != -1) { + if (wa->wfd != wa->io_watcher.fd) + uv__close(wa->wfd); + wa->wfd = -1; + } + + uv__io_stop(loop, &wa->io_watcher, POLLIN); + uv__close(wa->io_watcher.fd); + wa->io_watcher.fd = -1; +} + + +static int uv__async_eventfd() { +#if defined(__linux__) + static int no_eventfd2; + static int no_eventfd; + int fd; + + if (no_eventfd2) + goto skip_eventfd2; + + fd = uv__eventfd2(0, UV__EFD_CLOEXEC | UV__EFD_NONBLOCK); + if (fd != -1) + return fd; + + if (errno != ENOSYS) + return -errno; + + no_eventfd2 = 1; + +skip_eventfd2: + + if (no_eventfd) + goto skip_eventfd; + + fd = uv__eventfd(0); + if (fd != -1) { + uv__cloexec(fd, 1); + uv__nonblock(fd, 1); + return fd; + } + + if (errno != ENOSYS) + return -errno; + + no_eventfd = 1; + +skip_eventfd: + +#endif + + return -ENOSYS; +} diff --git a/deps/libuv/src/unix/atomic-ops.h b/deps/libuv/src/unix/atomic-ops.h new file mode 100644 index 00000000..815e3552 --- /dev/null +++ b/deps/libuv/src/unix/atomic-ops.h @@ -0,0 +1,88 @@ +/* Copyright (c) 2013, Ben Noordhuis + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef UV_ATOMIC_OPS_H_ +#define UV_ATOMIC_OPS_H_ + +#include "internal.h" /* UV_UNUSED */ + +#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) +#include +#define __sync_val_compare_and_swap(p, o, n) atomic_cas_ptr(p, o, n) +#endif + +UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)); +UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval)); +UV_UNUSED(static void cpu_relax(void)); + +/* Prefer hand-rolled assembly over the gcc builtins because the latter also + * issue full memory barriers. + */ +UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) { +#if defined(__i386__) || defined(__x86_64__) + int out; + __asm__ __volatile__ ("lock; cmpxchg %2, %1;" + : "=a" (out), "+m" (*(volatile int*) ptr) + : "r" (newval), "0" (oldval) + : "memory"); + return out; +#elif defined(_AIX) && defined(__xlC__) + const int out = (*(volatile int*) ptr); + __compare_and_swap(ptr, &oldval, newval); + return out; +#elif defined(__MVS__) + return __plo_CS(ptr, (unsigned int*) ptr, + oldval, (unsigned int*) &newval); +#else + return __sync_val_compare_and_swap(ptr, oldval, newval); +#endif +} + +UV_UNUSED(static long cmpxchgl(long* ptr, long oldval, long newval)) { +#if defined(__i386__) || defined(__x86_64__) + long out; + __asm__ __volatile__ ("lock; cmpxchg %2, %1;" + : "=a" (out), "+m" (*(volatile long*) ptr) + : "r" (newval), "0" (oldval) + : "memory"); + return out; +#elif defined(_AIX) && defined(__xlC__) + const long out = (*(volatile int*) ptr); +# if defined(__64BIT__) + __compare_and_swaplp(ptr, &oldval, newval); +# else + __compare_and_swap(ptr, &oldval, newval); +# endif /* if defined(__64BIT__) */ + return out; +#elif defined (__MVS__) +# ifdef _LP64 + return __plo_CSGR(ptr, (unsigned long long*) ptr, + oldval, (unsigned long long*) &newval); +# else + return __plo_CS(ptr, (unsigned int*) ptr, + oldval, (unsigned int*) &newval); +# endif +#else + return __sync_val_compare_and_swap(ptr, oldval, newval); +#endif +} + +UV_UNUSED(static void cpu_relax(void)) { +#if defined(__i386__) || defined(__x86_64__) + __asm__ __volatile__ ("rep; nop"); /* a.k.a. PAUSE */ +#endif +} + +#endif /* UV_ATOMIC_OPS_H_ */ diff --git a/deps/libuv/src/unix/core.c b/deps/libuv/src/unix/core.c new file mode 100644 index 00000000..a2c07e6a --- /dev/null +++ b/deps/libuv/src/unix/core.c @@ -0,0 +1,1244 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include /* NULL */ +#include /* printf */ +#include +#include /* strerror */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* INT_MAX, PATH_MAX, IOV_MAX */ +#include /* writev */ +#include /* getrusage */ +#include + +#ifdef __sun +# include +# include +# include +#endif + +#ifdef __APPLE__ +# include /* _NSGetExecutablePath */ +# include +# if defined(O_CLOEXEC) +# define UV__O_CLOEXEC O_CLOEXEC +# endif +#endif + +#if defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) +# include +# include +# include +# define UV__O_CLOEXEC O_CLOEXEC +# if defined(__FreeBSD__) && __FreeBSD__ >= 10 +# define uv__accept4 accept4 +# define UV__SOCK_NONBLOCK SOCK_NONBLOCK +# define UV__SOCK_CLOEXEC SOCK_CLOEXEC +# endif +# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC) +# define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC +# endif +#endif + +#if defined(__ANDROID_API__) && __ANDROID_API__ < 21 +# include /* for dlsym */ +#endif + +#if defined(__MVS__) +#include +#endif + +static int uv__run_pending(uv_loop_t* loop); + +/* Verify that uv_buf_t is ABI-compatible with struct iovec. */ +STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec)); +STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) == + sizeof(((struct iovec*) 0)->iov_base)); +STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) == + sizeof(((struct iovec*) 0)->iov_len)); +STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base)); +STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len)); + + +uint64_t uv_hrtime(void) { + return uv__hrtime(UV_CLOCK_PRECISE); +} + + +void uv_close(uv_handle_t* handle, uv_close_cb close_cb) { + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + + handle->flags |= UV_CLOSING; + handle->close_cb = close_cb; + + switch (handle->type) { + case UV_NAMED_PIPE: + uv__pipe_close((uv_pipe_t*)handle); + break; + + case UV_TTY: + uv__stream_close((uv_stream_t*)handle); + break; + + case UV_TCP: + uv__tcp_close((uv_tcp_t*)handle); + break; + + case UV_UDP: + uv__udp_close((uv_udp_t*)handle); + break; + + case UV_PREPARE: + uv__prepare_close((uv_prepare_t*)handle); + break; + + case UV_CHECK: + uv__check_close((uv_check_t*)handle); + break; + + case UV_IDLE: + uv__idle_close((uv_idle_t*)handle); + break; + + case UV_ASYNC: + uv__async_close((uv_async_t*)handle); + break; + + case UV_TIMER: + uv__timer_close((uv_timer_t*)handle); + break; + + case UV_PROCESS: + uv__process_close((uv_process_t*)handle); + break; + + case UV_FS_EVENT: + uv__fs_event_close((uv_fs_event_t*)handle); + break; + + case UV_POLL: + uv__poll_close((uv_poll_t*)handle); + break; + + case UV_FS_POLL: + uv__fs_poll_close((uv_fs_poll_t*)handle); + break; + + case UV_SIGNAL: + uv__signal_close((uv_signal_t*) handle); + /* Signal handles may not be closed immediately. The signal code will */ + /* itself close uv__make_close_pending whenever appropriate. 
*/ + return; + + default: + assert(0); + } + + uv__make_close_pending(handle); +} + +int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) { + int r; + int fd; + socklen_t len; + + if (handle == NULL || value == NULL) + return -EINVAL; + + if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE) + fd = uv__stream_fd((uv_stream_t*) handle); + else if (handle->type == UV_UDP) + fd = ((uv_udp_t *) handle)->io_watcher.fd; + else + return -ENOTSUP; + + len = sizeof(*value); + + if (*value == 0) + r = getsockopt(fd, SOL_SOCKET, optname, value, &len); + else + r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len); + + if (r < 0) + return -errno; + + return 0; +} + +void uv__make_close_pending(uv_handle_t* handle) { + assert(handle->flags & UV_CLOSING); + assert(!(handle->flags & UV_CLOSED)); + handle->next_closing = handle->loop->closing_handles; + handle->loop->closing_handles = handle; +} + +int uv__getiovmax(void) { +#if defined(IOV_MAX) + return IOV_MAX; +#elif defined(_SC_IOV_MAX) + static int iovmax = -1; + if (iovmax == -1) { + iovmax = sysconf(_SC_IOV_MAX); + /* On some embedded devices (arm-linux-uclibc based ip camera), + * sysconf(_SC_IOV_MAX) can not get the correct value. The return + * value is -1 and the errno is EINPROGRESS. Degrade the value to 1. + */ + if (iovmax == -1) iovmax = 1; + } + return iovmax; +#else + return 1024; +#endif +} + + +static void uv__finish_close(uv_handle_t* handle) { + /* Note: while the handle is in the UV_CLOSING state now, it's still possible + * for it to be active in the sense that uv__is_active() returns true. + * A good example is when the user calls uv_shutdown(), immediately followed + * by uv_close(). The handle is considered active at this point because the + * completion of the shutdown req is still pending. 
+ */ + assert(handle->flags & UV_CLOSING); + assert(!(handle->flags & UV_CLOSED)); + handle->flags |= UV_CLOSED; + + switch (handle->type) { + case UV_PREPARE: + case UV_CHECK: + case UV_IDLE: + case UV_ASYNC: + case UV_TIMER: + case UV_PROCESS: + case UV_FS_EVENT: + case UV_FS_POLL: + case UV_POLL: + case UV_SIGNAL: + break; + + case UV_NAMED_PIPE: + case UV_TCP: + case UV_TTY: + uv__stream_destroy((uv_stream_t*)handle); + break; + + case UV_UDP: + uv__udp_finish_close((uv_udp_t*)handle); + break; + + default: + assert(0); + break; + } + + uv__handle_unref(handle); + QUEUE_REMOVE(&handle->handle_queue); + + if (handle->close_cb) { + handle->close_cb(handle); + } +} + + +static void uv__run_closing_handles(uv_loop_t* loop) { + uv_handle_t* p; + uv_handle_t* q; + + p = loop->closing_handles; + loop->closing_handles = NULL; + + while (p) { + q = p->next_closing; + uv__finish_close(p); + p = q; + } +} + + +int uv_is_closing(const uv_handle_t* handle) { + return uv__is_closing(handle); +} + + +int uv_backend_fd(const uv_loop_t* loop) { + return loop->backend_fd; +} + + +int uv_backend_timeout(const uv_loop_t* loop) { + if (loop->stop_flag != 0) + return 0; + + if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop)) + return 0; + + if (!QUEUE_EMPTY(&loop->idle_handles)) + return 0; + + if (!QUEUE_EMPTY(&loop->pending_queue)) + return 0; + + if (loop->closing_handles) + return 0; + + return uv__next_timeout(loop); +} + + +static int uv__loop_alive(const uv_loop_t* loop) { + return uv__has_active_handles(loop) || + uv__has_active_reqs(loop) || + loop->closing_handles != NULL; +} + + +int uv_loop_alive(const uv_loop_t* loop) { + return uv__loop_alive(loop); +} + + +int uv_run(uv_loop_t* loop, uv_run_mode mode) { + int timeout; + int r; + int ran_pending; + + r = uv__loop_alive(loop); + if (!r) + uv__update_time(loop); + + while (r != 0 && loop->stop_flag == 0) { + uv__update_time(loop); + uv__run_timers(loop); + ran_pending = uv__run_pending(loop); + uv__run_idle(loop); + uv__run_prepare(loop); + + timeout = 0; + if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT) + timeout = uv_backend_timeout(loop); + + uv__io_poll(loop, timeout); + uv__run_check(loop); + uv__run_closing_handles(loop); + + if (mode == UV_RUN_ONCE) { + /* UV_RUN_ONCE implies forward progress: at least one callback must have + * been invoked when it returns. uv__io_poll() can return without doing + * I/O (meaning: no callbacks) when its timeout expires - which means we + * have pending timers that satisfy the forward progress constraint. + * + * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from + * the check. + */ + uv__update_time(loop); + uv__run_timers(loop); + } + + r = uv__loop_alive(loop); + if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) + break; + } + + /* The if statement lets gcc compile it to a conditional store. Avoids + * dirtying a cache line. + */ + if (loop->stop_flag != 0) + loop->stop_flag = 0; + + return r; +} + + +void uv_update_time(uv_loop_t* loop) { + uv__update_time(loop); +} + + +int uv_is_active(const uv_handle_t* handle) { + return uv__is_active(handle); +} + + +/* Open a socket in non-blocking close-on-exec mode, atomically if possible. 
*/ +int uv__socket(int domain, int type, int protocol) { + int sockfd; + int err; + +#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC) + sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol); + if (sockfd != -1) + return sockfd; + + if (errno != EINVAL) + return -errno; +#endif + + sockfd = socket(domain, type, protocol); + if (sockfd == -1) + return -errno; + + err = uv__nonblock(sockfd, 1); + if (err == 0) + err = uv__cloexec(sockfd, 1); + + if (err) { + uv__close(sockfd); + return err; + } + +#if defined(SO_NOSIGPIPE) + { + int on = 1; + setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)); + } +#endif + + return sockfd; +} + +/* get a file pointer to a file in read-only and close-on-exec mode */ +FILE* uv__open_file(const char* path) { + int fd; + FILE* fp; + + fd = uv__open_cloexec(path, O_RDONLY); + if (fd < 0) + return NULL; + + fp = fdopen(fd, "r"); + if (fp == NULL) + uv__close(fd); + + return fp; +} + + +int uv__accept(int sockfd) { + int peerfd; + int err; + + assert(sockfd >= 0); + + while (1) { +#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD__ >= 10) + static int no_accept4; + + if (no_accept4) + goto skip; + + peerfd = uv__accept4(sockfd, + NULL, + NULL, + UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC); + if (peerfd != -1) + return peerfd; + + if (errno == EINTR) + continue; + + if (errno != ENOSYS) + return -errno; + + no_accept4 = 1; +skip: +#endif + + peerfd = accept(sockfd, NULL, NULL); + if (peerfd == -1) { + if (errno == EINTR) + continue; + return -errno; + } + + err = uv__cloexec(peerfd, 1); + if (err == 0) + err = uv__nonblock(peerfd, 1); + + if (err) { + uv__close(peerfd); + return err; + } + + return peerfd; + } +} + + +int uv__close_nocheckstdio(int fd) { + int saved_errno; + int rc; + + assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */ + + saved_errno = errno; + rc = close(fd); + if (rc == -1) { + rc = -errno; + if (rc == -EINTR || rc == -EINPROGRESS) + rc = 0; /* The close is in progress, not an error. */ + errno = saved_errno; + } + + return rc; +} + + +int uv__close(int fd) { + assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */ + return uv__close_nocheckstdio(fd); +} + + +int uv__nonblock_ioctl(int fd, int set) { + int r; + + do + r = ioctl(fd, FIONBIO, &set); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +int uv__cloexec_ioctl(int fd, int set) { + int r; + + do + r = ioctl(fd, set ? FIOCLEX : FIONCLEX); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +int uv__nonblock_fcntl(int fd, int set) { + int flags; + int r; + + do + r = fcntl(fd, F_GETFL); + while (r == -1 && errno == EINTR); + + if (r == -1) + return -errno; + + /* Bail out now if already set/clear. */ + if (!!(r & O_NONBLOCK) == !!set) + return 0; + + if (set) + flags = r | O_NONBLOCK; + else + flags = r & ~O_NONBLOCK; + + do + r = fcntl(fd, F_SETFL, flags); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +int uv__cloexec_fcntl(int fd, int set) { + int flags; + int r; + + do + r = fcntl(fd, F_GETFD); + while (r == -1 && errno == EINTR); + + if (r == -1) + return -errno; + + /* Bail out now if already set/clear. 
*/ + if (!!(r & FD_CLOEXEC) == !!set) + return 0; + + if (set) + flags = r | FD_CLOEXEC; + else + flags = r & ~FD_CLOEXEC; + + do + r = fcntl(fd, F_SETFD, flags); + while (r == -1 && errno == EINTR); + + if (r) + return -errno; + + return 0; +} + + +/* This function is not execve-safe, there is a race window + * between the call to dup() and fcntl(FD_CLOEXEC). + */ +int uv__dup(int fd) { + int err; + + fd = dup(fd); + + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err) { + uv__close(fd); + return err; + } + + return fd; +} + + +ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) { + struct cmsghdr* cmsg; + ssize_t rc; + int* pfd; + int* end; +#if defined(__linux__) + static int no_msg_cmsg_cloexec; + if (no_msg_cmsg_cloexec == 0) { + rc = recvmsg(fd, msg, flags | 0x40000000); /* MSG_CMSG_CLOEXEC */ + if (rc != -1) + return rc; + if (errno != EINVAL) + return -errno; + rc = recvmsg(fd, msg, flags); + if (rc == -1) + return -errno; + no_msg_cmsg_cloexec = 1; + } else { + rc = recvmsg(fd, msg, flags); + } +#else + rc = recvmsg(fd, msg, flags); +#endif + if (rc == -1) + return -errno; + if (msg->msg_controllen == 0) + return rc; + for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) + if (cmsg->cmsg_type == SCM_RIGHTS) + for (pfd = (int*) CMSG_DATA(cmsg), + end = (int*) ((char*) cmsg + cmsg->cmsg_len); + pfd < end; + pfd += 1) + uv__cloexec(*pfd, 1); + return rc; +} + + +int uv_cwd(char* buffer, size_t* size) { + if (buffer == NULL || size == NULL) + return -EINVAL; + + if (getcwd(buffer, *size) == NULL) + return -errno; + + *size = strlen(buffer); + if (*size > 1 && buffer[*size - 1] == '/') { + buffer[*size-1] = '\0'; + (*size)--; + } + + return 0; +} + + +int uv_chdir(const char* dir) { + if (chdir(dir)) + return -errno; + + return 0; +} + + +void uv_disable_stdio_inheritance(void) { + int fd; + + /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the + * first 16 file descriptors. After that, bail out after the first error. 
+ */ + for (fd = 0; ; fd++) + if (uv__cloexec(fd, 1) && fd > 15) + break; +} + + +int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) { + int fd_out; + + switch (handle->type) { + case UV_TCP: + case UV_NAMED_PIPE: + case UV_TTY: + fd_out = uv__stream_fd((uv_stream_t*) handle); + break; + + case UV_UDP: + fd_out = ((uv_udp_t *) handle)->io_watcher.fd; + break; + + case UV_POLL: + fd_out = ((uv_poll_t *) handle)->io_watcher.fd; + break; + + default: + return -EINVAL; + } + + if (uv__is_closing(handle) || fd_out == -1) + return -EBADF; + + *fd = fd_out; + return 0; +} + + +static int uv__run_pending(uv_loop_t* loop) { + QUEUE* q; + QUEUE pq; + uv__io_t* w; + + if (QUEUE_EMPTY(&loop->pending_queue)) + return 0; + + QUEUE_MOVE(&loop->pending_queue, &pq); + + while (!QUEUE_EMPTY(&pq)) { + q = QUEUE_HEAD(&pq); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + w = QUEUE_DATA(q, uv__io_t, pending_queue); + w->cb(loop, w, POLLOUT); + } + + return 1; +} + + +static unsigned int next_power_of_two(unsigned int val) { + val -= 1; + val |= val >> 1; + val |= val >> 2; + val |= val >> 4; + val |= val >> 8; + val |= val >> 16; + val += 1; + return val; +} + +static void maybe_resize(uv_loop_t* loop, unsigned int len) { + uv__io_t** watchers; + void* fake_watcher_list; + void* fake_watcher_count; + unsigned int nwatchers; + unsigned int i; + + if (len <= loop->nwatchers) + return; + + /* Preserve fake watcher list and count at the end of the watchers */ + if (loop->watchers != NULL) { + fake_watcher_list = loop->watchers[loop->nwatchers]; + fake_watcher_count = loop->watchers[loop->nwatchers + 1]; + } else { + fake_watcher_list = NULL; + fake_watcher_count = NULL; + } + + nwatchers = next_power_of_two(len + 2) - 2; + watchers = uv__realloc(loop->watchers, + (nwatchers + 2) * sizeof(loop->watchers[0])); + + if (watchers == NULL) + abort(); + for (i = loop->nwatchers; i < nwatchers; i++) + watchers[i] = NULL; + watchers[nwatchers] = fake_watcher_list; + watchers[nwatchers + 1] = fake_watcher_count; + + loop->watchers = watchers; + loop->nwatchers = nwatchers; +} + + +void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) { + assert(cb != NULL); + assert(fd >= -1); + QUEUE_INIT(&w->pending_queue); + QUEUE_INIT(&w->watcher_queue); + w->cb = cb; + w->fd = fd; + w->events = 0; + w->pevents = 0; + +#if defined(UV_HAVE_KQUEUE) + w->rcount = 0; + w->wcount = 0; +#endif /* defined(UV_HAVE_KQUEUE) */ +} + + +void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP))); + assert(0 != events); + assert(w->fd >= 0); + assert(w->fd < INT_MAX); + + w->pevents |= events; + maybe_resize(loop, w->fd + 1); + +#if !defined(__sun) + /* The event ports backend needs to rearm all file descriptors on each and + * every tick of the event loop but the other backends allow us to + * short-circuit here if the event mask is unchanged. 
+ */ + if (w->events == w->pevents) { + if (w->events == 0 && !QUEUE_EMPTY(&w->watcher_queue)) { + QUEUE_REMOVE(&w->watcher_queue); + QUEUE_INIT(&w->watcher_queue); + } + return; + } +#endif + + if (QUEUE_EMPTY(&w->watcher_queue)) + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); + + if (loop->watchers[w->fd] == NULL) { + loop->watchers[w->fd] = w; + loop->nfds++; + } +} + + +void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP))); + assert(0 != events); + + if (w->fd == -1) + return; + + assert(w->fd >= 0); + + /* Happens when uv__io_stop() is called on a handle that was never started. */ + if ((unsigned) w->fd >= loop->nwatchers) + return; + + w->pevents &= ~events; + + if (w->pevents == 0) { + QUEUE_REMOVE(&w->watcher_queue); + QUEUE_INIT(&w->watcher_queue); + + if (loop->watchers[w->fd] != NULL) { + assert(loop->watchers[w->fd] == w); + assert(loop->nfds > 0); + loop->watchers[w->fd] = NULL; + loop->nfds--; + w->events = 0; + } + } + else if (QUEUE_EMPTY(&w->watcher_queue)) + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); +} + + +void uv__io_close(uv_loop_t* loop, uv__io_t* w) { + uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP); + QUEUE_REMOVE(&w->pending_queue); + + /* Remove stale events for this file descriptor */ + uv__platform_invalidate_fd(loop, w->fd); +} + + +void uv__io_feed(uv_loop_t* loop, uv__io_t* w) { + if (QUEUE_EMPTY(&w->pending_queue)) + QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue); +} + + +int uv__io_active(const uv__io_t* w, unsigned int events) { + assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP))); + assert(0 != events); + return 0 != (w->pevents & events); +} + + +int uv_getrusage(uv_rusage_t* rusage) { + struct rusage usage; + + if (getrusage(RUSAGE_SELF, &usage)) + return -errno; + + rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec; + rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec; + + rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec; + rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec; + +#if !defined(__MVS__) + rusage->ru_maxrss = usage.ru_maxrss; + rusage->ru_ixrss = usage.ru_ixrss; + rusage->ru_idrss = usage.ru_idrss; + rusage->ru_isrss = usage.ru_isrss; + rusage->ru_minflt = usage.ru_minflt; + rusage->ru_majflt = usage.ru_majflt; + rusage->ru_nswap = usage.ru_nswap; + rusage->ru_inblock = usage.ru_inblock; + rusage->ru_oublock = usage.ru_oublock; + rusage->ru_msgsnd = usage.ru_msgsnd; + rusage->ru_msgrcv = usage.ru_msgrcv; + rusage->ru_nsignals = usage.ru_nsignals; + rusage->ru_nvcsw = usage.ru_nvcsw; + rusage->ru_nivcsw = usage.ru_nivcsw; +#endif + + return 0; +} + + +int uv__open_cloexec(const char* path, int flags) { + int err; + int fd; + +#if defined(UV__O_CLOEXEC) + static int no_cloexec; + + if (!no_cloexec) { + fd = open(path, flags | UV__O_CLOEXEC); + if (fd != -1) + return fd; + + if (errno != EINVAL) + return -errno; + + /* O_CLOEXEC not supported. */ + no_cloexec = 1; + } +#endif + + fd = open(path, flags); + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err) { + uv__close(fd); + return err; + } + + return fd; +} + + +int uv__dup2_cloexec(int oldfd, int newfd) { + int r; +#if defined(__FreeBSD__) && __FreeBSD__ >= 10 + r = dup3(oldfd, newfd, O_CLOEXEC); + if (r == -1) + return -errno; + return r; +#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC) + r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd); + if (r != -1) + return r; + if (errno != EINVAL) + return -errno; + /* Fall through. 
*/ +#elif defined(__linux__) + static int no_dup3; + if (!no_dup3) { + do + r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC); + while (r == -1 && errno == EBUSY); + if (r != -1) + return r; + if (errno != ENOSYS) + return -errno; + /* Fall through. */ + no_dup3 = 1; + } +#endif + { + int err; + do + r = dup2(oldfd, newfd); +#if defined(__linux__) + while (r == -1 && errno == EBUSY); +#else + while (0); /* Never retry. */ +#endif + + if (r == -1) + return -errno; + + err = uv__cloexec(newfd, 1); + if (err) { + uv__close(newfd); + return err; + } + + return r; + } +} + + +int uv_os_homedir(char* buffer, size_t* size) { + uv_passwd_t pwd; + char* buf; + size_t len; + int r; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + /* Check if the HOME environment variable is set first */ + buf = getenv("HOME"); + + if (buf != NULL) { + len = strlen(buf); + + if (len >= *size) { + *size = len + 1; + return -ENOBUFS; + } + + memcpy(buffer, buf, len + 1); + *size = len; + + return 0; + } + + /* HOME is not set, so call uv__getpwuid_r() */ + r = uv__getpwuid_r(&pwd); + + if (r != 0) { + return r; + } + + len = strlen(pwd.homedir); + + if (len >= *size) { + *size = len + 1; + uv_os_free_passwd(&pwd); + return -ENOBUFS; + } + + memcpy(buffer, pwd.homedir, len + 1); + *size = len; + uv_os_free_passwd(&pwd); + + return 0; +} + + +int uv_os_tmpdir(char* buffer, size_t* size) { + const char* buf; + size_t len; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + +#define CHECK_ENV_VAR(name) \ + do { \ + buf = getenv(name); \ + if (buf != NULL) \ + goto return_buffer; \ + } \ + while (0) + + /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */ + CHECK_ENV_VAR("TMPDIR"); + CHECK_ENV_VAR("TMP"); + CHECK_ENV_VAR("TEMP"); + CHECK_ENV_VAR("TEMPDIR"); + +#undef CHECK_ENV_VAR + + /* No temp environment variables defined */ + #if defined(__ANDROID__) + buf = "/data/local/tmp"; + #else + buf = "/tmp"; + #endif + +return_buffer: + len = strlen(buf); + + if (len >= *size) { + *size = len + 1; + return -ENOBUFS; + } + + /* The returned directory should not have a trailing slash. 
*/ + if (len > 1 && buf[len - 1] == '/') { + len--; + } + + memcpy(buffer, buf, len + 1); + buffer[len] = '\0'; + *size = len; + + return 0; +} + + +int uv__getpwuid_r(uv_passwd_t* pwd) { + struct passwd pw; + struct passwd* result; + char* buf; + uid_t uid; + size_t bufsize; + size_t name_size; + size_t homedir_size; + size_t shell_size; + long initsize; + int r; +#if defined(__ANDROID_API__) && __ANDROID_API__ < 21 + int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**); + + getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r"); + if (getpwuid_r == NULL) + return -ENOSYS; +#endif + + if (pwd == NULL) + return -EINVAL; + + initsize = sysconf(_SC_GETPW_R_SIZE_MAX); + + if (initsize <= 0) + bufsize = 4096; + else + bufsize = (size_t) initsize; + + uid = geteuid(); + buf = NULL; + + for (;;) { + uv__free(buf); + buf = uv__malloc(bufsize); + + if (buf == NULL) + return -ENOMEM; + + r = getpwuid_r(uid, &pw, buf, bufsize, &result); + + if (r != ERANGE) + break; + + bufsize *= 2; + } + + if (r != 0) { + uv__free(buf); + return -r; + } + + if (result == NULL) { + uv__free(buf); + return -ENOENT; + } + + /* Allocate memory for the username, shell, and home directory */ + name_size = strlen(pw.pw_name) + 1; + homedir_size = strlen(pw.pw_dir) + 1; + shell_size = strlen(pw.pw_shell) + 1; + pwd->username = uv__malloc(name_size + homedir_size + shell_size); + + if (pwd->username == NULL) { + uv__free(buf); + return -ENOMEM; + } + + /* Copy the username */ + memcpy(pwd->username, pw.pw_name, name_size); + + /* Copy the home directory */ + pwd->homedir = pwd->username + name_size; + memcpy(pwd->homedir, pw.pw_dir, homedir_size); + + /* Copy the shell */ + pwd->shell = pwd->homedir + homedir_size; + memcpy(pwd->shell, pw.pw_shell, shell_size); + + /* Copy the uid and gid */ + pwd->uid = pw.pw_uid; + pwd->gid = pw.pw_gid; + + uv__free(buf); + + return 0; +} + + +void uv_os_free_passwd(uv_passwd_t* pwd) { + if (pwd == NULL) + return; + + /* + The memory for name, shell, and homedir are allocated in a single + uv__malloc() call. The base of the pointer is stored in pwd->username, so + that is the field that needs to be freed. + */ + uv__free(pwd->username); + pwd->username = NULL; + pwd->shell = NULL; + pwd->homedir = NULL; +} + + +int uv_os_get_passwd(uv_passwd_t* pwd) { + return uv__getpwuid_r(pwd); +} + + +int uv_translate_sys_error(int sys_errno) { + /* If < 0 then it's already a libuv error. */ + return sys_errno <= 0 ? sys_errno : -sys_errno; +} diff --git a/deps/libuv/src/unix/darwin-proctitle.c b/deps/libuv/src/unix/darwin-proctitle.c new file mode 100644 index 00000000..11423116 --- /dev/null +++ b/deps/libuv/src/unix/darwin-proctitle.c @@ -0,0 +1,206 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include + +#include + +#if !TARGET_OS_IPHONE +# include +# include +#endif + + +static int uv__pthread_setname_np(const char* name) { + int (*dynamic_pthread_setname_np)(const char* name); + char namebuf[64]; /* MAXTHREADNAMESIZE */ + int err; + + /* pthread_setname_np() first appeared in OS X 10.6 and iOS 3.2. */ + *(void **)(&dynamic_pthread_setname_np) = + dlsym(RTLD_DEFAULT, "pthread_setname_np"); + + if (dynamic_pthread_setname_np == NULL) + return -ENOSYS; + + strncpy(namebuf, name, sizeof(namebuf) - 1); + namebuf[sizeof(namebuf) - 1] = '\0'; + + err = dynamic_pthread_setname_np(namebuf); + if (err) + return -err; + + return 0; +} + + +int uv__set_process_title(const char* title) { +#if TARGET_OS_IPHONE + return uv__pthread_setname_np(title); +#else + CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef, + const char*, + CFStringEncoding); + CFBundleRef (*pCFBundleGetBundleWithIdentifier)(CFStringRef); + void *(*pCFBundleGetDataPointerForName)(CFBundleRef, CFStringRef); + void *(*pCFBundleGetFunctionPointerForName)(CFBundleRef, CFStringRef); + CFTypeRef (*pLSGetCurrentApplicationASN)(void); + OSStatus (*pLSSetApplicationInformationItem)(int, + CFTypeRef, + CFStringRef, + CFStringRef, + CFDictionaryRef*); + void* application_services_handle; + void* core_foundation_handle; + CFBundleRef launch_services_bundle; + CFStringRef* display_name_key; + CFDictionaryRef (*pCFBundleGetInfoDictionary)(CFBundleRef); + CFBundleRef (*pCFBundleGetMainBundle)(void); + CFBundleRef hi_services_bundle; + OSStatus (*pSetApplicationIsDaemon)(int); + CFDictionaryRef (*pLSApplicationCheckIn)(int, CFDictionaryRef); + void (*pLSSetApplicationLaunchServicesServerConnectionStatus)(uint64_t, + void*); + CFTypeRef asn; + int err; + + err = -ENOENT; + application_services_handle = dlopen("/System/Library/Frameworks/" + "ApplicationServices.framework/" + "Versions/A/ApplicationServices", + RTLD_LAZY | RTLD_LOCAL); + core_foundation_handle = dlopen("/System/Library/Frameworks/" + "CoreFoundation.framework/" + "Versions/A/CoreFoundation", + RTLD_LAZY | RTLD_LOCAL); + + if (application_services_handle == NULL || core_foundation_handle == NULL) + goto out; + + *(void **)(&pCFStringCreateWithCString) = + dlsym(core_foundation_handle, "CFStringCreateWithCString"); + *(void **)(&pCFBundleGetBundleWithIdentifier) = + dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier"); + *(void **)(&pCFBundleGetDataPointerForName) = + dlsym(core_foundation_handle, "CFBundleGetDataPointerForName"); + *(void **)(&pCFBundleGetFunctionPointerForName) = + dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName"); + + if (pCFStringCreateWithCString == NULL || + pCFBundleGetBundleWithIdentifier == NULL || + pCFBundleGetDataPointerForName == NULL || + pCFBundleGetFunctionPointerForName == NULL) { + goto out; + } + +#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8) + + launch_services_bundle = + pCFBundleGetBundleWithIdentifier(S("com.apple.LaunchServices")); + + if 
(launch_services_bundle == NULL) + goto out; + + *(void **)(&pLSGetCurrentApplicationASN) = + pCFBundleGetFunctionPointerForName(launch_services_bundle, + S("_LSGetCurrentApplicationASN")); + + if (pLSGetCurrentApplicationASN == NULL) + goto out; + + *(void **)(&pLSSetApplicationInformationItem) = + pCFBundleGetFunctionPointerForName(launch_services_bundle, + S("_LSSetApplicationInformationItem")); + + if (pLSSetApplicationInformationItem == NULL) + goto out; + + display_name_key = pCFBundleGetDataPointerForName(launch_services_bundle, + S("_kLSDisplayNameKey")); + + if (display_name_key == NULL || *display_name_key == NULL) + goto out; + + *(void **)(&pCFBundleGetInfoDictionary) = dlsym(core_foundation_handle, + "CFBundleGetInfoDictionary"); + *(void **)(&pCFBundleGetMainBundle) = dlsym(core_foundation_handle, + "CFBundleGetMainBundle"); + if (pCFBundleGetInfoDictionary == NULL || pCFBundleGetMainBundle == NULL) + goto out; + + /* Black 10.9 magic, to remove (Not responding) mark in Activity Monitor */ + hi_services_bundle = + pCFBundleGetBundleWithIdentifier(S("com.apple.HIServices")); + err = -ENOENT; + if (hi_services_bundle == NULL) + goto out; + + *(void **)(&pSetApplicationIsDaemon) = pCFBundleGetFunctionPointerForName( + hi_services_bundle, + S("SetApplicationIsDaemon")); + *(void **)(&pLSApplicationCheckIn) = pCFBundleGetFunctionPointerForName( + launch_services_bundle, + S("_LSApplicationCheckIn")); + *(void **)(&pLSSetApplicationLaunchServicesServerConnectionStatus) = + pCFBundleGetFunctionPointerForName( + launch_services_bundle, + S("_LSSetApplicationLaunchServicesServerConnectionStatus")); + if (pSetApplicationIsDaemon == NULL || + pLSApplicationCheckIn == NULL || + pLSSetApplicationLaunchServicesServerConnectionStatus == NULL) { + goto out; + } + + if (pSetApplicationIsDaemon(1) != noErr) + goto out; + + pLSSetApplicationLaunchServicesServerConnectionStatus(0, NULL); + + /* Check into process manager?! */ + pLSApplicationCheckIn(-2, + pCFBundleGetInfoDictionary(pCFBundleGetMainBundle())); + + asn = pLSGetCurrentApplicationASN(); + + err = -EINVAL; + if (pLSSetApplicationInformationItem(-2, /* Magic value. */ + asn, + *display_name_key, + S(title), + NULL) != noErr) { + goto out; + } + + uv__pthread_setname_np(title); /* Don't care if it fails. */ + err = 0; + +out: + if (core_foundation_handle != NULL) + dlclose(core_foundation_handle); + + if (application_services_handle != NULL) + dlclose(application_services_handle); + + return err; +#endif /* !TARGET_OS_IPHONE */ +} diff --git a/deps/libuv/src/unix/darwin.c b/deps/libuv/src/unix/darwin.c new file mode 100644 index 00000000..cf95da21 --- /dev/null +++ b/deps/libuv/src/unix/darwin.c @@ -0,0 +1,335 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include /* _NSGetExecutablePath */ +#include +#include +#include /* sysconf */ + + +int uv__platform_loop_init(uv_loop_t* loop) { + loop->cf_state = NULL; + + if (uv__kqueue_init(loop)) + return -errno; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + uv__fsevents_loop_delete(loop); +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + static mach_timebase_info_data_t info; + + if ((ACCESS_ONCE(uint32_t, info.numer) == 0 || + ACCESS_ONCE(uint32_t, info.denom) == 0) && + mach_timebase_info(&info) != KERN_SUCCESS) + abort(); + + return mach_absolute_time() * info.numer / info.denom; +} + + +int uv_exepath(char* buffer, size_t* size) { + /* realpath(exepath) may be > PATH_MAX so double it to be on the safe side. */ + char abspath[PATH_MAX * 2 + 1]; + char exepath[PATH_MAX + 1]; + uint32_t exepath_size; + size_t abspath_size; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + exepath_size = sizeof(exepath); + if (_NSGetExecutablePath(exepath, &exepath_size)) + return -EIO; + + if (realpath(exepath, abspath) != abspath) + return -errno; + + abspath_size = strlen(abspath); + if (abspath_size == 0) + return -EIO; + + *size -= 1; + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; +} + + +uint64_t uv_get_free_memory(void) { + vm_statistics_data_t info; + mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t); + + if (host_statistics(mach_host_self(), HOST_VM_INFO, + (host_info_t)&info, &count) != KERN_SUCCESS) { + return -EINVAL; /* FIXME(bnoordhuis) Translate error. */ + } + + return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE); +} + + +uint64_t uv_get_total_memory(void) { + uint64_t info; + int which[] = {CTL_HW, HW_MEMSIZE}; + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info; +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +int uv_resident_set_memory(size_t* rss) { + mach_msg_type_number_t count; + task_basic_info_data_t info; + kern_return_t err; + + count = TASK_BASIC_INFO_COUNT; + err = task_info(mach_task_self(), + TASK_BASIC_INFO, + (task_info_t) &info, + &count); + (void) &err; + /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than + * KERN_SUCCESS implies a libuv bug. 
+ */ + assert(err == KERN_SUCCESS); + *rss = info.resident_size; + + return 0; +} + + +int uv_uptime(double* uptime) { + time_t now; + struct timeval info; + size_t size = sizeof(info); + static int which[] = {CTL_KERN, KERN_BOOTTIME}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + now = time(NULL); + *uptime = now - info.tv_sec; + + return 0; +} + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK), + multiplier = ((uint64_t)1000L / ticks); + char model[512]; + uint64_t cpuspeed; + size_t size; + unsigned int i; + natural_t numcpus; + mach_msg_type_number_t msg_type; + processor_cpu_load_info_data_t *info; + uv_cpu_info_t* cpu_info; + + size = sizeof(model); + if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) && + sysctlbyname("hw.model", &model, &size, NULL, 0)) { + return -errno; + } + + size = sizeof(cpuspeed); + if (sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0)) + return -errno; + + if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus, + (processor_info_array_t*)&info, + &msg_type) != KERN_SUCCESS) { + return -EINVAL; /* FIXME(bnoordhuis) Translate error. */ + } + + *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos)); + if (!(*cpu_infos)) { + vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type); + return -ENOMEM; + } + + *count = numcpus; + + for (i = 0; i < numcpus; i++) { + cpu_info = &(*cpu_infos)[i]; + + cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier; + cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier; + cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier; + cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier; + cpu_info->cpu_times.irq = 0; + + cpu_info->model = uv__strdup(model); + cpu_info->speed = cpuspeed/1000000; + } + vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type); + + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_dl *sa_addr; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == AF_LINK)) { + continue; + } + + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + /* + * On Mac OS X getifaddrs returns information related to Mac Addresses for + * various devices, such as firewire, etc. These are not relevant here. 
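+ * (The AF_LINK entries skipped here are revisited in the second pass below, which fills in each interface's physical address.)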
+ */ + if (ent->ifa_addr->sa_family == AF_LINK) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/deps/libuv/src/unix/dl.c b/deps/libuv/src/unix/dl.c new file mode 100644 index 00000000..fc1c052b --- /dev/null +++ b/deps/libuv/src/unix/dl.c @@ -0,0 +1,80 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include + +static int uv__dlerror(uv_lib_t* lib); + + +int uv_dlopen(const char* filename, uv_lib_t* lib) { + dlerror(); /* Reset error status. */ + lib->errmsg = NULL; + lib->handle = dlopen(filename, RTLD_LAZY); + return lib->handle ? 0 : uv__dlerror(lib); +} + + +void uv_dlclose(uv_lib_t* lib) { + uv__free(lib->errmsg); + lib->errmsg = NULL; + + if (lib->handle) { + /* Ignore errors. No good way to signal them without leaking memory. */ + dlclose(lib->handle); + lib->handle = NULL; + } +} + + +int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) { + dlerror(); /* Reset error status. */ + *ptr = dlsym(lib->handle, name); + return uv__dlerror(lib); +} + + +const char* uv_dlerror(const uv_lib_t* lib) { + return lib->errmsg ? 
lib->errmsg : "no error"; +} + + +static int uv__dlerror(uv_lib_t* lib) { + const char* errmsg; + + uv__free(lib->errmsg); + + errmsg = dlerror(); + + if (errmsg) { + lib->errmsg = uv__strdup(errmsg); + return -1; + } + else { + lib->errmsg = NULL; + return 0; + } +} diff --git a/deps/libuv/src/unix/freebsd.c b/deps/libuv/src/unix/freebsd.c new file mode 100644 index 00000000..cba44a3e --- /dev/null +++ b/deps/libuv/src/unix/freebsd.c @@ -0,0 +1,460 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include /* VM_LOADAVG */ +#include +#include +#include /* sysconf */ +#include + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + +#ifndef CPUSTATES +# define CPUSTATES 5U +#endif +#ifndef CP_USER +# define CP_USER 0 +# define CP_NICE 1 +# define CP_SYS 2 +# define CP_IDLE 3 +# define CP_INTR 4 +#endif + +static char *process_title; + + +int uv__platform_loop_init(uv_loop_t* loop) { + return uv__kqueue_init(loop); +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); +} + + +#ifdef __DragonFly__ +int uv_exepath(char* buffer, size_t* size) { + char abspath[PATH_MAX * 2 + 1]; + ssize_t abspath_size; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + abspath_size = readlink("/proc/curproc/file", abspath, sizeof(abspath)); + if (abspath_size < 0) + return -errno; + + assert(abspath_size > 0); + *size -= 1; + + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; +} +#else +int uv_exepath(char* buffer, size_t* size) { + char abspath[PATH_MAX * 2 + 1]; + int mib[4]; + size_t abspath_size; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PATHNAME; + mib[3] = -1; + + abspath_size = sizeof abspath; + if (sysctl(mib, 4, abspath, &abspath_size, NULL, 0)) + return -errno; + + assert(abspath_size > 0); + abspath_size -= 1; + *size -= 1; + + if (*size > abspath_size) + *size = abspath_size; + + memcpy(buffer, abspath, *size); + buffer[*size] = '\0'; + + return 0; +} +#endif + +uint64_t 
uv_get_free_memory(void) { + int freecount; + size_t size = sizeof(freecount); + + if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0)) + return -errno; + + return (uint64_t) freecount * sysconf(_SC_PAGESIZE); + +} + + +uint64_t uv_get_total_memory(void) { + unsigned long info; + int which[] = {CTL_HW, HW_PHYSMEM}; + + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info; +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +char** uv_setup_args(int argc, char** argv) { + process_title = argc ? uv__strdup(argv[0]) : NULL; + return argv; +} + + +int uv_set_process_title(const char* title) { + int oid[4]; + + uv__free(process_title); + process_title = uv__strdup(title); + + oid[0] = CTL_KERN; + oid[1] = KERN_PROC; + oid[2] = KERN_PROC_ARGS; + oid[3] = getpid(); + + sysctl(oid, + ARRAY_SIZE(oid), + NULL, + NULL, + process_title, + strlen(process_title) + 1); + + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + size_t len; + + if (buffer == NULL || size == 0) + return -EINVAL; + + if (process_title) { + len = strlen(process_title) + 1; + + if (size < len) + return -ENOBUFS; + + memcpy(buffer, process_title, len); + } else { + len = 0; + } + + buffer[len] = '\0'; + + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + kvm_t *kd = NULL; + struct kinfo_proc *kinfo = NULL; + pid_t pid; + int nprocs; + size_t page_size = getpagesize(); + + pid = getpid(); + + kd = kvm_open(NULL, _PATH_DEVNULL, NULL, O_RDONLY, "kvm_open"); + if (kd == NULL) goto error; + + kinfo = kvm_getprocs(kd, KERN_PROC_PID, pid, &nprocs); + if (kinfo == NULL) goto error; + +#ifdef __DragonFly__ + *rss = kinfo->kp_vm_rssize * page_size; +#else + *rss = kinfo->ki_rssize * page_size; +#endif + + kvm_close(kd); + + return 0; + +error: + if (kd) kvm_close(kd); + return -EPERM; +} + + +int uv_uptime(double* uptime) { + int r; + struct timespec sp; + r = clock_gettime(CLOCK_MONOTONIC, &sp); + if (r) + return -errno; + + *uptime = sp.tv_sec; + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK), + multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus, + cur = 0; + uv_cpu_info_t* cpu_info; + const char* maxcpus_key; + const char* cptimes_key; + char model[512]; + long* cp_times; + int numcpus; + size_t size; + int i; + +#if defined(__DragonFly__) + /* This is not quite correct but DragonFlyBSD doesn't seem to have anything + * comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total, + * not per CPU). At least this stops uv_cpu_info() from failing completely. 
+ */ + maxcpus_key = "hw.ncpu"; + cptimes_key = "kern.cp_time"; +#else + maxcpus_key = "kern.smp.maxcpus"; + cptimes_key = "kern.cp_times"; +#endif + + size = sizeof(model); + if (sysctlbyname("hw.model", &model, &size, NULL, 0)) + return -errno; + + size = sizeof(numcpus); + if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0)) + return -errno; + + *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos)); + if (!(*cpu_infos)) + return -ENOMEM; + + *count = numcpus; + + size = sizeof(cpuspeed); + if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0)) { + uv__free(*cpu_infos); + return -errno; + } + + /* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of + * ncpu. + */ + size = sizeof(maxcpus); + if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0)) { + uv__free(*cpu_infos); + return -errno; + } + + size = maxcpus * CPUSTATES * sizeof(long); + + cp_times = uv__malloc(size); + if (cp_times == NULL) { + uv__free(*cpu_infos); + return -ENOMEM; + } + + if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0)) { + uv__free(cp_times); + uv__free(*cpu_infos); + return -errno; + } + + for (i = 0; i < numcpus; i++) { + cpu_info = &(*cpu_infos)[i]; + + cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier; + cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier; + cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier; + cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier; + cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier; + + cpu_info->model = uv__strdup(model); + cpu_info->speed = cpuspeed; + + cur+=CPUSTATES; + } + + uv__free(cp_times); + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_dl *sa_addr; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == AF_LINK)) { + continue; + } + + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + /* + * On FreeBSD getifaddrs returns information related to the raw underlying + * devices. We're not interested in this information yet. 
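+ * (AF_LINK is the link-layer family; those entries are handled in the second pass below, where the physical addresses are filled in.)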
+ */ + if (ent->ifa_addr->sa_family == AF_LINK) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/deps/libuv/src/unix/fs.c b/deps/libuv/src/unix/fs.c new file mode 100644 index 00000000..3d478b79 --- /dev/null +++ b/deps/libuv/src/unix/fs.c @@ -0,0 +1,1364 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* Caveat emptor: this file deviates from the libuv convention of returning + * negated errno codes. Most uv_fs_*() functions map directly to the system + * call of the same name. For more complex wrappers, it's easier to just + * return -1 with errno set. The dispatcher in uv__fs_work() takes care of + * getting the errno to the right place (req->result or as the return value.) 
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include /* PATH_MAX */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel_) || \ + defined(__OpenBSD__) || \ + defined(__NetBSD__) +# define HAVE_PREADV 1 +#else +# define HAVE_PREADV 0 +#endif + +#if defined(__linux__) || defined(__sun) +# include +#endif + +#define INIT(subtype) \ + do { \ + req->type = UV_FS; \ + if (cb != NULL) \ + uv__req_init(loop, req, UV_FS); \ + req->fs_type = UV_FS_ ## subtype; \ + req->result = 0; \ + req->ptr = NULL; \ + req->loop = loop; \ + req->path = NULL; \ + req->new_path = NULL; \ + req->cb = cb; \ + } \ + while (0) + +#define PATH \ + do { \ + assert(path != NULL); \ + if (cb == NULL) { \ + req->path = path; \ + } else { \ + req->path = uv__strdup(path); \ + if (req->path == NULL) { \ + uv__req_unregister(loop, req); \ + return -ENOMEM; \ + } \ + } \ + } \ + while (0) + +#define PATH2 \ + do { \ + if (cb == NULL) { \ + req->path = path; \ + req->new_path = new_path; \ + } else { \ + size_t path_len; \ + size_t new_path_len; \ + path_len = strlen(path) + 1; \ + new_path_len = strlen(new_path) + 1; \ + req->path = uv__malloc(path_len + new_path_len); \ + if (req->path == NULL) { \ + uv__req_unregister(loop, req); \ + return -ENOMEM; \ + } \ + req->new_path = req->path + path_len; \ + memcpy((void*) req->path, path, path_len); \ + memcpy((void*) req->new_path, new_path, new_path_len); \ + } \ + } \ + while (0) + +#define POST \ + do { \ + if (cb != NULL) { \ + uv__work_submit(loop, &req->work_req, uv__fs_work, uv__fs_done); \ + return 0; \ + } \ + else { \ + uv__fs_work(&req->work_req); \ + return req->result; \ + } \ + } \ + while (0) + + +static ssize_t uv__fs_fdatasync(uv_fs_t* req) { +#if defined(__linux__) || defined(__sun) || defined(__NetBSD__) + return fdatasync(req->file); +#elif defined(__APPLE__) && defined(SYS_fdatasync) + return syscall(SYS_fdatasync, req->file); +#else + return fsync(req->file); +#endif +} + + +static ssize_t uv__fs_futime(uv_fs_t* req) { +#if defined(__linux__) + /* utimesat() has nanosecond resolution but we stick to microseconds + * for the sake of consistency with other platforms. + */ + static int no_utimesat; + struct timespec ts[2]; + struct timeval tv[2]; + char path[sizeof("/proc/self/fd/") + 3 * sizeof(int)]; + int r; + + if (no_utimesat) + goto skip; + + ts[0].tv_sec = req->atime; + ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000; + ts[1].tv_sec = req->mtime; + ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000; + + r = uv__utimesat(req->file, NULL, ts, 0); + if (r == 0) + return r; + + if (errno != ENOSYS) + return r; + + no_utimesat = 1; + +skip: + + tv[0].tv_sec = req->atime; + tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000; + tv[1].tv_sec = req->mtime; + tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000; + snprintf(path, sizeof(path), "/proc/self/fd/%d", (int) req->file); + + r = utimes(path, tv); + if (r == 0) + return r; + + switch (errno) { + case ENOENT: + if (fcntl(req->file, F_GETFL) == -1 && errno == EBADF) + break; + /* Fall through. 
*/ + + case EACCES: + case ENOTDIR: + errno = ENOSYS; + break; + } + + return r; + +#elif defined(__APPLE__) \ + || defined(__DragonFly__) \ + || defined(__FreeBSD__) \ + || defined(__FreeBSD_kernel__) \ + || defined(__NetBSD__) \ + || defined(__OpenBSD__) \ + || defined(__sun) + struct timeval tv[2]; + tv[0].tv_sec = req->atime; + tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000; + tv[1].tv_sec = req->mtime; + tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000; +# if defined(__sun) + return futimesat(req->file, NULL, tv); +# else + return futimes(req->file, tv); +# endif +#elif defined(_AIX71) + struct timespec ts[2]; + ts[0].tv_sec = req->atime; + ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000; + ts[1].tv_sec = req->mtime; + ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000; + return futimens(req->file, ts); +#elif defined(__MVS__) + attrib_t atr; + memset(&atr, 0, sizeof(atr)); + atr.att_mtimechg = 1; + atr.att_atimechg = 1; + atr.att_mtime = req->mtime; + atr.att_atime = req->atime; + return __fchattr(req->file, &atr, sizeof(atr)); +#else + errno = ENOSYS; + return -1; +#endif +} + + +static ssize_t uv__fs_mkdtemp(uv_fs_t* req) { + return mkdtemp((char*) req->path) ? 0 : -1; +} + + +static ssize_t uv__fs_open(uv_fs_t* req) { + static int no_cloexec_support; + int r; + + /* Try O_CLOEXEC before entering locks */ + if (no_cloexec_support == 0) { +#ifdef O_CLOEXEC + r = open(req->path, req->flags | O_CLOEXEC, req->mode); + if (r >= 0) + return r; + if (errno != EINVAL) + return r; + no_cloexec_support = 1; +#endif /* O_CLOEXEC */ + } + + if (req->cb != NULL) + uv_rwlock_rdlock(&req->loop->cloexec_lock); + + r = open(req->path, req->flags, req->mode); + + /* In case of failure `uv__cloexec` will leave error in `errno`, + * so it is enough to just set `r` to `-1`. 
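+ * The descriptor is closed first, so it is not leaked.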
+ */ + if (r >= 0 && uv__cloexec(r, 1) != 0) { + r = uv__close(r); + if (r != 0) + abort(); + r = -1; + } + + if (req->cb != NULL) + uv_rwlock_rdunlock(&req->loop->cloexec_lock); + + return r; +} + + +static ssize_t uv__fs_read(uv_fs_t* req) { +#if defined(__linux__) + static int no_preadv; +#endif + ssize_t result; + +#if defined(_AIX) + struct stat buf; + if(fstat(req->file, &buf)) + return -1; + if(S_ISDIR(buf.st_mode)) { + errno = EISDIR; + return -1; + } +#endif /* defined(_AIX) */ + if (req->off < 0) { + if (req->nbufs == 1) + result = read(req->file, req->bufs[0].base, req->bufs[0].len); + else + result = readv(req->file, (struct iovec*) req->bufs, req->nbufs); + } else { + if (req->nbufs == 1) { + result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off); + goto done; + } + +#if HAVE_PREADV + result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); +#else +# if defined(__linux__) + if (no_preadv) retry: +# endif + { + off_t nread; + size_t index; + + nread = 0; + index = 0; + result = 1; + do { + if (req->bufs[index].len > 0) { + result = pread(req->file, + req->bufs[index].base, + req->bufs[index].len, + req->off + nread); + if (result > 0) + nread += result; + } + index++; + } while (index < req->nbufs && result > 0); + if (nread > 0) + result = nread; + } +# if defined(__linux__) + else { + result = uv__preadv(req->file, + (struct iovec*)req->bufs, + req->nbufs, + req->off); + if (result == -1 && errno == ENOSYS) { + no_preadv = 1; + goto retry; + } + } +# endif +#endif + } + +done: + return result; +} + + +#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8) +#define UV_CONST_DIRENT uv__dirent_t +#else +#define UV_CONST_DIRENT const uv__dirent_t +#endif + + +static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) { + return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0; +} + + +static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) { + return strcmp((*a)->d_name, (*b)->d_name); +} + + +static ssize_t uv__fs_scandir(uv_fs_t* req) { + uv__dirent_t **dents; + int saved_errno; + int n; + + dents = NULL; + n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort); + + /* NOTE: We will use nbufs as an index field */ + req->nbufs = 0; + + if (n == 0) + goto out; /* osx still needs to deallocate some memory */ + else if (n == -1) + return n; + + req->ptr = dents; + + return n; + +out: + saved_errno = errno; + if (dents != NULL) { + int i; + + /* Memory was allocated using the system allocator, so use free() here. 
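uv__free() pairs with uv__malloc(), which may not be the system allocator.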
*/ + for (i = 0; i < n; i++) + free(dents[i]); + free(dents); + } + errno = saved_errno; + + req->ptr = NULL; + + return n; +} + + +static ssize_t uv__fs_pathmax_size(const char* path) { + ssize_t pathmax; + + pathmax = pathconf(path, _PC_PATH_MAX); + + if (pathmax == -1) { +#if defined(PATH_MAX) + return PATH_MAX; +#else +#error "PATH_MAX undefined in the current platform" +#endif + } + + return pathmax; +} + +static ssize_t uv__fs_readlink(uv_fs_t* req) { + ssize_t len; + char* buf; + + len = uv__fs_pathmax_size(req->path); + buf = uv__malloc(len + 1); + + if (buf == NULL) { + errno = ENOMEM; + return -1; + } + + len = readlink(req->path, buf, len); + + if (len == -1) { + uv__free(buf); + return -1; + } + + buf[len] = '\0'; + req->ptr = buf; + + return 0; +} + +static ssize_t uv__fs_realpath(uv_fs_t* req) { + ssize_t len; + char* buf; + + len = uv__fs_pathmax_size(req->path); + buf = uv__malloc(len + 1); + + if (buf == NULL) { + errno = ENOMEM; + return -1; + } + + if (realpath(req->path, buf) == NULL) { + uv__free(buf); + return -1; + } + + req->ptr = buf; + + return 0; +} + +static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) { + struct pollfd pfd; + int use_pread; + off_t offset; + ssize_t nsent; + ssize_t nread; + ssize_t nwritten; + size_t buflen; + size_t len; + ssize_t n; + int in_fd; + int out_fd; + char buf[8192]; + + len = req->bufsml[0].len; + in_fd = req->flags; + out_fd = req->file; + offset = req->off; + use_pread = 1; + + /* Here are the rules regarding errors: + * + * 1. Read errors are reported only if nsent==0, otherwise we return nsent. + * The user needs to know that some data has already been sent, to stop + * them from sending it twice. + * + * 2. Write errors are always reported. Write errors are bad because they + * mean data loss: we've read data but now we can't write it out. + * + * We try to use pread() and fall back to regular read() if the source fd + * doesn't support positional reads, for example when it's a pipe fd. + * + * If we get EAGAIN when writing to the target fd, we poll() on it until + * it becomes writable again. + * + * FIXME: If we get a write error when use_pread==1, it should be safe to + * return the number of sent bytes instead of an error because pread() + * is, in theory, idempotent. However, special files in /dev or /proc + * may support pread() but not necessarily return the same data on + * successive reads. + * + * FIXME: There is no way now to signal that we managed to send *some* data + * before a write error. 
+ */ + for (nsent = 0; (size_t) nsent < len; ) { + buflen = len - nsent; + + if (buflen > sizeof(buf)) + buflen = sizeof(buf); + + do + if (use_pread) + nread = pread(in_fd, buf, buflen, offset); + else + nread = read(in_fd, buf, buflen); + while (nread == -1 && errno == EINTR); + + if (nread == 0) + goto out; + + if (nread == -1) { + if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) { + use_pread = 0; + continue; + } + + if (nsent == 0) + nsent = -1; + + goto out; + } + + for (nwritten = 0; nwritten < nread; ) { + do + n = write(out_fd, buf + nwritten, nread - nwritten); + while (n == -1 && errno == EINTR); + + if (n != -1) { + nwritten += n; + continue; + } + + if (errno != EAGAIN && errno != EWOULDBLOCK) { + nsent = -1; + goto out; + } + + pfd.fd = out_fd; + pfd.events = POLLOUT; + pfd.revents = 0; + + do + n = poll(&pfd, 1, -1); + while (n == -1 && errno == EINTR); + + if (n == -1 || (pfd.revents & ~POLLOUT) != 0) { + errno = EIO; + nsent = -1; + goto out; + } + } + + offset += nread; + nsent += nread; + } + +out: + if (nsent != -1) + req->off = offset; + + return nsent; +} + + +static ssize_t uv__fs_sendfile(uv_fs_t* req) { + int in_fd; + int out_fd; + + in_fd = req->flags; + out_fd = req->file; + +#if defined(__linux__) || defined(__sun) + { + off_t off; + ssize_t r; + + off = req->off; + r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len); + + /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but + * it still writes out data. Fortunately, we can detect it by checking if + * the offset has been updated. + */ + if (r != -1 || off > req->off) { + r = off - req->off; + req->off = off; + return r; + } + + if (errno == EINVAL || + errno == EIO || + errno == ENOTSOCK || + errno == EXDEV) { + errno = 0; + return uv__fs_sendfile_emul(req); + } + + return -1; + } +#elif defined(__APPLE__) || \ + defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) + { + off_t len; + ssize_t r; + + /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in + * non-blocking mode and not all data could be written. If a non-zero + * number of bytes have been sent, we don't consider it an error. + */ + +#if defined(__FreeBSD__) || defined(__DragonFly__) + len = 0; + r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0); +#elif defined(__FreeBSD_kernel__) + len = 0; + r = bsd_sendfile(in_fd, + out_fd, + req->off, + req->bufsml[0].len, + NULL, + &len, + 0); +#else + /* The darwin sendfile takes len as an input for the length to send, + * so make sure to initialize it with the caller's value. */ + len = req->bufsml[0].len; + r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0); +#endif + + /* + * The man page for sendfile(2) on DragonFly states that `len` contains + * a meaningful value ONLY in case of EAGAIN and EINTR. + * Nothing is said about it's value in case of other errors, so better + * not depend on the potential wrong assumption that is was not modified + * by the syscall. + */ + if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) { + req->off += len; + return (ssize_t) len; + } + + if (errno == EINVAL || + errno == EIO || + errno == ENOTSOCK || + errno == EXDEV) { + errno = 0; + return uv__fs_sendfile_emul(req); + } + + return -1; + } +#else + /* Squelch compiler warnings. 
*/ + (void) &in_fd; + (void) &out_fd; + + return uv__fs_sendfile_emul(req); +#endif +} + + +static ssize_t uv__fs_utime(uv_fs_t* req) { + struct utimbuf buf; + buf.actime = req->atime; + buf.modtime = req->mtime; + return utime(req->path, &buf); /* TODO use utimes() where available */ +} + + +static ssize_t uv__fs_write(uv_fs_t* req) { +#if defined(__linux__) + static int no_pwritev; +#endif + ssize_t r; + + /* Serialize writes on OS X, concurrent write() and pwrite() calls result in + * data loss. We can't use a per-file descriptor lock, the descriptor may be + * a dup(). + */ +#if defined(__APPLE__) + static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; + + if (pthread_mutex_lock(&lock)) + abort(); +#endif + + if (req->off < 0) { + if (req->nbufs == 1) + r = write(req->file, req->bufs[0].base, req->bufs[0].len); + else + r = writev(req->file, (struct iovec*) req->bufs, req->nbufs); + } else { + if (req->nbufs == 1) { + r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off); + goto done; + } +#if HAVE_PREADV + r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); +#else +# if defined(__linux__) + if (no_pwritev) retry: +# endif + { + off_t written; + size_t index; + + written = 0; + index = 0; + r = 0; + do { + if (req->bufs[index].len > 0) { + r = pwrite(req->file, + req->bufs[index].base, + req->bufs[index].len, + req->off + written); + if (r > 0) + written += r; + } + index++; + } while (index < req->nbufs && r >= 0); + if (written > 0) + r = written; + } +# if defined(__linux__) + else { + r = uv__pwritev(req->file, + (struct iovec*) req->bufs, + req->nbufs, + req->off); + if (r == -1 && errno == ENOSYS) { + no_pwritev = 1; + goto retry; + } + } +# endif +#endif + } + +done: +#if defined(__APPLE__) + if (pthread_mutex_unlock(&lock)) + abort(); +#endif + + return r; +} + +static void uv__to_stat(struct stat* src, uv_stat_t* dst) { + dst->st_dev = src->st_dev; + dst->st_mode = src->st_mode; + dst->st_nlink = src->st_nlink; + dst->st_uid = src->st_uid; + dst->st_gid = src->st_gid; + dst->st_rdev = src->st_rdev; + dst->st_ino = src->st_ino; + dst->st_size = src->st_size; + dst->st_blksize = src->st_blksize; + dst->st_blocks = src->st_blocks; + +#if defined(__APPLE__) + dst->st_atim.tv_sec = src->st_atimespec.tv_sec; + dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec; + dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec; + dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec; + dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec; + dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec; + dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec; + dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec; + dst->st_flags = src->st_flags; + dst->st_gen = src->st_gen; +#elif defined(__ANDROID__) + dst->st_atim.tv_sec = src->st_atime; + dst->st_atim.tv_nsec = src->st_atimensec; + dst->st_mtim.tv_sec = src->st_mtime; + dst->st_mtim.tv_nsec = src->st_mtimensec; + dst->st_ctim.tv_sec = src->st_ctime; + dst->st_ctim.tv_nsec = src->st_ctimensec; + dst->st_birthtim.tv_sec = src->st_ctime; + dst->st_birthtim.tv_nsec = src->st_ctimensec; + dst->st_flags = 0; + dst->st_gen = 0; +#elif !defined(_AIX) && ( \ + defined(_GNU_SOURCE) || \ + defined(_BSD_SOURCE) || \ + defined(_SVID_SOURCE) || \ + defined(_XOPEN_SOURCE) || \ + defined(_DEFAULT_SOURCE)) + dst->st_atim.tv_sec = src->st_atim.tv_sec; + dst->st_atim.tv_nsec = src->st_atim.tv_nsec; + dst->st_mtim.tv_sec = src->st_mtim.tv_sec; + dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec; + dst->st_ctim.tv_sec = src->st_ctim.tv_sec; + 
dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec; +# if defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__OpenBSD__) || \ + defined(__NetBSD__) + dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec; + dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec; + dst->st_flags = src->st_flags; + dst->st_gen = src->st_gen; +# else + dst->st_birthtim.tv_sec = src->st_ctim.tv_sec; + dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec; + dst->st_flags = 0; + dst->st_gen = 0; +# endif +#else + dst->st_atim.tv_sec = src->st_atime; + dst->st_atim.tv_nsec = 0; + dst->st_mtim.tv_sec = src->st_mtime; + dst->st_mtim.tv_nsec = 0; + dst->st_ctim.tv_sec = src->st_ctime; + dst->st_ctim.tv_nsec = 0; + dst->st_birthtim.tv_sec = src->st_ctime; + dst->st_birthtim.tv_nsec = 0; + dst->st_flags = 0; + dst->st_gen = 0; +#endif +} + + +static int uv__fs_stat(const char *path, uv_stat_t *buf) { + struct stat pbuf; + int ret; + + ret = stat(path, &pbuf); + if (ret == 0) + uv__to_stat(&pbuf, buf); + + return ret; +} + + +static int uv__fs_lstat(const char *path, uv_stat_t *buf) { + struct stat pbuf; + int ret; + + ret = lstat(path, &pbuf); + if (ret == 0) + uv__to_stat(&pbuf, buf); + + return ret; +} + + +static int uv__fs_fstat(int fd, uv_stat_t *buf) { + struct stat pbuf; + int ret; + + ret = fstat(fd, &pbuf); + if (ret == 0) + uv__to_stat(&pbuf, buf); + + return ret; +} + + +typedef ssize_t (*uv__fs_buf_iter_processor)(uv_fs_t* req); +static ssize_t uv__fs_buf_iter(uv_fs_t* req, uv__fs_buf_iter_processor process) { + unsigned int iovmax; + unsigned int nbufs; + uv_buf_t* bufs; + ssize_t total; + ssize_t result; + + iovmax = uv__getiovmax(); + nbufs = req->nbufs; + bufs = req->bufs; + total = 0; + + while (nbufs > 0) { + req->nbufs = nbufs; + if (req->nbufs > iovmax) + req->nbufs = iovmax; + + result = process(req); + if (result <= 0) { + if (total == 0) + total = result; + break; + } + + if (req->off >= 0) + req->off += result; + + req->bufs += req->nbufs; + nbufs -= req->nbufs; + total += result; + } + + if (errno == EINTR && total == -1) + return total; + + if (bufs != req->bufsml) + uv__free(bufs); + + req->bufs = NULL; + req->nbufs = 0; + + return total; +} + + +static void uv__fs_work(struct uv__work* w) { + int retry_on_eintr; + uv_fs_t* req; + ssize_t r; + + req = container_of(w, uv_fs_t, work_req); + retry_on_eintr = !(req->fs_type == UV_FS_CLOSE); + + do { + errno = 0; + +#define X(type, action) \ + case UV_FS_ ## type: \ + r = action; \ + break; + + switch (req->fs_type) { + X(ACCESS, access(req->path, req->flags)); + X(CHMOD, chmod(req->path, req->mode)); + X(CHOWN, chown(req->path, req->uid, req->gid)); + X(CLOSE, close(req->file)); + X(FCHMOD, fchmod(req->file, req->mode)); + X(FCHOWN, fchown(req->file, req->uid, req->gid)); + X(FDATASYNC, uv__fs_fdatasync(req)); + X(FSTAT, uv__fs_fstat(req->file, &req->statbuf)); + X(FSYNC, fsync(req->file)); + X(FTRUNCATE, ftruncate(req->file, req->off)); + X(FUTIME, uv__fs_futime(req)); + X(LSTAT, uv__fs_lstat(req->path, &req->statbuf)); + X(LINK, link(req->path, req->new_path)); + X(MKDIR, mkdir(req->path, req->mode)); + X(MKDTEMP, uv__fs_mkdtemp(req)); + X(OPEN, uv__fs_open(req)); + X(READ, uv__fs_buf_iter(req, uv__fs_read)); + X(SCANDIR, uv__fs_scandir(req)); + X(READLINK, uv__fs_readlink(req)); + X(REALPATH, uv__fs_realpath(req)); + X(RENAME, rename(req->path, req->new_path)); + X(RMDIR, rmdir(req->path)); + X(SENDFILE, uv__fs_sendfile(req)); + X(STAT, uv__fs_stat(req->path, &req->statbuf)); + X(SYMLINK, symlink(req->path, req->new_path)); + X(UNLINK, 
unlink(req->path)); + X(UTIME, uv__fs_utime(req)); + X(WRITE, uv__fs_buf_iter(req, uv__fs_write)); + default: abort(); + } +#undef X + } while (r == -1 && errno == EINTR && retry_on_eintr); + + if (r == -1) + req->result = -errno; + else + req->result = r; + + if (r == 0 && (req->fs_type == UV_FS_STAT || + req->fs_type == UV_FS_FSTAT || + req->fs_type == UV_FS_LSTAT)) { + req->ptr = &req->statbuf; + } +} + + +static void uv__fs_done(struct uv__work* w, int status) { + uv_fs_t* req; + + req = container_of(w, uv_fs_t, work_req); + uv__req_unregister(req->loop, req); + + if (status == -ECANCELED) { + assert(req->result == 0); + req->result = -ECANCELED; + } + + req->cb(req); +} + + +int uv_fs_access(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb) { + INIT(ACCESS); + PATH; + req->flags = flags; + POST; +} + + +int uv_fs_chmod(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb) { + INIT(CHMOD); + PATH; + req->mode = mode; + POST; +} + + +int uv_fs_chown(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb) { + INIT(CHOWN); + PATH; + req->uid = uid; + req->gid = gid; + POST; +} + + +int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(CLOSE); + req->file = file; + POST; +} + + +int uv_fs_fchmod(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int mode, + uv_fs_cb cb) { + INIT(FCHMOD); + req->file = file; + req->mode = mode; + POST; +} + + +int uv_fs_fchown(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + uv_uid_t uid, + uv_gid_t gid, + uv_fs_cb cb) { + INIT(FCHOWN); + req->file = file; + req->uid = uid; + req->gid = gid; + POST; +} + + +int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(FDATASYNC); + req->file = file; + POST; +} + + +int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(FSTAT); + req->file = file; + POST; +} + + +int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { + INIT(FSYNC); + req->file = file; + POST; +} + + +int uv_fs_ftruncate(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + int64_t off, + uv_fs_cb cb) { + INIT(FTRUNCATE); + req->file = file; + req->off = off; + POST; +} + + +int uv_fs_futime(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + double atime, + double mtime, + uv_fs_cb cb) { + INIT(FUTIME); + req->file = file; + req->atime = atime; + req->mtime = mtime; + POST; +} + + +int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(LSTAT); + PATH; + POST; +} + + +int uv_fs_link(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb) { + INIT(LINK); + PATH2; + POST; +} + + +int uv_fs_mkdir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int mode, + uv_fs_cb cb) { + INIT(MKDIR); + PATH; + req->mode = mode; + POST; +} + + +int uv_fs_mkdtemp(uv_loop_t* loop, + uv_fs_t* req, + const char* tpl, + uv_fs_cb cb) { + INIT(MKDTEMP); + req->path = uv__strdup(tpl); + if (req->path == NULL) { + if (cb != NULL) + uv__req_unregister(loop, req); + return -ENOMEM; + } + POST; +} + + +int uv_fs_open(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + int mode, + uv_fs_cb cb) { + INIT(OPEN); + PATH; + req->flags = flags; + req->mode = mode; + POST; +} + + +int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t off, + uv_fs_cb cb) { + if (bufs == NULL || nbufs == 0) + return -EINVAL; + + INIT(READ); + 
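/* The caller's buffer descriptors are copied into the request below; a few fit in req->bufsml, larger counts fall back to a heap allocation. */ +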
req->file = file; + + req->nbufs = nbufs; + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(*bufs)); + + if (req->bufs == NULL) { + if (cb != NULL) + uv__req_unregister(loop, req); + return -ENOMEM; + } + + memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); + + req->off = off; + POST; +} + + +int uv_fs_scandir(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb) { + INIT(SCANDIR); + PATH; + req->flags = flags; + POST; +} + + +int uv_fs_readlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + uv_fs_cb cb) { + INIT(READLINK); + PATH; + POST; +} + + +int uv_fs_realpath(uv_loop_t* loop, + uv_fs_t* req, + const char * path, + uv_fs_cb cb) { + INIT(REALPATH); + PATH; + POST; +} + + +int uv_fs_rename(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + uv_fs_cb cb) { + INIT(RENAME); + PATH2; + POST; +} + + +int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(RMDIR); + PATH; + POST; +} + + +int uv_fs_sendfile(uv_loop_t* loop, + uv_fs_t* req, + uv_file out_fd, + uv_file in_fd, + int64_t off, + size_t len, + uv_fs_cb cb) { + INIT(SENDFILE); + req->flags = in_fd; /* hack */ + req->file = out_fd; + req->off = off; + req->bufsml[0].len = len; + POST; +} + + +int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(STAT); + PATH; + POST; +} + + +int uv_fs_symlink(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + const char* new_path, + int flags, + uv_fs_cb cb) { + INIT(SYMLINK); + PATH2; + req->flags = flags; + POST; +} + + +int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + INIT(UNLINK); + PATH; + POST; +} + + +int uv_fs_utime(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + double atime, + double mtime, + uv_fs_cb cb) { + INIT(UTIME); + PATH; + req->atime = atime; + req->mtime = mtime; + POST; +} + + +int uv_fs_write(uv_loop_t* loop, + uv_fs_t* req, + uv_file file, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t off, + uv_fs_cb cb) { + if (bufs == NULL || nbufs == 0) + return -EINVAL; + + INIT(WRITE); + req->file = file; + + req->nbufs = nbufs; + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(*bufs)); + + if (req->bufs == NULL) { + if (cb != NULL) + uv__req_unregister(loop, req); + return -ENOMEM; + } + + memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); + + req->off = off; + POST; +} + + +void uv_fs_req_cleanup(uv_fs_t* req) { + /* Only necessary for asychronous requests, i.e., requests with a callback. + * Synchronous ones don't copy their arguments and have req->path and + * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP is the + * exception to the rule, it always allocates memory. + */ + if (req->path != NULL && (req->cb != NULL || req->fs_type == UV_FS_MKDTEMP)) + uv__free((void*) req->path); /* Memory is shared with req->new_path. */ + + req->path = NULL; + req->new_path = NULL; + + if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL) + uv__fs_scandir_cleanup(req); + + if (req->ptr != &req->statbuf) + uv__free(req->ptr); + req->ptr = NULL; +} diff --git a/deps/libuv/src/unix/fsevents.c b/deps/libuv/src/unix/fsevents.c new file mode 100644 index 00000000..d331a131 --- /dev/null +++ b/deps/libuv/src/unix/fsevents.c @@ -0,0 +1,904 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#if TARGET_OS_IPHONE + +/* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */ + +int uv__fsevents_init(uv_fs_event_t* handle) { + return 0; +} + + +int uv__fsevents_close(uv_fs_event_t* handle) { + return 0; +} + + +void uv__fsevents_loop_delete(uv_loop_t* loop) { +} + +#else /* TARGET_OS_IPHONE */ + +#include +#include +#include +#include + +#include +#include + +/* These are macros to avoid "initializer element is not constant" errors + * with old versions of gcc. + */ +#define kFSEventsModified (kFSEventStreamEventFlagItemFinderInfoMod | \ + kFSEventStreamEventFlagItemModified | \ + kFSEventStreamEventFlagItemInodeMetaMod | \ + kFSEventStreamEventFlagItemChangeOwner | \ + kFSEventStreamEventFlagItemXattrMod) + +#define kFSEventsRenamed (kFSEventStreamEventFlagItemCreated | \ + kFSEventStreamEventFlagItemRemoved | \ + kFSEventStreamEventFlagItemRenamed) + +#define kFSEventsSystem (kFSEventStreamEventFlagUserDropped | \ + kFSEventStreamEventFlagKernelDropped | \ + kFSEventStreamEventFlagEventIdsWrapped | \ + kFSEventStreamEventFlagHistoryDone | \ + kFSEventStreamEventFlagMount | \ + kFSEventStreamEventFlagUnmount | \ + kFSEventStreamEventFlagRootChanged) + +typedef struct uv__fsevents_event_s uv__fsevents_event_t; +typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t; +typedef struct uv__cf_loop_state_s uv__cf_loop_state_t; + +enum uv__cf_loop_signal_type_e { + kUVCFLoopSignalRegular, + kUVCFLoopSignalClosing +}; +typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t; + +struct uv__cf_loop_signal_s { + QUEUE member; + uv_fs_event_t* handle; + uv__cf_loop_signal_type_t type; +}; + +struct uv__fsevents_event_s { + QUEUE member; + int events; + char path[1]; +}; + +struct uv__cf_loop_state_s { + CFRunLoopRef loop; + CFRunLoopSourceRef signal_source; + int fsevent_need_reschedule; + FSEventStreamRef fsevent_stream; + uv_sem_t fsevent_sem; + uv_mutex_t fsevent_mutex; + void* fsevent_handles[2]; + unsigned int fsevent_handle_count; +}; + +/* Forward declarations */ +static void uv__cf_loop_cb(void* arg); +static void* uv__cf_loop_runner(void* arg); +static int uv__cf_loop_signal(uv_loop_t* loop, + uv_fs_event_t* handle, + uv__cf_loop_signal_type_t type); + +/* Lazy-loaded by uv__fsevents_global_init(). 
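The CoreFoundation/CoreServices symbols below are resolved with dlopen()/dlsym() at runtime, so the frameworks do not have to be linked at build time.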
*/ +static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef, + const void**, + CFIndex, + const CFArrayCallBacks*); +static void (*pCFRelease)(CFTypeRef); +static void (*pCFRunLoopAddSource)(CFRunLoopRef, + CFRunLoopSourceRef, + CFStringRef); +static CFRunLoopRef (*pCFRunLoopGetCurrent)(void); +static void (*pCFRunLoopRemoveSource)(CFRunLoopRef, + CFRunLoopSourceRef, + CFStringRef); +static void (*pCFRunLoopRun)(void); +static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef, + CFIndex, + CFRunLoopSourceContext*); +static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef); +static void (*pCFRunLoopStop)(CFRunLoopRef); +static void (*pCFRunLoopWakeUp)(CFRunLoopRef); +static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)( + CFAllocatorRef, + const char*); +static CFStringEncoding (*pCFStringGetSystemEncoding)(void); +static CFStringRef (*pkCFRunLoopDefaultMode); +static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef, + FSEventStreamCallback, + FSEventStreamContext*, + CFArrayRef, + FSEventStreamEventId, + CFTimeInterval, + FSEventStreamCreateFlags); +static void (*pFSEventStreamFlushSync)(FSEventStreamRef); +static void (*pFSEventStreamInvalidate)(FSEventStreamRef); +static void (*pFSEventStreamRelease)(FSEventStreamRef); +static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef, + CFRunLoopRef, + CFStringRef); +static Boolean (*pFSEventStreamStart)(FSEventStreamRef); +static void (*pFSEventStreamStop)(FSEventStreamRef); + +#define UV__FSEVENTS_PROCESS(handle, block) \ + do { \ + QUEUE events; \ + QUEUE* q; \ + uv__fsevents_event_t* event; \ + int err; \ + uv_mutex_lock(&(handle)->cf_mutex); \ + /* Split-off all events and empty original queue */ \ + QUEUE_MOVE(&(handle)->cf_events, &events); \ + /* Get error (if any) and zero original one */ \ + err = (handle)->cf_error; \ + (handle)->cf_error = 0; \ + uv_mutex_unlock(&(handle)->cf_mutex); \ + /* Loop through events, deallocating each after processing */ \ + while (!QUEUE_EMPTY(&events)) { \ + q = QUEUE_HEAD(&events); \ + event = QUEUE_DATA(q, uv__fsevents_event_t, member); \ + QUEUE_REMOVE(q); \ + /* NOTE: Checking uv__is_active() is required here, because handle \ + * callback may close handle and invoking it after it will lead to \ + * incorrect behaviour */ \ + if (!uv__is_closing((handle)) && uv__is_active((handle))) \ + block \ + /* Free allocated data */ \ + uv__free(event); \ + } \ + if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle))) \ + (handle)->cb((handle), NULL, 0, err); \ + } while (0) + + +/* Runs in UV loop's thread, when there're events to report to handle */ +static void uv__fsevents_cb(uv_async_t* cb) { + uv_fs_event_t* handle; + + handle = cb->data; + + UV__FSEVENTS_PROCESS(handle, { + handle->cb(handle, event->path[0] ? 
event->path : NULL, event->events, 0); + }); +} + + +/* Runs in CF thread, pushed event into handle's event list */ +static void uv__fsevents_push_event(uv_fs_event_t* handle, + QUEUE* events, + int err) { + assert(events != NULL || err != 0); + uv_mutex_lock(&handle->cf_mutex); + + /* Concatenate two queues */ + if (events != NULL) + QUEUE_ADD(&handle->cf_events, events); + + /* Propagate error */ + if (err != 0) + handle->cf_error = err; + uv_mutex_unlock(&handle->cf_mutex); + + uv_async_send(handle->cf_cb); +} + + +/* Runs in CF thread, when there're events in FSEventStream */ +static void uv__fsevents_event_cb(ConstFSEventStreamRef streamRef, + void* info, + size_t numEvents, + void* eventPaths, + const FSEventStreamEventFlags eventFlags[], + const FSEventStreamEventId eventIds[]) { + size_t i; + int len; + char** paths; + char* path; + char* pos; + uv_fs_event_t* handle; + QUEUE* q; + uv_loop_t* loop; + uv__cf_loop_state_t* state; + uv__fsevents_event_t* event; + QUEUE head; + + loop = info; + state = loop->cf_state; + assert(state != NULL); + paths = eventPaths; + + /* For each handle */ + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_FOREACH(q, &state->fsevent_handles) { + handle = QUEUE_DATA(q, uv_fs_event_t, cf_member); + QUEUE_INIT(&head); + + /* Process and filter out events */ + for (i = 0; i < numEvents; i++) { + /* Ignore system events */ + if (eventFlags[i] & kFSEventsSystem) + continue; + + path = paths[i]; + len = strlen(path); + + /* Filter out paths that are outside handle's request */ + if (strncmp(path, handle->realpath, handle->realpath_len) != 0) + continue; + + if (handle->realpath_len > 1 || *handle->realpath != '/') { + path += handle->realpath_len; + len -= handle->realpath_len; + + /* Skip forward slash */ + if (*path != '\0') { + path++; + len--; + } + } + +#ifdef MAC_OS_X_VERSION_10_7 + /* Ignore events with path equal to directory itself */ + if (len == 0) + continue; +#endif /* MAC_OS_X_VERSION_10_7 */ + + /* Do not emit events from subdirectories (without option set) */ + if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != 0) { + pos = strchr(path + 1, '/'); + if (pos != NULL) + continue; + } + +#ifndef MAC_OS_X_VERSION_10_7 + path = ""; + len = 0; +#endif /* MAC_OS_X_VERSION_10_7 */ + + event = uv__malloc(sizeof(*event) + len); + if (event == NULL) + break; + + memset(event, 0, sizeof(*event)); + memcpy(event->path, path, len + 1); + + if ((eventFlags[i] & kFSEventsModified) != 0 && + (eventFlags[i] & kFSEventsRenamed) == 0) + event->events = UV_CHANGE; + else + event->events = UV_RENAME; + + QUEUE_INSERT_TAIL(&head, &event->member); + } + + if (!QUEUE_EMPTY(&head)) + uv__fsevents_push_event(handle, &head, 0); + } + uv_mutex_unlock(&state->fsevent_mutex); +} + + +/* Runs in CF thread */ +static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) { + uv__cf_loop_state_t* state; + FSEventStreamContext ctx; + FSEventStreamRef ref; + CFAbsoluteTime latency; + FSEventStreamCreateFlags flags; + + /* Initialize context */ + ctx.version = 0; + ctx.info = loop; + ctx.retain = NULL; + ctx.release = NULL; + ctx.copyDescription = NULL; + + latency = 0.05; + + /* Explanation of selected flags: + * 1. NoDefer - without this flag, events that are happening continuously + * (i.e. each event is happening after time interval less than `latency`, + * counted from previous event), will be deferred and passed to callback + * once they'll either fill whole OS buffer, or when this continuous stream + * will stop (i.e. 
there'll be delay between events, bigger than + * `latency`). + * Specifying this flag will invoke callback after `latency` time passed + * since event. + * 2. FileEvents - fire callback for file changes too (by default it is firing + * it only for directory changes). + */ + flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents; + + /* + * NOTE: It might sound like a good idea to remember last seen StreamEventId, + * but in reality one dir might have last StreamEventId less than, the other, + * that is being watched now. Which will cause FSEventStream API to report + * changes to files from the past. + */ + ref = pFSEventStreamCreate(NULL, + &uv__fsevents_event_cb, + &ctx, + paths, + kFSEventStreamEventIdSinceNow, + latency, + flags); + assert(ref != NULL); + + state = loop->cf_state; + pFSEventStreamScheduleWithRunLoop(ref, + state->loop, + *pkCFRunLoopDefaultMode); + if (!pFSEventStreamStart(ref)) { + pFSEventStreamInvalidate(ref); + pFSEventStreamRelease(ref); + return -EMFILE; + } + + state->fsevent_stream = ref; + return 0; +} + + +/* Runs in CF thread */ +static void uv__fsevents_destroy_stream(uv_loop_t* loop) { + uv__cf_loop_state_t* state; + + state = loop->cf_state; + + if (state->fsevent_stream == NULL) + return; + + /* Flush all accumulated events */ + pFSEventStreamFlushSync(state->fsevent_stream); + + /* Stop emitting events */ + pFSEventStreamStop(state->fsevent_stream); + + /* Release stream */ + pFSEventStreamInvalidate(state->fsevent_stream); + pFSEventStreamRelease(state->fsevent_stream); + state->fsevent_stream = NULL; +} + + +/* Runs in CF thread, when there're new fsevent handles to add to stream */ +static void uv__fsevents_reschedule(uv_fs_event_t* handle, + uv__cf_loop_signal_type_t type) { + uv__cf_loop_state_t* state; + QUEUE* q; + uv_fs_event_t* curr; + CFArrayRef cf_paths; + CFStringRef* paths; + unsigned int i; + int err; + unsigned int path_count; + + state = handle->loop->cf_state; + paths = NULL; + cf_paths = NULL; + err = 0; + /* NOTE: `i` is used in deallocation loop below */ + i = 0; + + /* Optimization to prevent O(n^2) time spent when starting to watch + * many files simultaneously + */ + uv_mutex_lock(&state->fsevent_mutex); + if (state->fsevent_need_reschedule == 0) { + uv_mutex_unlock(&state->fsevent_mutex); + goto final; + } + state->fsevent_need_reschedule = 0; + uv_mutex_unlock(&state->fsevent_mutex); + + /* Destroy previous FSEventStream */ + uv__fsevents_destroy_stream(handle->loop); + + /* Any failure below will be a memory failure */ + err = -ENOMEM; + + /* Create list of all watched paths */ + uv_mutex_lock(&state->fsevent_mutex); + path_count = state->fsevent_handle_count; + if (path_count != 0) { + paths = uv__malloc(sizeof(*paths) * path_count); + if (paths == NULL) { + uv_mutex_unlock(&state->fsevent_mutex); + goto final; + } + + q = &state->fsevent_handles; + for (; i < path_count; i++) { + q = QUEUE_NEXT(q); + assert(q != &state->fsevent_handles); + curr = QUEUE_DATA(q, uv_fs_event_t, cf_member); + + assert(curr->realpath != NULL); + paths[i] = + pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath); + if (paths[i] == NULL) { + uv_mutex_unlock(&state->fsevent_mutex); + goto final; + } + } + } + uv_mutex_unlock(&state->fsevent_mutex); + err = 0; + + if (path_count != 0) { + /* Create new FSEventStream */ + cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL); + if (cf_paths == NULL) { + err = -ENOMEM; + goto final; + } + err = uv__fsevents_create_stream(handle->loop, cf_paths); + } + 
+final: + /* Deallocate all paths in case of failure */ + if (err != 0) { + if (cf_paths == NULL) { + while (i != 0) + pCFRelease(paths[--i]); + uv__free(paths); + } else { + /* CFArray takes ownership of both strings and original C-array */ + pCFRelease(cf_paths); + } + + /* Broadcast error to all handles */ + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_FOREACH(q, &state->fsevent_handles) { + curr = QUEUE_DATA(q, uv_fs_event_t, cf_member); + uv__fsevents_push_event(curr, NULL, err); + } + uv_mutex_unlock(&state->fsevent_mutex); + } + + /* + * Main thread will block until the removal of handle from the list, + * we must tell it when we're ready. + * + * NOTE: This is coupled with `uv_sem_wait()` in `uv__fsevents_close` + */ + if (type == kUVCFLoopSignalClosing) + uv_sem_post(&state->fsevent_sem); +} + + +static int uv__fsevents_global_init(void) { + static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER; + static void* core_foundation_handle; + static void* core_services_handle; + int err; + + err = 0; + pthread_mutex_lock(&global_init_mutex); + if (core_foundation_handle != NULL) + goto out; + + /* The libraries are never unloaded because we currently don't have a good + * mechanism for keeping a reference count. It's unlikely to be an issue + * but if it ever becomes one, we can turn the dynamic library handles into + * per-event loop properties and have the dynamic linker keep track for us. + */ + err = -ENOSYS; + core_foundation_handle = dlopen("/System/Library/Frameworks/" + "CoreFoundation.framework/" + "Versions/A/CoreFoundation", + RTLD_LAZY | RTLD_LOCAL); + if (core_foundation_handle == NULL) + goto out; + + core_services_handle = dlopen("/System/Library/Frameworks/" + "CoreServices.framework/" + "Versions/A/CoreServices", + RTLD_LAZY | RTLD_LOCAL); + if (core_services_handle == NULL) + goto out; + + err = -ENOENT; +#define V(handle, symbol) \ + do { \ + *(void **)(&p ## symbol) = dlsym((handle), #symbol); \ + if (p ## symbol == NULL) \ + goto out; \ + } \ + while (0) + V(core_foundation_handle, CFArrayCreate); + V(core_foundation_handle, CFRelease); + V(core_foundation_handle, CFRunLoopAddSource); + V(core_foundation_handle, CFRunLoopGetCurrent); + V(core_foundation_handle, CFRunLoopRemoveSource); + V(core_foundation_handle, CFRunLoopRun); + V(core_foundation_handle, CFRunLoopSourceCreate); + V(core_foundation_handle, CFRunLoopSourceSignal); + V(core_foundation_handle, CFRunLoopStop); + V(core_foundation_handle, CFRunLoopWakeUp); + V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation); + V(core_foundation_handle, CFStringGetSystemEncoding); + V(core_foundation_handle, kCFRunLoopDefaultMode); + V(core_services_handle, FSEventStreamCreate); + V(core_services_handle, FSEventStreamFlushSync); + V(core_services_handle, FSEventStreamInvalidate); + V(core_services_handle, FSEventStreamRelease); + V(core_services_handle, FSEventStreamScheduleWithRunLoop); + V(core_services_handle, FSEventStreamStart); + V(core_services_handle, FSEventStreamStop); +#undef V + err = 0; + +out: + if (err && core_services_handle != NULL) { + dlclose(core_services_handle); + core_services_handle = NULL; + } + + if (err && core_foundation_handle != NULL) { + dlclose(core_foundation_handle); + core_foundation_handle = NULL; + } + + pthread_mutex_unlock(&global_init_mutex); + return err; +} + + +/* Runs in UV loop */ +static int uv__fsevents_loop_init(uv_loop_t* loop) { + CFRunLoopSourceContext ctx; + uv__cf_loop_state_t* state; + pthread_attr_t attr_storage; + pthread_attr_t* 
attr; + int err; + + if (loop->cf_state != NULL) + return 0; + + err = uv__fsevents_global_init(); + if (err) + return err; + + state = uv__calloc(1, sizeof(*state)); + if (state == NULL) + return -ENOMEM; + + err = uv_mutex_init(&loop->cf_mutex); + if (err) + goto fail_mutex_init; + + err = uv_sem_init(&loop->cf_sem, 0); + if (err) + goto fail_sem_init; + + QUEUE_INIT(&loop->cf_signals); + + err = uv_sem_init(&state->fsevent_sem, 0); + if (err) + goto fail_fsevent_sem_init; + + err = uv_mutex_init(&state->fsevent_mutex); + if (err) + goto fail_fsevent_mutex_init; + + QUEUE_INIT(&state->fsevent_handles); + state->fsevent_need_reschedule = 0; + state->fsevent_handle_count = 0; + + memset(&ctx, 0, sizeof(ctx)); + ctx.info = loop; + ctx.perform = uv__cf_loop_cb; + state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx); + if (state->signal_source == NULL) { + err = -ENOMEM; + goto fail_signal_source_create; + } + + /* In the unlikely event that pthread_attr_init() fails, create the thread + * with the default stack size. We'll use a little more address space but + * that in itself is not a fatal error. + */ + attr = &attr_storage; + if (pthread_attr_init(attr)) + attr = NULL; + + if (attr != NULL) + if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN)) + abort(); + + loop->cf_state = state; + + /* uv_thread_t is an alias for pthread_t. */ + err = -pthread_create(&loop->cf_thread, attr, uv__cf_loop_runner, loop); + + if (attr != NULL) + pthread_attr_destroy(attr); + + if (err) + goto fail_thread_create; + + /* Synchronize threads */ + uv_sem_wait(&loop->cf_sem); + return 0; + +fail_thread_create: + loop->cf_state = NULL; + +fail_signal_source_create: + uv_mutex_destroy(&state->fsevent_mutex); + +fail_fsevent_mutex_init: + uv_sem_destroy(&state->fsevent_sem); + +fail_fsevent_sem_init: + uv_sem_destroy(&loop->cf_sem); + +fail_sem_init: + uv_mutex_destroy(&loop->cf_mutex); + +fail_mutex_init: + uv__free(state); + return err; +} + + +/* Runs in UV loop */ +void uv__fsevents_loop_delete(uv_loop_t* loop) { + uv__cf_loop_signal_t* s; + uv__cf_loop_state_t* state; + QUEUE* q; + + if (loop->cf_state == NULL) + return; + + if (uv__cf_loop_signal(loop, NULL, kUVCFLoopSignalRegular) != 0) + abort(); + + uv_thread_join(&loop->cf_thread); + uv_sem_destroy(&loop->cf_sem); + uv_mutex_destroy(&loop->cf_mutex); + + /* Free any remaining data */ + while (!QUEUE_EMPTY(&loop->cf_signals)) { + q = QUEUE_HEAD(&loop->cf_signals); + s = QUEUE_DATA(q, uv__cf_loop_signal_t, member); + QUEUE_REMOVE(q); + uv__free(s); + } + + /* Destroy state */ + state = loop->cf_state; + uv_sem_destroy(&state->fsevent_sem); + uv_mutex_destroy(&state->fsevent_mutex); + pCFRelease(state->signal_source); + uv__free(state); + loop->cf_state = NULL; +} + + +/* Runs in CF thread. 
This is the CF loop's body */ +static void* uv__cf_loop_runner(void* arg) { + uv_loop_t* loop; + uv__cf_loop_state_t* state; + + loop = arg; + state = loop->cf_state; + state->loop = pCFRunLoopGetCurrent(); + + pCFRunLoopAddSource(state->loop, + state->signal_source, + *pkCFRunLoopDefaultMode); + + uv_sem_post(&loop->cf_sem); + + pCFRunLoopRun(); + pCFRunLoopRemoveSource(state->loop, + state->signal_source, + *pkCFRunLoopDefaultMode); + + return NULL; +} + + +/* Runs in CF thread, executed after `uv__cf_loop_signal()` */ +static void uv__cf_loop_cb(void* arg) { + uv_loop_t* loop; + uv__cf_loop_state_t* state; + QUEUE* item; + QUEUE split_head; + uv__cf_loop_signal_t* s; + + loop = arg; + state = loop->cf_state; + + uv_mutex_lock(&loop->cf_mutex); + QUEUE_MOVE(&loop->cf_signals, &split_head); + uv_mutex_unlock(&loop->cf_mutex); + + while (!QUEUE_EMPTY(&split_head)) { + item = QUEUE_HEAD(&split_head); + QUEUE_REMOVE(item); + + s = QUEUE_DATA(item, uv__cf_loop_signal_t, member); + + /* This was a termination signal */ + if (s->handle == NULL) + pCFRunLoopStop(state->loop); + else + uv__fsevents_reschedule(s->handle, s->type); + + uv__free(s); + } +} + + +/* Runs in UV loop to notify CF thread */ +int uv__cf_loop_signal(uv_loop_t* loop, + uv_fs_event_t* handle, + uv__cf_loop_signal_type_t type) { + uv__cf_loop_signal_t* item; + uv__cf_loop_state_t* state; + + item = uv__malloc(sizeof(*item)); + if (item == NULL) + return -ENOMEM; + + item->handle = handle; + item->type = type; + + uv_mutex_lock(&loop->cf_mutex); + QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member); + uv_mutex_unlock(&loop->cf_mutex); + + state = loop->cf_state; + assert(state != NULL); + pCFRunLoopSourceSignal(state->signal_source); + pCFRunLoopWakeUp(state->loop); + + return 0; +} + + +/* Runs in UV loop to initialize handle */ +int uv__fsevents_init(uv_fs_event_t* handle) { + int err; + uv__cf_loop_state_t* state; + + err = uv__fsevents_loop_init(handle->loop); + if (err) + return err; + + /* Get absolute path to file */ + handle->realpath = realpath(handle->path, NULL); + if (handle->realpath == NULL) + return -errno; + handle->realpath_len = strlen(handle->realpath); + + /* Initialize event queue */ + QUEUE_INIT(&handle->cf_events); + handle->cf_error = 0; + + /* + * Events will occur in other thread. 
+ * Initialize callback for getting them back into event loop's thread + */ + handle->cf_cb = uv__malloc(sizeof(*handle->cf_cb)); + if (handle->cf_cb == NULL) { + err = -ENOMEM; + goto fail_cf_cb_malloc; + } + + handle->cf_cb->data = handle; + uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb); + handle->cf_cb->flags |= UV__HANDLE_INTERNAL; + uv_unref((uv_handle_t*) handle->cf_cb); + + err = uv_mutex_init(&handle->cf_mutex); + if (err) + goto fail_cf_mutex_init; + + /* Insert handle into the list */ + state = handle->loop->cf_state; + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member); + state->fsevent_handle_count++; + state->fsevent_need_reschedule = 1; + uv_mutex_unlock(&state->fsevent_mutex); + + /* Reschedule FSEventStream */ + assert(handle != NULL); + err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalRegular); + if (err) + goto fail_loop_signal; + + return 0; + +fail_loop_signal: + uv_mutex_destroy(&handle->cf_mutex); + +fail_cf_mutex_init: + uv__free(handle->cf_cb); + handle->cf_cb = NULL; + +fail_cf_cb_malloc: + uv__free(handle->realpath); + handle->realpath = NULL; + handle->realpath_len = 0; + + return err; +} + + +/* Runs in UV loop to de-initialize handle */ +int uv__fsevents_close(uv_fs_event_t* handle) { + int err; + uv__cf_loop_state_t* state; + + if (handle->cf_cb == NULL) + return -EINVAL; + + /* Remove handle from the list */ + state = handle->loop->cf_state; + uv_mutex_lock(&state->fsevent_mutex); + QUEUE_REMOVE(&handle->cf_member); + state->fsevent_handle_count--; + state->fsevent_need_reschedule = 1; + uv_mutex_unlock(&state->fsevent_mutex); + + /* Reschedule FSEventStream */ + assert(handle != NULL); + err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalClosing); + if (err) + return -err; + + /* Wait for deinitialization */ + uv_sem_wait(&state->fsevent_sem); + + uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) uv__free); + handle->cf_cb = NULL; + + /* Free data in queue */ + UV__FSEVENTS_PROCESS(handle, { + /* NOP */ + }); + + uv_mutex_destroy(&handle->cf_mutex); + uv__free(handle->realpath); + handle->realpath = NULL; + handle->realpath_len = 0; + + return 0; +} + +#endif /* TARGET_OS_IPHONE */ diff --git a/deps/libuv/src/unix/getaddrinfo.c b/deps/libuv/src/unix/getaddrinfo.c new file mode 100644 index 00000000..2049aea2 --- /dev/null +++ b/deps/libuv/src/unix/getaddrinfo.c @@ -0,0 +1,202 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* Expose glibc-specific EAI_* error codes. Needs to be defined before we + * include any headers. + */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#include "uv.h" +#include "internal.h" + +#include +#include /* NULL */ +#include +#include + +/* EAI_* constants. */ +#include + + +int uv__getaddrinfo_translate_error(int sys_err) { + switch (sys_err) { + case 0: return 0; +#if defined(EAI_ADDRFAMILY) + case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY; +#endif +#if defined(EAI_AGAIN) + case EAI_AGAIN: return UV_EAI_AGAIN; +#endif +#if defined(EAI_BADFLAGS) + case EAI_BADFLAGS: return UV_EAI_BADFLAGS; +#endif +#if defined(EAI_BADHINTS) + case EAI_BADHINTS: return UV_EAI_BADHINTS; +#endif +#if defined(EAI_CANCELED) + case EAI_CANCELED: return UV_EAI_CANCELED; +#endif +#if defined(EAI_FAIL) + case EAI_FAIL: return UV_EAI_FAIL; +#endif +#if defined(EAI_FAMILY) + case EAI_FAMILY: return UV_EAI_FAMILY; +#endif +#if defined(EAI_MEMORY) + case EAI_MEMORY: return UV_EAI_MEMORY; +#endif +#if defined(EAI_NODATA) + case EAI_NODATA: return UV_EAI_NODATA; +#endif +#if defined(EAI_NONAME) +# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME + case EAI_NONAME: return UV_EAI_NONAME; +# endif +#endif +#if defined(EAI_OVERFLOW) + case EAI_OVERFLOW: return UV_EAI_OVERFLOW; +#endif +#if defined(EAI_PROTOCOL) + case EAI_PROTOCOL: return UV_EAI_PROTOCOL; +#endif +#if defined(EAI_SERVICE) + case EAI_SERVICE: return UV_EAI_SERVICE; +#endif +#if defined(EAI_SOCKTYPE) + case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE; +#endif +#if defined(EAI_SYSTEM) + case EAI_SYSTEM: return -errno; +#endif + } + assert(!"unknown EAI_* error code"); + abort(); + return 0; /* Pacify compiler. */ +} + + +static void uv__getaddrinfo_work(struct uv__work* w) { + uv_getaddrinfo_t* req; + int err; + + req = container_of(w, uv_getaddrinfo_t, work_req); + err = getaddrinfo(req->hostname, req->service, req->hints, &req->addrinfo); + req->retcode = uv__getaddrinfo_translate_error(err); +} + + +static void uv__getaddrinfo_done(struct uv__work* w, int status) { + uv_getaddrinfo_t* req; + + req = container_of(w, uv_getaddrinfo_t, work_req); + uv__req_unregister(req->loop, req); + + /* See initialization in uv_getaddrinfo(). */ + if (req->hints) + uv__free(req->hints); + else if (req->service) + uv__free(req->service); + else if (req->hostname) + uv__free(req->hostname); + else + assert(0); + + req->hints = NULL; + req->service = NULL; + req->hostname = NULL; + + if (status == -ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + } + + if (req->cb) + req->cb(req, req->retcode, req->addrinfo); +} + + +int uv_getaddrinfo(uv_loop_t* loop, + uv_getaddrinfo_t* req, + uv_getaddrinfo_cb cb, + const char* hostname, + const char* service, + const struct addrinfo* hints) { + size_t hostname_len; + size_t service_len; + size_t hints_len; + size_t len; + char* buf; + + if (req == NULL || (hostname == NULL && service == NULL)) + return -EINVAL; + + hostname_len = hostname ? strlen(hostname) + 1 : 0; + service_len = service ? strlen(service) + 1 : 0; + hints_len = hints ? 
sizeof(*hints) : 0; + buf = uv__malloc(hostname_len + service_len + hints_len); + + if (buf == NULL) + return -ENOMEM; + + uv__req_init(loop, req, UV_GETADDRINFO); + req->loop = loop; + req->cb = cb; + req->addrinfo = NULL; + req->hints = NULL; + req->service = NULL; + req->hostname = NULL; + req->retcode = 0; + + /* order matters, see uv_getaddrinfo_done() */ + len = 0; + + if (hints) { + req->hints = memcpy(buf + len, hints, sizeof(*hints)); + len += sizeof(*hints); + } + + if (service) { + req->service = memcpy(buf + len, service, service_len); + len += service_len; + } + + if (hostname) + req->hostname = memcpy(buf + len, hostname, hostname_len); + + if (cb) { + uv__work_submit(loop, + &req->work_req, + uv__getaddrinfo_work, + uv__getaddrinfo_done); + return 0; + } else { + uv__getaddrinfo_work(&req->work_req); + uv__getaddrinfo_done(&req->work_req, 0); + return req->retcode; + } +} + + +void uv_freeaddrinfo(struct addrinfo* ai) { + if (ai) + freeaddrinfo(ai); +} diff --git a/deps/libuv/src/unix/getnameinfo.c b/deps/libuv/src/unix/getnameinfo.c new file mode 100644 index 00000000..daa798a4 --- /dev/null +++ b/deps/libuv/src/unix/getnameinfo.c @@ -0,0 +1,120 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to +* deal in the Software without restriction, including without limitation the +* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +* sell copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +* IN THE SOFTWARE. 
+*/ + +#include +#include +#include +#include + +#include "uv.h" +#include "internal.h" + + +static void uv__getnameinfo_work(struct uv__work* w) { + uv_getnameinfo_t* req; + int err; + socklen_t salen; + + req = container_of(w, uv_getnameinfo_t, work_req); + + if (req->storage.ss_family == AF_INET) + salen = sizeof(struct sockaddr_in); + else if (req->storage.ss_family == AF_INET6) + salen = sizeof(struct sockaddr_in6); + else + abort(); + + err = getnameinfo((struct sockaddr*) &req->storage, + salen, + req->host, + sizeof(req->host), + req->service, + sizeof(req->service), + req->flags); + req->retcode = uv__getaddrinfo_translate_error(err); +} + +static void uv__getnameinfo_done(struct uv__work* w, int status) { + uv_getnameinfo_t* req; + char* host; + char* service; + + req = container_of(w, uv_getnameinfo_t, work_req); + uv__req_unregister(req->loop, req); + host = service = NULL; + + if (status == -ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + } else if (req->retcode == 0) { + host = req->host; + service = req->service; + } + + if (req->getnameinfo_cb) + req->getnameinfo_cb(req, req->retcode, host, service); +} + +/* +* Entry point for getnameinfo +* return 0 if a callback will be made +* return error code if validation fails +*/ +int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const struct sockaddr* addr, + int flags) { + if (req == NULL || addr == NULL) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in)); + } else if (addr->sa_family == AF_INET6) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in6)); + } else { + return UV_EINVAL; + } + + uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO); + + req->getnameinfo_cb = getnameinfo_cb; + req->flags = flags; + req->type = UV_GETNAMEINFO; + req->loop = loop; + req->retcode = 0; + + if (getnameinfo_cb) { + uv__work_submit(loop, + &req->work_req, + uv__getnameinfo_work, + uv__getnameinfo_done); + return 0; + } else { + uv__getnameinfo_work(&req->work_req); + uv__getnameinfo_done(&req->work_req, 0); + return req->retcode; + } +} diff --git a/deps/libuv/src/unix/internal.h b/deps/libuv/src/unix/internal.h new file mode 100644 index 00000000..ee76c837 --- /dev/null +++ b/deps/libuv/src/unix/internal.h @@ -0,0 +1,325 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef UV_UNIX_INTERNAL_H_ +#define UV_UNIX_INTERNAL_H_ + +#include "uv-common.h" + +#include +#include /* abort */ +#include /* strrchr */ +#include /* O_CLOEXEC, may be */ +#include + +#if defined(__STRICT_ANSI__) +# define inline __inline +#endif + +#if defined(__linux__) +# include "linux-syscalls.h" +#endif /* __linux__ */ + +#if defined(__sun) +# include +# include +#endif /* __sun */ + +#if defined(_AIX) +# define reqevents events +# define rtnevents revents +# include +#else +# include +#endif /* _AIX */ + +#if defined(__APPLE__) && !TARGET_OS_IPHONE +# include +#endif + +#if defined(__ANDROID__) +int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset); +# ifdef pthread_sigmask +# undef pthread_sigmask +# endif +# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset) +#endif + +#define ACCESS_ONCE(type, var) \ + (*(volatile type*) &(var)) + +#define ROUND_UP(a, b) \ + ((a) % (b) ? ((a) + (b)) - ((a) % (b)) : (a)) + +#define UNREACHABLE() \ + do { \ + assert(0 && "unreachable code"); \ + abort(); \ + } \ + while (0) + +#define SAVE_ERRNO(block) \ + do { \ + int _saved_errno = errno; \ + do { block; } while (0); \ + errno = _saved_errno; \ + } \ + while (0) + +/* The __clang__ and __INTEL_COMPILER checks are superfluous because they + * define __GNUC__. They are here to convey to you, dear reader, that these + * macros are enabled when compiling with clang or icc. + */ +#if defined(__clang__) || \ + defined(__GNUC__) || \ + defined(__INTEL_COMPILER) || \ + defined(__SUNPRO_C) +# define UV_DESTRUCTOR(declaration) __attribute__((destructor)) declaration +# define UV_UNUSED(declaration) __attribute__((unused)) declaration +#else +# define UV_DESTRUCTOR(declaration) declaration +# define UV_UNUSED(declaration) declaration +#endif + +/* Leans on the fact that, on Linux, POLLRDHUP == EPOLLRDHUP. */ +#ifdef POLLRDHUP +# define UV__POLLRDHUP POLLRDHUP +#else +# define UV__POLLRDHUP 0x2000 +#endif + +#if !defined(O_CLOEXEC) && defined(__FreeBSD__) +/* + * It may be that we are just missing `__POSIX_VISIBLE >= 200809`. + * Try using fixed value const and give up, if it doesn't work + */ +# define O_CLOEXEC 0x00100000 +#endif + +typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t; + +/* handle flags */ +enum { + UV_CLOSING = 0x01, /* uv_close() called but not finished. */ + UV_CLOSED = 0x02, /* close(2) finished. */ + UV_STREAM_READING = 0x04, /* uv_read_start() called. */ + UV_STREAM_SHUTTING = 0x08, /* uv_shutdown() called but not complete. */ + UV_STREAM_SHUT = 0x10, /* Write side closed. */ + UV_STREAM_READABLE = 0x20, /* The stream is readable */ + UV_STREAM_WRITABLE = 0x40, /* The stream is writable */ + UV_STREAM_BLOCKING = 0x80, /* Synchronous writes. */ + UV_STREAM_READ_PARTIAL = 0x100, /* read(2) read less than requested. */ + UV_STREAM_READ_EOF = 0x200, /* read(2) read EOF. */ + UV_TCP_NODELAY = 0x400, /* Disable Nagle. */ + UV_TCP_KEEPALIVE = 0x800, /* Turn on keep-alive. */ + UV_TCP_SINGLE_ACCEPT = 0x1000, /* Only accept() when idle. */ + UV_HANDLE_IPV6 = 0x10000, /* Handle is bound to a IPv6 socket. */ + UV_UDP_PROCESSING = 0x20000, /* Handle is running the send callback queue. */ + UV_HANDLE_BOUND = 0x40000 /* Handle is bound to an address and port */ +}; + +/* loop flags */ +enum { + UV_LOOP_BLOCK_SIGPROF = 1 +}; + +typedef enum { + UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */ + UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. 
*/ +} uv_clocktype_t; + +struct uv__stream_queued_fds_s { + unsigned int size; + unsigned int offset; + int fds[1]; +}; + + +#if defined(_AIX) || \ + defined(__APPLE__) || \ + defined(__DragonFly__) || \ + defined(__FreeBSD__) || \ + defined(__FreeBSD_kernel__) || \ + defined(__linux__) +#define uv__cloexec uv__cloexec_ioctl +#define uv__nonblock uv__nonblock_ioctl +#else +#define uv__cloexec uv__cloexec_fcntl +#define uv__nonblock uv__nonblock_fcntl +#endif + +/* core */ +int uv__cloexec_ioctl(int fd, int set); +int uv__cloexec_fcntl(int fd, int set); +int uv__nonblock_ioctl(int fd, int set); +int uv__nonblock_fcntl(int fd, int set); +int uv__close(int fd); +int uv__close_nocheckstdio(int fd); +int uv__socket(int domain, int type, int protocol); +int uv__dup(int fd); +ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags); +void uv__make_close_pending(uv_handle_t* handle); +int uv__getiovmax(void); + +void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd); +void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events); +void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events); +void uv__io_close(uv_loop_t* loop, uv__io_t* w); +void uv__io_feed(uv_loop_t* loop, uv__io_t* w); +int uv__io_active(const uv__io_t* w, unsigned int events); +int uv__io_check_fd(uv_loop_t* loop, int fd); +void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */ + +/* async */ +void uv__async_send(struct uv__async* wa); +void uv__async_init(struct uv__async* wa); +int uv__async_start(uv_loop_t* loop, struct uv__async* wa, uv__async_cb cb); +void uv__async_stop(uv_loop_t* loop, struct uv__async* wa); + +/* loop */ +void uv__run_idle(uv_loop_t* loop); +void uv__run_check(uv_loop_t* loop); +void uv__run_prepare(uv_loop_t* loop); + +/* stream */ +void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream, + uv_handle_type type); +int uv__stream_open(uv_stream_t*, int fd, int flags); +void uv__stream_destroy(uv_stream_t* stream); +#if defined(__APPLE__) +int uv__stream_try_select(uv_stream_t* stream, int* fd); +#endif /* defined(__APPLE__) */ +void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events); +int uv__accept(int sockfd); +int uv__dup2_cloexec(int oldfd, int newfd); +int uv__open_cloexec(const char* path, int flags); + +/* tcp */ +int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb); +int uv__tcp_nodelay(int fd, int on); +int uv__tcp_keepalive(int fd, int on, unsigned int delay); + +/* pipe */ +int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb); + +/* timer */ +void uv__run_timers(uv_loop_t* loop); +int uv__next_timeout(const uv_loop_t* loop); + +/* signal */ +void uv__signal_close(uv_signal_t* handle); +void uv__signal_global_once_init(void); +void uv__signal_loop_cleanup(uv_loop_t* loop); + +/* platform specific */ +uint64_t uv__hrtime(uv_clocktype_t type); +int uv__kqueue_init(uv_loop_t* loop); +int uv__platform_loop_init(uv_loop_t* loop); +void uv__platform_loop_delete(uv_loop_t* loop); +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd); + +/* various */ +void uv__async_close(uv_async_t* handle); +void uv__check_close(uv_check_t* handle); +void uv__fs_event_close(uv_fs_event_t* handle); +void uv__idle_close(uv_idle_t* handle); +void uv__pipe_close(uv_pipe_t* handle); +void uv__poll_close(uv_poll_t* handle); +void uv__prepare_close(uv_prepare_t* handle); +void uv__process_close(uv_process_t* handle); +void uv__stream_close(uv_stream_t* handle); +void uv__tcp_close(uv_tcp_t* handle); +void uv__timer_close(uv_timer_t* 
handle); +void uv__udp_close(uv_udp_t* handle); +void uv__udp_finish_close(uv_udp_t* handle); +uv_handle_type uv__handle_type(int fd); +FILE* uv__open_file(const char* path); +int uv__getpwuid_r(uv_passwd_t* pwd); + + +#if defined(__APPLE__) +int uv___stream_fd(const uv_stream_t* handle); +#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle))) +#else +#define uv__stream_fd(handle) ((handle)->io_watcher.fd) +#endif /* defined(__APPLE__) */ + +#ifdef UV__O_NONBLOCK +# define UV__F_NONBLOCK UV__O_NONBLOCK +#else +# define UV__F_NONBLOCK 1 +#endif + +int uv__make_socketpair(int fds[2], int flags); +int uv__make_pipe(int fds[2], int flags); + +#if defined(__APPLE__) + +int uv__fsevents_init(uv_fs_event_t* handle); +int uv__fsevents_close(uv_fs_event_t* handle); +void uv__fsevents_loop_delete(uv_loop_t* loop); + +/* OSX < 10.7 has no file events, polyfill them */ +#ifndef MAC_OS_X_VERSION_10_7 + +static const int kFSEventStreamCreateFlagFileEvents = 0x00000010; +static const int kFSEventStreamEventFlagItemCreated = 0x00000100; +static const int kFSEventStreamEventFlagItemRemoved = 0x00000200; +static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400; +static const int kFSEventStreamEventFlagItemRenamed = 0x00000800; +static const int kFSEventStreamEventFlagItemModified = 0x00001000; +static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000; +static const int kFSEventStreamEventFlagItemChangeOwner = 0x00004000; +static const int kFSEventStreamEventFlagItemXattrMod = 0x00008000; +static const int kFSEventStreamEventFlagItemIsFile = 0x00010000; +static const int kFSEventStreamEventFlagItemIsDir = 0x00020000; +static const int kFSEventStreamEventFlagItemIsSymlink = 0x00040000; + +#endif /* __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070 */ + +#endif /* defined(__APPLE__) */ + +UV_UNUSED(static void uv__req_init(uv_loop_t* loop, + uv_req_t* req, + uv_req_type type)) { + req->type = type; + uv__req_register(loop, req); +} +#define uv__req_init(loop, req, type) \ + uv__req_init((loop), (uv_req_t*)(req), (type)) + +UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) { + /* Use a fast time source if available. We only need millisecond precision. + */ + loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000; +} + +UV_UNUSED(static char* uv__basename_r(const char* path)) { + char* s; + + s = strrchr(path, '/'); + if (s == NULL) + return (char*) path; + + return s + 1; +} + +#endif /* UV_UNIX_INTERNAL_H_ */ diff --git a/deps/libuv/src/unix/kqueue.c b/deps/libuv/src/unix/kqueue.c new file mode 100644 index 00000000..fffd4626 --- /dev/null +++ b/deps/libuv/src/unix/kqueue.c @@ -0,0 +1,463 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags); + + +int uv__kqueue_init(uv_loop_t* loop) { + loop->backend_fd = kqueue(); + if (loop->backend_fd == -1) + return -errno; + + uv__cloexec(loop->backend_fd, 1); + + return 0; +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct kevent ev; + int rc; + + rc = 0; + EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0); + if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) + rc = -errno; + + EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0); + if (rc == 0) + if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) + abort(); + + return rc; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + struct kevent events[1024]; + struct kevent* ev; + struct timespec spec; + unsigned int nevents; + unsigned int revents; + QUEUE* q; + uv__io_t* w; + sigset_t* pset; + sigset_t set; + uint64_t base; + uint64_t diff; + int have_signals; + int filter; + int fflags; + int count; + int nfds; + int fd; + int op; + int i; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + nevents = 0; + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + assert(w->fd >= 0); + assert(w->fd < (int) loop->nwatchers); + + if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) { + filter = EVFILT_READ; + fflags = 0; + op = EV_ADD; + + if (w->cb == uv__fs_event) { + filter = EVFILT_VNODE; + fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME + | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE; + op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */ + } + + EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0); + + if (++nevents == ARRAY_SIZE(events)) { + if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL)) + abort(); + nevents = 0; + } + } + + if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) { + EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0); + + if (++nevents == ARRAY_SIZE(events)) { + if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL)) + abort(); + nevents = 0; + } + } + + w->events = w->pevents; + } + + pset = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + pset = &set; + sigemptyset(pset); + sigaddset(pset, SIGPROF); + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + + for (;; nevents = 0) { + if (timeout != -1) { + spec.tv_sec = timeout / 1000; + spec.tv_nsec = (timeout % 1000) * 1000000; + } + + if (pset != NULL) + pthread_sigmask(SIG_BLOCK, pset, NULL); + + nfds = kevent(loop->backend_fd, + events, + nevents, + events, + ARRAY_SIZE(events), + timeout == -1 ? NULL : &spec); + + if (pset != NULL) + pthread_sigmask(SIG_UNBLOCK, pset, NULL); + + /* Update loop->time unconditionally. 
It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + return; + } + + if (nfds == -1) { + if (errno != EINTR) + abort(); + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + + /* Interrupted by a signal. Update timeout and poll again. */ + goto update_timeout; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + for (i = 0; i < nfds; i++) { + ev = events + i; + fd = ev->ident; + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (fd == -1) + continue; + w = loop->watchers[fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. */ + /* TODO batch up */ + struct kevent events[1]; + + EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); + if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) + if (errno != EBADF && errno != ENOENT) + abort(); + + continue; + } + + if (ev->filter == EVFILT_VNODE) { + assert(w->events == POLLIN); + assert(w->pevents == POLLIN); + w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */ + nevents++; + continue; + } + + revents = 0; + + if (ev->filter == EVFILT_READ) { + if (w->pevents & POLLIN) { + revents |= POLLIN; + w->rcount = ev->data; + } else { + /* TODO batch up */ + struct kevent events[1]; + EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); + if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) + if (errno != ENOENT) + abort(); + } + } + + if (ev->filter == EVFILT_WRITE) { + if (w->pevents & POLLOUT) { + revents |= POLLOUT; + w->wcount = ev->data; + } else { + /* TODO batch up */ + struct kevent events[1]; + EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); + if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) + if (errno != ENOENT) + abort(); + } + } + + if (ev->flags & EV_ERROR) + revents |= POLLERR; + + if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP)) + revents |= UV__POLLRDHUP; + + if (revents == 0) + continue; + + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, revents); + + nevents++; + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. 
*/ + timeout = 0; + continue; + } + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + diff = loop->time - base; + if (diff >= (uint64_t) timeout) + return; + + timeout -= diff; + } +} + + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct kevent* events; + uintptr_t i; + uintptr_t nfds; + + assert(loop->watchers != NULL); + + events = (struct kevent*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + if (events == NULL) + return; + + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].ident == fd) + events[i].ident = -1; +} + + +static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) { + uv_fs_event_t* handle; + struct kevent ev; + int events; + const char* path; +#if defined(F_GETPATH) + /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */ + char pathbuf[MAXPATHLEN]; +#endif + + handle = container_of(w, uv_fs_event_t, event_watcher); + + if (fflags & (NOTE_ATTRIB | NOTE_EXTEND)) + events = UV_CHANGE; + else + events = UV_RENAME; + + path = NULL; +#if defined(F_GETPATH) + /* Also works when the file has been unlinked from the file system. Passing + * in the path when the file has been deleted is arguably a little strange + * but it's consistent with what the inotify backend does. + */ + if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0) + path = uv__basename_r(pathbuf); +#endif + handle->cb(handle, path, events, 0); + + if (handle->event_watcher.fd == -1) + return; + + /* Watcher operates in one-shot mode, re-arm it. */ + fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME + | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE; + + EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0); + + if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL)) + abort(); +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { +#if defined(__APPLE__) + struct stat statbuf; +#endif /* defined(__APPLE__) */ + int fd; + + if (uv__is_active(handle)) + return -EINVAL; + + /* TODO open asynchronously - but how do we report back errors? 
*/ + fd = open(path, O_RDONLY); + if (fd == -1) + return -errno; + + uv__handle_start(handle); + uv__io_init(&handle->event_watcher, uv__fs_event, fd); + handle->path = uv__strdup(path); + handle->cb = cb; + +#if defined(__APPLE__) + /* Nullify field to perform checks later */ + handle->cf_cb = NULL; + handle->realpath = NULL; + handle->realpath_len = 0; + handle->cf_flags = flags; + + if (fstat(fd, &statbuf)) + goto fallback; + /* FSEvents works only with directories */ + if (!(statbuf.st_mode & S_IFDIR)) + goto fallback; + + /* The fallback fd is no longer needed */ + uv__close(fd); + handle->event_watcher.fd = -1; + + return uv__fsevents_init(handle); + +fallback: +#endif /* defined(__APPLE__) */ + + uv__io_start(handle->loop, &handle->event_watcher, POLLIN); + + return 0; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + if (!uv__is_active(handle)) + return 0; + + uv__handle_stop(handle); + +#if defined(__APPLE__) + if (uv__fsevents_close(handle)) +#endif /* defined(__APPLE__) */ + { + uv__io_close(handle->loop, &handle->event_watcher); + } + + uv__free(handle->path); + handle->path = NULL; + + if (handle->event_watcher.fd != -1) { + /* When FSEvents is used, we don't use the event_watcher's fd under certain + * confitions. (see uv_fs_event_start) */ + uv__close(handle->event_watcher.fd); + handle->event_watcher.fd = -1; + } + + return 0; +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { + uv_fs_event_stop(handle); +} diff --git a/deps/libuv/src/unix/linux-core.c b/deps/libuv/src/unix/linux-core.c new file mode 100644 index 00000000..58dd813d --- /dev/null +++ b/deps/libuv/src/unix/linux-core.c @@ -0,0 +1,985 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their + * EPOLL* counterparts. We use the POLL* variants in this file because that + * is what libuv uses elsewhere and it avoids a dependency on . 
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define HAVE_IFADDRS_H 1 + +#ifdef __UCLIBC__ +# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32 +# undef HAVE_IFADDRS_H +# endif +#endif + +#ifdef HAVE_IFADDRS_H +# if defined(__ANDROID__) +# include "android-ifaddrs.h" +# else +# include +# endif +# include +# include +# include +#endif /* HAVE_IFADDRS_H */ + +/* Available from 2.6.32 onwards. */ +#ifndef CLOCK_MONOTONIC_COARSE +# define CLOCK_MONOTONIC_COARSE 6 +#endif + +/* This is rather annoying: CLOCK_BOOTTIME lives in but we can't + * include that file because it conflicts with . We'll just have to + * define it ourselves. + */ +#ifndef CLOCK_BOOTTIME +# define CLOCK_BOOTTIME 7 +#endif + +static int read_models(unsigned int numcpus, uv_cpu_info_t* ci); +static int read_times(FILE* statfile_fp, + unsigned int numcpus, + uv_cpu_info_t* ci); +static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci); +static unsigned long read_cpufreq(unsigned int cpunum); + + +int uv__platform_loop_init(uv_loop_t* loop) { + int fd; + + fd = uv__epoll_create1(UV__EPOLL_CLOEXEC); + + /* epoll_create1() can fail either because it's not implemented (old kernel) + * or because it doesn't understand the EPOLL_CLOEXEC flag. + */ + if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) { + fd = uv__epoll_create(256); + + if (fd != -1) + uv__cloexec(fd, 1); + } + + loop->backend_fd = fd; + loop->inotify_fd = -1; + loop->inotify_watchers = NULL; + + if (fd == -1) + return -errno; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + if (loop->inotify_fd == -1) return; + uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN); + uv__close(loop->inotify_fd); + loop->inotify_fd = -1; +} + + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct uv__epoll_event* events; + struct uv__epoll_event dummy; + uintptr_t i; + uintptr_t nfds; + + assert(loop->watchers != NULL); + + events = (struct uv__epoll_event*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + if (events != NULL) + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].data == fd) + events[i].data = -1; + + /* Remove the file descriptor from the epoll. + * This avoids a problem where the same file description remains open + * in another process, causing repeated junk epoll events. + * + * We pass in a dummy epoll_event, to work around a bug in old kernels. + */ + if (loop->backend_fd >= 0) { + /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that + * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings. + */ + memset(&dummy, 0, sizeof(dummy)); + uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &dummy); + } +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct uv__epoll_event e; + int rc; + + e.events = POLLIN; + e.data = -1; + + rc = 0; + if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_ADD, fd, &e)) + if (errno != EEXIST) + rc = -errno; + + if (rc == 0) + if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, &e)) + abort(); + + return rc; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes + * effectively infinite on 32 bits architectures. To avoid blocking + * indefinitely, we cap the timeout and poll again if necessary. 
+ * + * Note that "30 minutes" is a simplification because it depends on + * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200, + * that being the largest value I have seen in the wild (and only once.) + */ + static const int max_safe_timeout = 1789569; + static int no_epoll_pwait; + static int no_epoll_wait; + struct uv__epoll_event events[1024]; + struct uv__epoll_event* pe; + struct uv__epoll_event e; + int real_timeout; + QUEUE* q; + uv__io_t* w; + sigset_t sigset; + uint64_t sigmask; + uint64_t base; + int have_signals; + int nevents; + int count; + int nfds; + int fd; + int op; + int i; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + assert(w->fd >= 0); + assert(w->fd < (int) loop->nwatchers); + + e.events = w->pevents; + e.data = w->fd; + + if (w->events == 0) + op = UV__EPOLL_CTL_ADD; + else + op = UV__EPOLL_CTL_MOD; + + /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching + * events, skip the syscall and squelch the events after epoll_wait(). + */ + if (uv__epoll_ctl(loop->backend_fd, op, w->fd, &e)) { + if (errno != EEXIST) + abort(); + + assert(op == UV__EPOLL_CTL_ADD); + + /* We've reactivated a file descriptor that's been watched before. */ + if (uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_MOD, w->fd, &e)) + abort(); + } + + w->events = w->pevents; + } + + sigmask = 0; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + sigemptyset(&sigset); + sigaddset(&sigset, SIGPROF); + sigmask |= 1 << (SIGPROF - 1); + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + real_timeout = timeout; + + for (;;) { + /* See the comment for max_safe_timeout for an explanation of why + * this is necessary. Executive summary: kernel bug workaround. + */ + if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout) + timeout = max_safe_timeout; + + if (sigmask != 0 && no_epoll_pwait != 0) + if (pthread_sigmask(SIG_BLOCK, &sigset, NULL)) + abort(); + + if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) { + nfds = uv__epoll_pwait(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout, + sigmask); + if (nfds == -1 && errno == ENOSYS) + no_epoll_pwait = 1; + } else { + nfds = uv__epoll_wait(loop->backend_fd, + events, + ARRAY_SIZE(events), + timeout); + if (nfds == -1 && errno == ENOSYS) + no_epoll_wait = 1; + } + + if (sigmask != 0 && no_epoll_pwait != 0) + if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL)) + abort(); + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + + if (timeout == 0) + return; + + /* We may have been inside the system call for longer than |timeout| + * milliseconds so we need to update the timestamp to avoid drift. + */ + goto update_timeout; + } + + if (nfds == -1) { + if (errno == ENOSYS) { + /* epoll_wait() or epoll_pwait() failed, try the other system call. 
*/ + assert(no_epoll_wait == 0 || no_epoll_pwait == 0); + continue; + } + + if (errno != EINTR) + abort(); + + if (timeout == -1) + continue; + + if (timeout == 0) + return; + + /* Interrupted by a signal. Update timeout and poll again. */ + goto update_timeout; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + for (i = 0; i < nfds; i++) { + pe = events + i; + fd = pe->data; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (fd == -1) + continue; + + assert(fd >= 0); + assert((unsigned) fd < loop->nwatchers); + + w = loop->watchers[fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. + * + * Ignore all errors because we may be racing with another thread + * when the file descriptor is closed. + */ + uv__epoll_ctl(loop->backend_fd, UV__EPOLL_CTL_DEL, fd, pe); + continue; + } + + /* Give users only events they're interested in. Prevents spurious + * callbacks when previous callback invocation in this loop has stopped + * the current watcher. Also, filters out events that users has not + * requested us to watch. + */ + pe->events &= w->pevents | POLLERR | POLLHUP; + + /* Work around an epoll quirk where it sometimes reports just the + * EPOLLERR or EPOLLHUP event. In order to force the event loop to + * move forward, we merge in the read/write events that the watcher + * is interested in; uv__read() and uv__write() will then deal with + * the error or hangup in the usual fashion. + * + * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user + * reads the available data, calls uv_read_stop(), then sometime later + * calls uv_read_start() again. By then, libuv has forgotten about the + * hangup and the kernel won't report EPOLLIN again because there's + * nothing left to read. If anything, libuv is to blame here. The + * current hack is just a quick bandaid; to properly fix it, libuv + * needs to remember the error/hangup event. We should get that for + * free when we switch over to edge-triggered I/O. + */ + if (pe->events == POLLERR || pe->events == POLLHUP) + pe->events |= w->pevents & (POLLIN | POLLOUT); + + if (pe->events != 0) { + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, pe->events); + + nevents++; + } + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. */ + timeout = 0; + continue; + } + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + real_timeout -= (loop->time - base); + if (real_timeout <= 0) + return; + + timeout = real_timeout; + } +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + static clock_t fast_clock_id = -1; + struct timespec t; + clock_t clock_id; + + /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has + * millisecond granularity or better. 
CLOCK_MONOTONIC_COARSE is + * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may + * decide to make a costly system call. + */ + /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE + * when it has microsecond granularity or better (unlikely). + */ + if (type == UV_CLOCK_FAST && fast_clock_id == -1) { + if (clock_getres(CLOCK_MONOTONIC_COARSE, &t) == 0 && + t.tv_nsec <= 1 * 1000 * 1000) { + fast_clock_id = CLOCK_MONOTONIC_COARSE; + } else { + fast_clock_id = CLOCK_MONOTONIC; + } + } + + clock_id = CLOCK_MONOTONIC; + if (type == UV_CLOCK_FAST) + clock_id = fast_clock_id; + + if (clock_gettime(clock_id, &t)) + return 0; /* Not really possible. */ + + return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec; +} + + +void uv_loadavg(double avg[3]) { + struct sysinfo info; + + if (sysinfo(&info) < 0) return; + + avg[0] = (double) info.loads[0] / 65536.0; + avg[1] = (double) info.loads[1] / 65536.0; + avg[2] = (double) info.loads[2] / 65536.0; +} + + +int uv_exepath(char* buffer, size_t* size) { + ssize_t n; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + n = *size - 1; + if (n > 0) + n = readlink("/proc/self/exe", buffer, n); + + if (n == -1) + return -errno; + + buffer[n] = '\0'; + *size = n; + + return 0; +} + + +uint64_t uv_get_free_memory(void) { + struct sysinfo info; + + if (sysinfo(&info) == 0) + return (uint64_t) info.freeram * info.mem_unit; + return 0; +} + + +uint64_t uv_get_total_memory(void) { + struct sysinfo info; + + if (sysinfo(&info) == 0) + return (uint64_t) info.totalram * info.mem_unit; + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + char buf[1024]; + const char* s; + ssize_t n; + long val; + int fd; + int i; + + do + fd = open("/proc/self/stat", O_RDONLY); + while (fd == -1 && errno == EINTR); + + if (fd == -1) + return -errno; + + do + n = read(fd, buf, sizeof(buf) - 1); + while (n == -1 && errno == EINTR); + + uv__close(fd); + if (n == -1) + return -errno; + buf[n] = '\0'; + + s = strchr(buf, ' '); + if (s == NULL) + goto err; + + s += 1; + if (*s != '(') + goto err; + + s = strchr(s, ')'); + if (s == NULL) + goto err; + + for (i = 1; i <= 22; i++) { + s = strchr(s + 1, ' '); + if (s == NULL) + goto err; + } + + errno = 0; + val = strtol(s, NULL, 10); + if (errno != 0) + goto err; + if (val < 0) + goto err; + + *rss = val * getpagesize(); + return 0; + +err: + return -EINVAL; +} + + +int uv_uptime(double* uptime) { + static volatile int no_clock_boottime; + struct timespec now; + int r; + + /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available + * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system + * is suspended. 
+ */ + if (no_clock_boottime) { + retry: r = clock_gettime(CLOCK_MONOTONIC, &now); + } + else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) { + no_clock_boottime = 1; + goto retry; + } + + if (r) + return -errno; + + *uptime = now.tv_sec; + return 0; +} + + +static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) { + unsigned int num; + char buf[1024]; + + if (!fgets(buf, sizeof(buf), statfile_fp)) + return -EIO; + + num = 0; + while (fgets(buf, sizeof(buf), statfile_fp)) { + if (strncmp(buf, "cpu", 3)) + break; + num++; + } + + if (num == 0) + return -EIO; + + *numcpus = num; + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int numcpus; + uv_cpu_info_t* ci; + int err; + FILE* statfile_fp; + + *cpu_infos = NULL; + *count = 0; + + statfile_fp = uv__open_file("/proc/stat"); + if (statfile_fp == NULL) + return -errno; + + err = uv__cpu_num(statfile_fp, &numcpus); + if (err < 0) + goto out; + + err = -ENOMEM; + ci = uv__calloc(numcpus, sizeof(*ci)); + if (ci == NULL) + goto out; + + err = read_models(numcpus, ci); + if (err == 0) + err = read_times(statfile_fp, numcpus, ci); + + if (err) { + uv_free_cpu_info(ci, numcpus); + goto out; + } + + /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo. + * We don't check for errors here. Worst case, the field is left zero. + */ + if (ci[0].speed == 0) + read_speeds(numcpus, ci); + + *cpu_infos = ci; + *count = numcpus; + err = 0; + +out: + + if (fclose(statfile_fp)) + if (errno != EINTR && errno != EINPROGRESS) + abort(); + + return err; +} + + +static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) { + unsigned int num; + + for (num = 0; num < numcpus; num++) + ci[num].speed = read_cpufreq(num) / 1000; +} + + +/* Also reads the CPU frequency on x86. The other architectures only have + * a BogoMIPS field, which may not be very accurate. + * + * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup. + */ +static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) { + static const char model_marker[] = "model name\t: "; + static const char speed_marker[] = "cpu MHz\t\t: "; + const char* inferred_model; + unsigned int model_idx; + unsigned int speed_idx; + char buf[1024]; + char* model; + FILE* fp; + + /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */ + (void) &model_marker; + (void) &speed_marker; + (void) &speed_idx; + (void) &model; + (void) &buf; + (void) &fp; + + model_idx = 0; + speed_idx = 0; + +#if defined(__arm__) || \ + defined(__i386__) || \ + defined(__mips__) || \ + defined(__x86_64__) + fp = uv__open_file("/proc/cpuinfo"); + if (fp == NULL) + return -errno; + + while (fgets(buf, sizeof(buf), fp)) { + if (model_idx < numcpus) { + if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) { + model = buf + sizeof(model_marker) - 1; + model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */ + if (model == NULL) { + fclose(fp); + return -ENOMEM; + } + ci[model_idx++].model = model; + continue; + } + } +#if defined(__arm__) || defined(__mips__) + if (model_idx < numcpus) { +#if defined(__arm__) + /* Fallback for pre-3.8 kernels. */ + static const char model_marker[] = "Processor\t: "; +#else /* defined(__mips__) */ + static const char model_marker[] = "cpu model\t\t: "; +#endif + if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) { + model = buf + sizeof(model_marker) - 1; + model = uv__strndup(model, strlen(model) - 1); /* Strip newline. 
*/ + if (model == NULL) { + fclose(fp); + return -ENOMEM; + } + ci[model_idx++].model = model; + continue; + } + } +#else /* !__arm__ && !__mips__ */ + if (speed_idx < numcpus) { + if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) { + ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1); + continue; + } + } +#endif /* __arm__ || __mips__ */ + } + + fclose(fp); +#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */ + + /* Now we want to make sure that all the models contain *something* because + * it's not safe to leave them as null. Copy the last entry unless there + * isn't one, in that case we simply put "unknown" into everything. + */ + inferred_model = "unknown"; + if (model_idx > 0) + inferred_model = ci[model_idx - 1].model; + + while (model_idx < numcpus) { + model = uv__strndup(inferred_model, strlen(inferred_model)); + if (model == NULL) + return -ENOMEM; + ci[model_idx++].model = model; + } + + return 0; +} + + +static int read_times(FILE* statfile_fp, + unsigned int numcpus, + uv_cpu_info_t* ci) { + unsigned long clock_ticks; + struct uv_cpu_times_s ts; + unsigned long user; + unsigned long nice; + unsigned long sys; + unsigned long idle; + unsigned long dummy; + unsigned long irq; + unsigned int num; + unsigned int len; + char buf[1024]; + + clock_ticks = sysconf(_SC_CLK_TCK); + assert(clock_ticks != (unsigned long) -1); + assert(clock_ticks != 0); + + rewind(statfile_fp); + + if (!fgets(buf, sizeof(buf), statfile_fp)) + abort(); + + num = 0; + + while (fgets(buf, sizeof(buf), statfile_fp)) { + if (num >= numcpus) + break; + + if (strncmp(buf, "cpu", 3)) + break; + + /* skip "cpu " marker */ + { + unsigned int n; + int r = sscanf(buf, "cpu%u ", &n); + assert(r == 1); + (void) r; /* silence build warning */ + for (len = sizeof("cpu0"); n /= 10; len++); + } + + /* Line contains user, nice, system, idle, iowait, irq, softirq, steal, + * guest, guest_nice but we're only interested in the first four + irq. + * + * Don't use %*s to skip fields or %ll to read straight into the uint64_t + * fields, they're not allowed in C89 mode. 
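+     * For reference, a typical line looks like +     * "cpu0 79242 0 74306 842486413 756859 6140 67701 0 0 0" (illustrative +     * values).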
+ */ + if (6 != sscanf(buf + len, + "%lu %lu %lu %lu %lu %lu", + &user, + &nice, + &sys, + &idle, + &dummy, + &irq)) + abort(); + + ts.user = clock_ticks * user; + ts.nice = clock_ticks * nice; + ts.sys = clock_ticks * sys; + ts.idle = clock_ticks * idle; + ts.irq = clock_ticks * irq; + ci[num++].cpu_times = ts; + } + assert(num == numcpus); + + return 0; +} + + +static unsigned long read_cpufreq(unsigned int cpunum) { + unsigned long val; + char buf[1024]; + FILE* fp; + + snprintf(buf, + sizeof(buf), + "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq", + cpunum); + + fp = uv__open_file(buf); + if (fp == NULL) + return 0; + + if (fscanf(fp, "%lu", &val) != 1) + val = 0; + + fclose(fp); + + return val; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, + int* count) { +#ifndef HAVE_IFADDRS_H + return -ENOSYS; +#else + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_ll *sll; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + *addresses = NULL; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == PF_PACKET)) { + continue; + } + + (*count)++; + } + + if (*count == 0) + return 0; + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + /* + * On Linux getifaddrs returns information related to the raw underlying + * devices. We're not interested in this information yet. + */ + if (ent->ifa_addr->sa_family == PF_PACKET) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != PF_PACKET)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sll = (struct sockaddr_ll*)ent->ifa_addr; + memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +#endif +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} + + +void uv__set_process_title(const char* title) { +#if defined(PR_SET_NAME) + prctl(PR_SET_NAME, title); /* Only copies first 16 characters. 
*/ +#endif +} diff --git a/deps/libuv/src/unix/linux-inotify.c b/deps/libuv/src/unix/linux-inotify.c new file mode 100644 index 00000000..4708c051 --- /dev/null +++ b/deps/libuv/src/unix/linux-inotify.c @@ -0,0 +1,285 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "tree.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct watcher_list { + RB_ENTRY(watcher_list) entry; + QUEUE watchers; + int iterating; + char* path; + int wd; +}; + +struct watcher_root { + struct watcher_list* rbh_root; +}; +#define CAST(p) ((struct watcher_root*)(p)) + + +static int compare_watchers(const struct watcher_list* a, + const struct watcher_list* b) { + if (a->wd < b->wd) return -1; + if (a->wd > b->wd) return 1; + return 0; +} + + +RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers) + + +static void uv__inotify_read(uv_loop_t* loop, + uv__io_t* w, + unsigned int revents); + + +static int new_inotify_fd(void) { + int err; + int fd; + + fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC); + if (fd != -1) + return fd; + + if (errno != ENOSYS) + return -errno; + + fd = uv__inotify_init(); + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err == 0) + err = uv__nonblock(fd, 1); + + if (err) { + uv__close(fd); + return err; + } + + return fd; +} + + +static int init_inotify(uv_loop_t* loop) { + int err; + + if (loop->inotify_fd != -1) + return 0; + + err = new_inotify_fd(); + if (err < 0) + return err; + + loop->inotify_fd = err; + uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd); + uv__io_start(loop, &loop->inotify_read_watcher, POLLIN); + + return 0; +} + + +static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) { + struct watcher_list w; + w.wd = wd; + return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w); +} + +static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) { + /* if the watcher_list->watchers is being iterated over, we can't free it. */ + if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) { + /* No watchers left for this path. Clean up. 
*/ + RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w); + uv__inotify_rm_watch(loop->inotify_fd, w->wd); + uv__free(w); + } +} + +static void uv__inotify_read(uv_loop_t* loop, + uv__io_t* dummy, + unsigned int events) { + const struct uv__inotify_event* e; + struct watcher_list* w; + uv_fs_event_t* h; + QUEUE queue; + QUEUE* q; + const char* path; + ssize_t size; + const char *p; + /* needs to be large enough for sizeof(inotify_event) + strlen(path) */ + char buf[4096]; + + while (1) { + do + size = read(loop->inotify_fd, buf, sizeof(buf)); + while (size == -1 && errno == EINTR); + + if (size == -1) { + assert(errno == EAGAIN || errno == EWOULDBLOCK); + break; + } + + assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */ + + /* Now we have one or more inotify_event structs. */ + for (p = buf; p < buf + size; p += sizeof(*e) + e->len) { + e = (const struct uv__inotify_event*)p; + + events = 0; + if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY)) + events |= UV_CHANGE; + if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY)) + events |= UV_RENAME; + + w = find_watcher(loop, e->wd); + if (w == NULL) + continue; /* Stale event, no watchers left. */ + + /* inotify does not return the filename when monitoring a single file + * for modifications. Repurpose the filename for API compatibility. + * I'm not convinced this is a good thing, maybe it should go. + */ + path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path); + + /* We're about to iterate over the queue and call user's callbacks. + * What can go wrong? + * A callback could call uv_fs_event_stop() + * and the queue can change under our feet. + * So, we use QUEUE_MOVE() trick to safely iterate over the queue. + * And we don't free the watcher_list until we're done iterating. + * + * First, + * tell uv_fs_event_stop() (that could be called from a user's callback) + * not to free watcher_list. 
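+       * maybe_free_watcher_list() honours that flag: it only frees the list +       * once iteration has finished and the watcher queue is empty.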
+ */ + w->iterating = 1; + QUEUE_MOVE(&w->watchers, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_fs_event_t, watchers); + + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&w->watchers, q); + + h->cb(h, path, events, 0); + } + /* done iterating, time to (maybe) free empty watcher_list */ + w->iterating = 0; + maybe_free_watcher_list(w, loop); + } + } +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { + struct watcher_list* w; + int events; + int err; + int wd; + + if (uv__is_active(handle)) + return -EINVAL; + + err = init_inotify(handle->loop); + if (err) + return err; + + events = UV__IN_ATTRIB + | UV__IN_CREATE + | UV__IN_MODIFY + | UV__IN_DELETE + | UV__IN_DELETE_SELF + | UV__IN_MOVE_SELF + | UV__IN_MOVED_FROM + | UV__IN_MOVED_TO; + + wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events); + if (wd == -1) + return -errno; + + w = find_watcher(handle->loop, wd); + if (w) + goto no_insert; + + w = uv__malloc(sizeof(*w) + strlen(path) + 1); + if (w == NULL) + return -ENOMEM; + + w->wd = wd; + w->path = strcpy((char*)(w + 1), path); + QUEUE_INIT(&w->watchers); + w->iterating = 0; + RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w); + +no_insert: + uv__handle_start(handle); + QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers); + handle->path = w->path; + handle->cb = cb; + handle->wd = wd; + + return 0; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + struct watcher_list* w; + + if (!uv__is_active(handle)) + return 0; + + w = find_watcher(handle->loop, handle->wd); + assert(w != NULL); + + handle->wd = -1; + handle->path = NULL; + uv__handle_stop(handle); + QUEUE_REMOVE(&handle->watchers); + + maybe_free_watcher_list(w, handle->loop); + + return 0; +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { + uv_fs_event_stop(handle); +} diff --git a/deps/libuv/src/unix/linux-syscalls.c b/deps/libuv/src/unix/linux-syscalls.c new file mode 100644 index 00000000..89998ded --- /dev/null +++ b/deps/libuv/src/unix/linux-syscalls.c @@ -0,0 +1,471 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "linux-syscalls.h" +#include +#include +#include +#include +#include + +#if defined(__has_feature) +# if __has_feature(memory_sanitizer) +# define MSAN_ACTIVE 1 +# include +# endif +#endif + +#if defined(__i386__) +# ifndef __NR_socketcall +# define __NR_socketcall 102 +# endif +#endif + +#if defined(__arm__) +# if defined(__thumb__) || defined(__ARM_EABI__) +# define UV_SYSCALL_BASE 0 +# else +# define UV_SYSCALL_BASE 0x900000 +# endif +#endif /* __arm__ */ + +#ifndef __NR_accept4 +# if defined(__x86_64__) +# define __NR_accept4 288 +# elif defined(__i386__) + /* Nothing. Handled through socketcall(). */ +# elif defined(__arm__) +# define __NR_accept4 (UV_SYSCALL_BASE + 366) +# endif +#endif /* __NR_accept4 */ + +#ifndef __NR_eventfd +# if defined(__x86_64__) +# define __NR_eventfd 284 +# elif defined(__i386__) +# define __NR_eventfd 323 +# elif defined(__arm__) +# define __NR_eventfd (UV_SYSCALL_BASE + 351) +# endif +#endif /* __NR_eventfd */ + +#ifndef __NR_eventfd2 +# if defined(__x86_64__) +# define __NR_eventfd2 290 +# elif defined(__i386__) +# define __NR_eventfd2 328 +# elif defined(__arm__) +# define __NR_eventfd2 (UV_SYSCALL_BASE + 356) +# endif +#endif /* __NR_eventfd2 */ + +#ifndef __NR_epoll_create +# if defined(__x86_64__) +# define __NR_epoll_create 213 +# elif defined(__i386__) +# define __NR_epoll_create 254 +# elif defined(__arm__) +# define __NR_epoll_create (UV_SYSCALL_BASE + 250) +# endif +#endif /* __NR_epoll_create */ + +#ifndef __NR_epoll_create1 +# if defined(__x86_64__) +# define __NR_epoll_create1 291 +# elif defined(__i386__) +# define __NR_epoll_create1 329 +# elif defined(__arm__) +# define __NR_epoll_create1 (UV_SYSCALL_BASE + 357) +# endif +#endif /* __NR_epoll_create1 */ + +#ifndef __NR_epoll_ctl +# if defined(__x86_64__) +# define __NR_epoll_ctl 233 /* used to be 214 */ +# elif defined(__i386__) +# define __NR_epoll_ctl 255 +# elif defined(__arm__) +# define __NR_epoll_ctl (UV_SYSCALL_BASE + 251) +# endif +#endif /* __NR_epoll_ctl */ + +#ifndef __NR_epoll_wait +# if defined(__x86_64__) +# define __NR_epoll_wait 232 /* used to be 215 */ +# elif defined(__i386__) +# define __NR_epoll_wait 256 +# elif defined(__arm__) +# define __NR_epoll_wait (UV_SYSCALL_BASE + 252) +# endif +#endif /* __NR_epoll_wait */ + +#ifndef __NR_epoll_pwait +# if defined(__x86_64__) +# define __NR_epoll_pwait 281 +# elif defined(__i386__) +# define __NR_epoll_pwait 319 +# elif defined(__arm__) +# define __NR_epoll_pwait (UV_SYSCALL_BASE + 346) +# endif +#endif /* __NR_epoll_pwait */ + +#ifndef __NR_inotify_init +# if defined(__x86_64__) +# define __NR_inotify_init 253 +# elif defined(__i386__) +# define __NR_inotify_init 291 +# elif defined(__arm__) +# define __NR_inotify_init (UV_SYSCALL_BASE + 316) +# endif +#endif /* __NR_inotify_init */ + +#ifndef __NR_inotify_init1 +# if defined(__x86_64__) +# define __NR_inotify_init1 294 +# elif defined(__i386__) +# define __NR_inotify_init1 332 +# elif defined(__arm__) +# define __NR_inotify_init1 (UV_SYSCALL_BASE + 360) +# endif +#endif /* __NR_inotify_init1 */ + +#ifndef __NR_inotify_add_watch +# if defined(__x86_64__) +# define __NR_inotify_add_watch 254 +# elif defined(__i386__) +# define __NR_inotify_add_watch 292 +# elif defined(__arm__) +# define __NR_inotify_add_watch (UV_SYSCALL_BASE + 317) +# endif +#endif /* __NR_inotify_add_watch */ + +#ifndef __NR_inotify_rm_watch +# if defined(__x86_64__) +# define __NR_inotify_rm_watch 255 +# elif defined(__i386__) +# define __NR_inotify_rm_watch 293 +# elif defined(__arm__) +# 
define __NR_inotify_rm_watch (UV_SYSCALL_BASE + 318) +# endif +#endif /* __NR_inotify_rm_watch */ + +#ifndef __NR_pipe2 +# if defined(__x86_64__) +# define __NR_pipe2 293 +# elif defined(__i386__) +# define __NR_pipe2 331 +# elif defined(__arm__) +# define __NR_pipe2 (UV_SYSCALL_BASE + 359) +# endif +#endif /* __NR_pipe2 */ + +#ifndef __NR_recvmmsg +# if defined(__x86_64__) +# define __NR_recvmmsg 299 +# elif defined(__i386__) +# define __NR_recvmmsg 337 +# elif defined(__arm__) +# define __NR_recvmmsg (UV_SYSCALL_BASE + 365) +# endif +#endif /* __NR_recvmsg */ + +#ifndef __NR_sendmmsg +# if defined(__x86_64__) +# define __NR_sendmmsg 307 +# elif defined(__i386__) +# define __NR_sendmmsg 345 +# elif defined(__arm__) +# define __NR_sendmmsg (UV_SYSCALL_BASE + 374) +# endif +#endif /* __NR_sendmmsg */ + +#ifndef __NR_utimensat +# if defined(__x86_64__) +# define __NR_utimensat 280 +# elif defined(__i386__) +# define __NR_utimensat 320 +# elif defined(__arm__) +# define __NR_utimensat (UV_SYSCALL_BASE + 348) +# endif +#endif /* __NR_utimensat */ + +#ifndef __NR_preadv +# if defined(__x86_64__) +# define __NR_preadv 295 +# elif defined(__i386__) +# define __NR_preadv 333 +# elif defined(__arm__) +# define __NR_preadv (UV_SYSCALL_BASE + 361) +# endif +#endif /* __NR_preadv */ + +#ifndef __NR_pwritev +# if defined(__x86_64__) +# define __NR_pwritev 296 +# elif defined(__i386__) +# define __NR_pwritev 334 +# elif defined(__arm__) +# define __NR_pwritev (UV_SYSCALL_BASE + 362) +# endif +#endif /* __NR_pwritev */ + +#ifndef __NR_dup3 +# if defined(__x86_64__) +# define __NR_dup3 292 +# elif defined(__i386__) +# define __NR_dup3 330 +# elif defined(__arm__) +# define __NR_dup3 (UV_SYSCALL_BASE + 358) +# endif +#endif /* __NR_pwritev */ + + +int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags) { +#if defined(__i386__) + unsigned long args[4]; + int r; + + args[0] = (unsigned long) fd; + args[1] = (unsigned long) addr; + args[2] = (unsigned long) addrlen; + args[3] = (unsigned long) flags; + + r = syscall(__NR_socketcall, 18 /* SYS_ACCEPT4 */, args); + + /* socketcall() raises EINVAL when SYS_ACCEPT4 is not supported but so does + * a bad flags argument. Try to distinguish between the two cases. 
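+   * If only the known flag bits (UV__SOCK_CLOEXEC / UV__SOCK_NONBLOCK) were +   * passed, the EINVAL is taken to mean the syscall is unsupported and errno +   * is rewritten to ENOSYS so the caller can fall back to plain accept().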
+ */ + if (r == -1) + if (errno == EINVAL) + if ((flags & ~(UV__SOCK_CLOEXEC|UV__SOCK_NONBLOCK)) == 0) + errno = ENOSYS; + + return r; +#elif defined(__NR_accept4) + return syscall(__NR_accept4, fd, addr, addrlen, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__eventfd(unsigned int count) { +#if defined(__NR_eventfd) + return syscall(__NR_eventfd, count); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__eventfd2(unsigned int count, int flags) { +#if defined(__NR_eventfd2) + return syscall(__NR_eventfd2, count, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_create(int size) { +#if defined(__NR_epoll_create) + return syscall(__NR_epoll_create, size); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_create1(int flags) { +#if defined(__NR_epoll_create1) + return syscall(__NR_epoll_create1, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event* events) { +#if defined(__NR_epoll_ctl) + return syscall(__NR_epoll_ctl, epfd, op, fd, events); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_wait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout) { +#if defined(__NR_epoll_wait) + int result; + result = syscall(__NR_epoll_wait, epfd, events, nevents, timeout); +#if MSAN_ACTIVE + if (result > 0) + __msan_unpoison(events, sizeof(events[0]) * result); +#endif + return result; +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__epoll_pwait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout, + uint64_t sigmask) { +#if defined(__NR_epoll_pwait) + int result; + result = syscall(__NR_epoll_pwait, + epfd, + events, + nevents, + timeout, + &sigmask, + sizeof(sigmask)); +#if MSAN_ACTIVE + if (result > 0) + __msan_unpoison(events, sizeof(events[0]) * result); +#endif + return result; +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_init(void) { +#if defined(__NR_inotify_init) + return syscall(__NR_inotify_init); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_init1(int flags) { +#if defined(__NR_inotify_init1) + return syscall(__NR_inotify_init1, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_add_watch(int fd, const char* path, uint32_t mask) { +#if defined(__NR_inotify_add_watch) + return syscall(__NR_inotify_add_watch, fd, path, mask); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__inotify_rm_watch(int fd, int32_t wd) { +#if defined(__NR_inotify_rm_watch) + return syscall(__NR_inotify_rm_watch, fd, wd); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__pipe2(int pipefd[2], int flags) { +#if defined(__NR_pipe2) + int result; + result = syscall(__NR_pipe2, pipefd, flags); +#if MSAN_ACTIVE + if (!result) + __msan_unpoison(pipefd, sizeof(int[2])); +#endif + return result; +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__sendmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags) { +#if defined(__NR_sendmmsg) + return syscall(__NR_sendmmsg, fd, mmsg, vlen, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__recvmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags, + struct timespec* timeout) { +#if defined(__NR_recvmmsg) + return syscall(__NR_recvmmsg, fd, mmsg, vlen, flags, timeout); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__utimesat(int dirfd, + const char* path, + const struct timespec times[2], + int flags) 
+{ +#if defined(__NR_utimensat) + return syscall(__NR_utimensat, dirfd, path, times, flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) { +#if defined(__NR_preadv) + return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32)); +#else + return errno = ENOSYS, -1; +#endif +} + + +ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) { +#if defined(__NR_pwritev) + return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32)); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__dup3(int oldfd, int newfd, int flags) { +#if defined(__NR_dup3) + return syscall(__NR_dup3, oldfd, newfd, flags); +#else + return errno = ENOSYS, -1; +#endif +} diff --git a/deps/libuv/src/unix/linux-syscalls.h b/deps/libuv/src/unix/linux-syscalls.h new file mode 100644 index 00000000..4c095e9b --- /dev/null +++ b/deps/libuv/src/unix/linux-syscalls.h @@ -0,0 +1,151 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef UV_LINUX_SYSCALL_H_ +#define UV_LINUX_SYSCALL_H_ + +#undef _GNU_SOURCE +#define _GNU_SOURCE + +#include +#include +#include +#include +#include + +#if defined(__alpha__) +# define UV__O_CLOEXEC 0x200000 +#elif defined(__hppa__) +# define UV__O_CLOEXEC 0x200000 +#elif defined(__sparc__) +# define UV__O_CLOEXEC 0x400000 +#else +# define UV__O_CLOEXEC 0x80000 +#endif + +#if defined(__alpha__) +# define UV__O_NONBLOCK 0x4 +#elif defined(__hppa__) +# define UV__O_NONBLOCK O_NONBLOCK +#elif defined(__mips__) +# define UV__O_NONBLOCK 0x80 +#elif defined(__sparc__) +# define UV__O_NONBLOCK 0x4000 +#else +# define UV__O_NONBLOCK 0x800 +#endif + +#define UV__EFD_CLOEXEC UV__O_CLOEXEC +#define UV__EFD_NONBLOCK UV__O_NONBLOCK + +#define UV__IN_CLOEXEC UV__O_CLOEXEC +#define UV__IN_NONBLOCK UV__O_NONBLOCK + +#define UV__SOCK_CLOEXEC UV__O_CLOEXEC +#if defined(SOCK_NONBLOCK) +# define UV__SOCK_NONBLOCK SOCK_NONBLOCK +#else +# define UV__SOCK_NONBLOCK UV__O_NONBLOCK +#endif + +/* epoll flags */ +#define UV__EPOLL_CLOEXEC UV__O_CLOEXEC +#define UV__EPOLL_CTL_ADD 1 +#define UV__EPOLL_CTL_DEL 2 +#define UV__EPOLL_CTL_MOD 3 + +/* inotify flags */ +#define UV__IN_ACCESS 0x001 +#define UV__IN_MODIFY 0x002 +#define UV__IN_ATTRIB 0x004 +#define UV__IN_CLOSE_WRITE 0x008 +#define UV__IN_CLOSE_NOWRITE 0x010 +#define UV__IN_OPEN 0x020 +#define UV__IN_MOVED_FROM 0x040 +#define UV__IN_MOVED_TO 0x080 +#define UV__IN_CREATE 0x100 +#define UV__IN_DELETE 0x200 +#define UV__IN_DELETE_SELF 0x400 +#define UV__IN_MOVE_SELF 0x800 + +#if defined(__x86_64__) +struct uv__epoll_event { + uint32_t events; + uint64_t data; +} __attribute__((packed)); +#else +struct uv__epoll_event { + uint32_t events; + uint64_t data; +}; +#endif + +struct uv__inotify_event { + int32_t wd; + uint32_t mask; + uint32_t cookie; + uint32_t len; + /* char name[0]; */ +}; + +struct uv__mmsghdr { + struct msghdr msg_hdr; + unsigned int msg_len; +}; + +int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags); +int uv__eventfd(unsigned int count); +int uv__epoll_create(int size); +int uv__epoll_create1(int flags); +int uv__epoll_ctl(int epfd, int op, int fd, struct uv__epoll_event *ev); +int uv__epoll_wait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout); +int uv__epoll_pwait(int epfd, + struct uv__epoll_event* events, + int nevents, + int timeout, + uint64_t sigmask); +int uv__eventfd2(unsigned int count, int flags); +int uv__inotify_init(void); +int uv__inotify_init1(int flags); +int uv__inotify_add_watch(int fd, const char* path, uint32_t mask); +int uv__inotify_rm_watch(int fd, int32_t wd); +int uv__pipe2(int pipefd[2], int flags); +int uv__recvmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags, + struct timespec* timeout); +int uv__sendmmsg(int fd, + struct uv__mmsghdr* mmsg, + unsigned int vlen, + unsigned int flags); +int uv__utimesat(int dirfd, + const char* path, + const struct timespec times[2], + int flags); +ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset); +ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset); +int uv__dup3(int oldfd, int newfd, int flags); + +#endif /* UV_LINUX_SYSCALL_H_ */ diff --git a/deps/libuv/src/unix/loop-watcher.c b/deps/libuv/src/unix/loop-watcher.c new file mode 100644 index 00000000..340bb0df --- /dev/null +++ b/deps/libuv/src/unix/loop-watcher.c @@ -0,0 +1,68 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#define UV_LOOP_WATCHER_DEFINE(name, type) \ + int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \ + uv__handle_init(loop, (uv_handle_t*)handle, UV_##type); \ + handle->name##_cb = NULL; \ + return 0; \ + } \ + \ + int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \ + if (uv__is_active(handle)) return 0; \ + if (cb == NULL) return -EINVAL; \ + QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \ + handle->name##_cb = cb; \ + uv__handle_start(handle); \ + return 0; \ + } \ + \ + int uv_##name##_stop(uv_##name##_t* handle) { \ + if (!uv__is_active(handle)) return 0; \ + QUEUE_REMOVE(&handle->queue); \ + uv__handle_stop(handle); \ + return 0; \ + } \ + \ + void uv__run_##name(uv_loop_t* loop) { \ + uv_##name##_t* h; \ + QUEUE queue; \ + QUEUE* q; \ + QUEUE_MOVE(&loop->name##_handles, &queue); \ + while (!QUEUE_EMPTY(&queue)) { \ + q = QUEUE_HEAD(&queue); \ + h = QUEUE_DATA(q, uv_##name##_t, queue); \ + QUEUE_REMOVE(q); \ + QUEUE_INSERT_TAIL(&loop->name##_handles, q); \ + h->name##_cb(h); \ + } \ + } \ + \ + void uv__##name##_close(uv_##name##_t* handle) { \ + uv_##name##_stop(handle); \ + } + +UV_LOOP_WATCHER_DEFINE(prepare, PREPARE) +UV_LOOP_WATCHER_DEFINE(check, CHECK) +UV_LOOP_WATCHER_DEFINE(idle, IDLE) diff --git a/deps/libuv/src/unix/loop.c b/deps/libuv/src/unix/loop.c new file mode 100644 index 00000000..bd63c2f9 --- /dev/null +++ b/deps/libuv/src/unix/loop.c @@ -0,0 +1,159 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "tree.h" +#include "internal.h" +#include "heap-inl.h" +#include +#include +#include + +int uv_loop_init(uv_loop_t* loop) { + void* saved_data; + int err; + + uv__signal_global_once_init(); + + saved_data = loop->data; + memset(loop, 0, sizeof(*loop)); + loop->data = saved_data; + + heap_init((struct heap*) &loop->timer_heap); + QUEUE_INIT(&loop->wq); + QUEUE_INIT(&loop->active_reqs); + QUEUE_INIT(&loop->idle_handles); + QUEUE_INIT(&loop->async_handles); + QUEUE_INIT(&loop->check_handles); + QUEUE_INIT(&loop->prepare_handles); + QUEUE_INIT(&loop->handle_queue); + + loop->nfds = 0; + loop->watchers = NULL; + loop->nwatchers = 0; + QUEUE_INIT(&loop->pending_queue); + QUEUE_INIT(&loop->watcher_queue); + + loop->closing_handles = NULL; + uv__update_time(loop); + uv__async_init(&loop->async_watcher); + loop->signal_pipefd[0] = -1; + loop->signal_pipefd[1] = -1; + loop->backend_fd = -1; + loop->emfile_fd = -1; + + loop->timer_counter = 0; + loop->stop_flag = 0; + + err = uv__platform_loop_init(loop); + if (err) + return err; + + err = uv_signal_init(loop, &loop->child_watcher); + if (err) + goto fail_signal_init; + + uv__handle_unref(&loop->child_watcher); + loop->child_watcher.flags |= UV__HANDLE_INTERNAL; + QUEUE_INIT(&loop->process_handles); + + err = uv_rwlock_init(&loop->cloexec_lock); + if (err) + goto fail_rwlock_init; + + err = uv_mutex_init(&loop->wq_mutex); + if (err) + goto fail_mutex_init; + + err = uv_async_init(loop, &loop->wq_async, uv__work_done); + if (err) + goto fail_async_init; + + uv__handle_unref(&loop->wq_async); + loop->wq_async.flags |= UV__HANDLE_INTERNAL; + + return 0; + +fail_async_init: + uv_mutex_destroy(&loop->wq_mutex); + +fail_mutex_init: + uv_rwlock_destroy(&loop->cloexec_lock); + +fail_rwlock_init: + uv__signal_loop_cleanup(loop); + +fail_signal_init: + uv__platform_loop_delete(loop); + + return err; +} + + +void uv__loop_close(uv_loop_t* loop) { + uv__signal_loop_cleanup(loop); + uv__platform_loop_delete(loop); + uv__async_stop(loop, &loop->async_watcher); + + if (loop->emfile_fd != -1) { + uv__close(loop->emfile_fd); + loop->emfile_fd = -1; + } + + if (loop->backend_fd != -1) { + uv__close(loop->backend_fd); + loop->backend_fd = -1; + } + + uv_mutex_lock(&loop->wq_mutex); + assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); + assert(!uv__has_active_reqs(loop)); + uv_mutex_unlock(&loop->wq_mutex); + uv_mutex_destroy(&loop->wq_mutex); + + /* + * Note that all thread pool stuff is finished at this point and + * it is safe to just destroy rw lock + */ + uv_rwlock_destroy(&loop->cloexec_lock); + +#if 0 + assert(QUEUE_EMPTY(&loop->pending_queue)); + assert(QUEUE_EMPTY(&loop->watcher_queue)); + assert(loop->nfds == 0); +#endif + + uv__free(loop->watchers); + loop->watchers = NULL; + loop->nwatchers = 0; +} + + +int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) { + if (option != UV_LOOP_BLOCK_SIGNAL) + return UV_ENOSYS; + + if (va_arg(ap, int) != SIGPROF) + return UV_EINVAL; + + loop->flags |= UV_LOOP_BLOCK_SIGPROF; + return 0; +} diff --git a/deps/libuv/src/unix/netbsd.c b/deps/libuv/src/unix/netbsd.c new file mode 100644 index 00000000..4a9e6cbc --- /dev/null +++ b/deps/libuv/src/unix/netbsd.c @@ -0,0 +1,380 @@ +/* 
Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + +static char *process_title; + + +int uv__platform_loop_init(uv_loop_t* loop) { + return uv__kqueue_init(loop); +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) == -1) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +int uv_exepath(char* buffer, size_t* size) { + int mib[4]; + size_t cb; + pid_t mypid; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + mypid = getpid(); + mib[0] = CTL_KERN; + mib[1] = KERN_PROC_ARGS; + mib[2] = mypid; + mib[3] = KERN_PROC_ARGV; + + cb = *size; + if (sysctl(mib, 4, buffer, &cb, NULL, 0)) + return -errno; + *size = strlen(buffer); + + return 0; +} + + +uint64_t uv_get_free_memory(void) { + struct uvmexp info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_UVMEXP}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info.free * sysconf(_SC_PAGESIZE); +} + + +uint64_t uv_get_total_memory(void) { +#if defined(HW_PHYSMEM64) + uint64_t info; + int which[] = {CTL_HW, HW_PHYSMEM64}; +#else + unsigned int info; + int which[] = {CTL_HW, HW_PHYSMEM}; +#endif + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info; +} + + +char** uv_setup_args(int argc, char** argv) { + process_title = argc ? 
uv__strdup(argv[0]) : NULL; + return argv; +} + + +int uv_set_process_title(const char* title) { + if (process_title) uv__free(process_title); + + process_title = uv__strdup(title); + setproctitle("%s", title); + + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + size_t len; + + if (buffer == NULL || size == 0) + return -EINVAL; + + if (process_title) { + len = strlen(process_title) + 1; + + if (size < len) + return -ENOBUFS; + + memcpy(buffer, process_title, len); + } else { + len = 0; + } + + buffer[len] = '\0'; + + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + kvm_t *kd = NULL; + struct kinfo_proc2 *kinfo = NULL; + pid_t pid; + int nprocs; + int max_size = sizeof(struct kinfo_proc2); + int page_size; + + page_size = getpagesize(); + pid = getpid(); + + kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open"); + + if (kd == NULL) goto error; + + kinfo = kvm_getproc2(kd, KERN_PROC_PID, pid, max_size, &nprocs); + if (kinfo == NULL) goto error; + + *rss = kinfo->p_vm_rssize * page_size; + + kvm_close(kd); + + return 0; + +error: + if (kd) kvm_close(kd); + return -EPERM; +} + + +int uv_uptime(double* uptime) { + time_t now; + struct timeval info; + size_t size = sizeof(info); + static int which[] = {CTL_KERN, KERN_BOOTTIME}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + now = time(NULL); + + *uptime = (double)(now - info.tv_sec); + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK); + unsigned int multiplier = ((uint64_t)1000L / ticks); + unsigned int cur = 0; + uv_cpu_info_t* cpu_info; + u_int64_t* cp_times; + char model[512]; + u_int64_t cpuspeed; + int numcpus; + size_t size; + int i; + + size = sizeof(model); + if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) && + sysctlbyname("hw.model", &model, &size, NULL, 0)) { + return -errno; + } + + size = sizeof(numcpus); + if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0)) + return -errno; + *count = numcpus; + + /* Only i386 and amd64 have machdep.tsc_freq */ + size = sizeof(cpuspeed); + if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0)) + cpuspeed = 0; + + size = numcpus * CPUSTATES * sizeof(*cp_times); + cp_times = uv__malloc(size); + if (cp_times == NULL) + return -ENOMEM; + + if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0)) + return -errno; + + *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos)); + if (!(*cpu_infos)) { + uv__free(cp_times); + uv__free(*cpu_infos); + return -ENOMEM; + } + + for (i = 0; i < numcpus; i++) { + cpu_info = &(*cpu_infos)[i]; + cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier; + cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier; + cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier; + cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier; + cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier; + cpu_info->model = uv__strdup(model); + cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6); + cur += CPUSTATES; + } + uv__free(cp_times); + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_dl *sa_addr; + + if 
(getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != PF_INET)) { + continue; + } + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + if (ent->ifa_addr->sa_family != PF_INET) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/deps/libuv/src/unix/openbsd.c b/deps/libuv/src/unix/openbsd.c new file mode 100644 index 00000000..909288cc --- /dev/null +++ b/deps/libuv/src/unix/openbsd.c @@ -0,0 +1,396 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + + +static char *process_title; + + +int uv__platform_loop_init(uv_loop_t* loop) { + return uv__kqueue_init(loop); +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); +} + + +void uv_loadavg(double avg[3]) { + struct loadavg info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_LOADAVG}; + + if (sysctl(which, 2, &info, &size, NULL, 0) < 0) return; + + avg[0] = (double) info.ldavg[0] / info.fscale; + avg[1] = (double) info.ldavg[1] / info.fscale; + avg[2] = (double) info.ldavg[2] / info.fscale; +} + + +int uv_exepath(char* buffer, size_t* size) { + int mib[4]; + char **argsbuf = NULL; + char **argsbuf_tmp; + size_t argsbuf_size = 100U; + size_t exepath_size; + pid_t mypid; + int err; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + mypid = getpid(); + for (;;) { + err = -ENOMEM; + argsbuf_tmp = uv__realloc(argsbuf, argsbuf_size); + if (argsbuf_tmp == NULL) + goto out; + argsbuf = argsbuf_tmp; + mib[0] = CTL_KERN; + mib[1] = KERN_PROC_ARGS; + mib[2] = mypid; + mib[3] = KERN_PROC_ARGV; + if (sysctl(mib, 4, argsbuf, &argsbuf_size, NULL, 0) == 0) { + break; + } + if (errno != ENOMEM) { + err = -errno; + goto out; + } + argsbuf_size *= 2U; + } + + if (argsbuf[0] == NULL) { + err = -EINVAL; /* FIXME(bnoordhuis) More appropriate error. */ + goto out; + } + + *size -= 1; + exepath_size = strlen(argsbuf[0]); + if (*size > exepath_size) + *size = exepath_size; + + memcpy(buffer, argsbuf[0], *size); + buffer[*size] = '\0'; + err = 0; + +out: + uv__free(argsbuf); + + return err; +} + + +uint64_t uv_get_free_memory(void) { + struct uvmexp info; + size_t size = sizeof(info); + int which[] = {CTL_VM, VM_UVMEXP}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info.free * sysconf(_SC_PAGESIZE); +} + + +uint64_t uv_get_total_memory(void) { + uint64_t info; + int which[] = {CTL_HW, HW_PHYSMEM64}; + size_t size = sizeof(info); + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + return (uint64_t) info; +} + + +char** uv_setup_args(int argc, char** argv) { + process_title = argc ? 
uv__strdup(argv[0]) : NULL; + return argv; +} + + +int uv_set_process_title(const char* title) { + uv__free(process_title); + process_title = uv__strdup(title); + setproctitle(title); + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + size_t len; + + if (buffer == NULL || size == 0) + return -EINVAL; + + if (process_title) { + len = strlen(process_title) + 1; + + if (size < len) + return -ENOBUFS; + + memcpy(buffer, process_title, len); + } else { + len = 0; + } + + buffer[len] = '\0'; + + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + struct kinfo_proc kinfo; + size_t page_size = getpagesize(); + size_t size = sizeof(struct kinfo_proc); + int mib[6]; + + mib[0] = CTL_KERN; + mib[1] = KERN_PROC; + mib[2] = KERN_PROC_PID; + mib[3] = getpid(); + mib[4] = sizeof(struct kinfo_proc); + mib[5] = 1; + + if (sysctl(mib, 6, &kinfo, &size, NULL, 0) < 0) + return -errno; + + *rss = kinfo.p_vm_rssize * page_size; + return 0; +} + + +int uv_uptime(double* uptime) { + time_t now; + struct timeval info; + size_t size = sizeof(info); + static int which[] = {CTL_KERN, KERN_BOOTTIME}; + + if (sysctl(which, 2, &info, &size, NULL, 0)) + return -errno; + + now = time(NULL); + + *uptime = (double)(now - info.tv_sec); + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK), + multiplier = ((uint64_t)1000L / ticks), cpuspeed; + uint64_t info[CPUSTATES]; + char model[512]; + int numcpus = 1; + int which[] = {CTL_HW,HW_MODEL,0}; + size_t size; + int i; + uv_cpu_info_t* cpu_info; + + size = sizeof(model); + if (sysctl(which, 2, &model, &size, NULL, 0)) + return -errno; + + which[1] = HW_NCPU; + size = sizeof(numcpus); + if (sysctl(which, 2, &numcpus, &size, NULL, 0)) + return -errno; + + *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos)); + if (!(*cpu_infos)) + return -ENOMEM; + + *count = numcpus; + + which[1] = HW_CPUSPEED; + size = sizeof(cpuspeed); + if (sysctl(which, 2, &cpuspeed, &size, NULL, 0)) { + uv__free(*cpu_infos); + return -errno; + } + + size = sizeof(info); + which[0] = CTL_KERN; + which[1] = KERN_CPTIME2; + for (i = 0; i < numcpus; i++) { + which[2] = i; + size = sizeof(info); + if (sysctl(which, 3, &info, &size, NULL, 0)) { + uv__free(*cpu_infos); + return -errno; + } + + cpu_info = &(*cpu_infos)[i]; + + cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier; + cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier; + cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier; + cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier; + cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier; + + cpu_info->model = uv__strdup(model); + cpu_info->speed = cpuspeed; + } + + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +int uv_interface_addresses(uv_interface_address_t** addresses, + int* count) { + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_dl *sa_addr; + + if (getifaddrs(&addrs) != 0) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != PF_INET)) { + continue; + } + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + 
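+  /* Note: unlike the Linux implementation earlier in this commit there is no +   * early return when *count == 0, so if uv__malloc(0) yields NULL the check +   * below reports -ENOMEM even though no matching interfaces were found. */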
+ if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + if (ent->ifa_addr->sa_family != PF_INET) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family != AF_LINK)) { + continue; + } + + address = *addresses; + + for (i = 0; i < (*count); i++) { + if (strcmp(address->name, ent->ifa_name) == 0) { + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/deps/libuv/src/unix/os390.c b/deps/libuv/src/unix/os390.c new file mode 100644 index 00000000..bcdbc4b6 --- /dev/null +++ b/deps/libuv/src/unix/os390.c @@ -0,0 +1,42 @@ +/* Copyright libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "internal.h" + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct pollfd p[1]; + int rv; + + p[0].fd = fd; + p[0].events = POLLIN; + + do + rv = poll(p, 1, 0); + while (rv == -1 && errno == EINTR); + + if (rv == -1) + abort(); + + if (p[0].revents & POLLNVAL) + return -1; + + return 0; +} diff --git a/deps/libuv/src/unix/pipe.c b/deps/libuv/src/unix/pipe.c new file mode 100644 index 00000000..b73994cb --- /dev/null +++ b/deps/libuv/src/unix/pipe.c @@ -0,0 +1,298 @@ +/* Copyright Joyent, Inc. and other Node contributors. 
All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include + + +int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) { + uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE); + handle->shutdown_req = NULL; + handle->connect_req = NULL; + handle->pipe_fname = NULL; + handle->ipc = ipc; + return 0; +} + + +int uv_pipe_bind(uv_pipe_t* handle, const char* name) { + struct sockaddr_un saddr; + const char* pipe_fname; + int sockfd; + int err; + + pipe_fname = NULL; + sockfd = -1; + + /* Already bound? */ + if (uv__stream_fd(handle) >= 0) + return -EINVAL; + + /* Make a copy of the file name, it outlives this function's scope. */ + pipe_fname = uv__strdup(name); + if (pipe_fname == NULL) + return -ENOMEM; + + /* We've got a copy, don't touch the original any more. */ + name = NULL; + + err = uv__socket(AF_UNIX, SOCK_STREAM, 0); + if (err < 0) + goto err_socket; + sockfd = err; + + memset(&saddr, 0, sizeof saddr); + strncpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path) - 1); + saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0'; + saddr.sun_family = AF_UNIX; + + if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) { + err = -errno; + /* Convert ENOENT to EACCES for compatibility with Windows. */ + if (err == -ENOENT) + err = -EACCES; + goto err_bind; + } + + /* Success. */ + handle->flags |= UV_HANDLE_BOUND; + handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */ + handle->io_watcher.fd = sockfd; + return 0; + +err_bind: + uv__close(sockfd); + +err_socket: + uv__free((void*)pipe_fname); + return err; +} + + +int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) { + if (uv__stream_fd(handle) == -1) + return -EINVAL; + +#if defined(__MVS__) + /* On zOS, backlog=0 has undefined behaviour */ + if (backlog == 0) + backlog = 1; + else if (backlog < 0) + backlog = SOMAXCONN; +#endif + + if (listen(uv__stream_fd(handle), backlog)) + return -errno; + + handle->connection_cb = cb; + handle->io_watcher.cb = uv__server_io; + uv__io_start(handle->loop, &handle->io_watcher, POLLIN); + return 0; +} + + +void uv__pipe_close(uv_pipe_t* handle) { + if (handle->pipe_fname) { + /* + * Unlink the file system entity before closing the file descriptor. + * Doing it the other way around introduces a race where our process + * unlinks a socket with the same name that's just been created by + * another thread or process. 
+ */ + unlink(handle->pipe_fname); + uv__free((void*)handle->pipe_fname); + handle->pipe_fname = NULL; + } + + uv__stream_close((uv_stream_t*)handle); +} + + +int uv_pipe_open(uv_pipe_t* handle, uv_file fd) { + int err; + + err = uv__nonblock(fd, 1); + if (err) + return err; + +#if defined(__APPLE__) + err = uv__stream_try_select((uv_stream_t*) handle, &fd); + if (err) + return err; +#endif /* defined(__APPLE__) */ + + return uv__stream_open((uv_stream_t*)handle, + fd, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); +} + + +void uv_pipe_connect(uv_connect_t* req, + uv_pipe_t* handle, + const char* name, + uv_connect_cb cb) { + struct sockaddr_un saddr; + int new_sock; + int err; + int r; + + new_sock = (uv__stream_fd(handle) == -1); + + if (new_sock) { + err = uv__socket(AF_UNIX, SOCK_STREAM, 0); + if (err < 0) + goto out; + handle->io_watcher.fd = err; + } + + memset(&saddr, 0, sizeof saddr); + strncpy(saddr.sun_path, name, sizeof(saddr.sun_path) - 1); + saddr.sun_path[sizeof(saddr.sun_path) - 1] = '\0'; + saddr.sun_family = AF_UNIX; + + do { + r = connect(uv__stream_fd(handle), + (struct sockaddr*)&saddr, sizeof saddr); + } + while (r == -1 && errno == EINTR); + + if (r == -1 && errno != EINPROGRESS) { + err = -errno; + goto out; + } + + err = 0; + if (new_sock) { + err = uv__stream_open((uv_stream_t*)handle, + uv__stream_fd(handle), + UV_STREAM_READABLE | UV_STREAM_WRITABLE); + } + + if (err == 0) + uv__io_start(handle->loop, &handle->io_watcher, POLLIN | POLLOUT); + +out: + handle->delayed_error = err; + handle->connect_req = req; + + uv__req_init(handle->loop, req, UV_CONNECT); + req->handle = (uv_stream_t*)handle; + req->cb = cb; + QUEUE_INIT(&req->queue); + + /* Force callback to run on next tick in case of error. */ + if (err) + uv__io_feed(handle->loop, &handle->io_watcher); + +} + + +typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*); + + +static int uv__pipe_getsockpeername(const uv_pipe_t* handle, + uv__peersockfunc func, + char* buffer, + size_t* size) { + struct sockaddr_un sa; + socklen_t addrlen; + int err; + + addrlen = sizeof(sa); + memset(&sa, 0, addrlen); + err = func(uv__stream_fd(handle), (struct sockaddr*) &sa, &addrlen); + if (err < 0) { + *size = 0; + return -errno; + } + +#if defined(__linux__) + if (sa.sun_path[0] == 0) + /* Linux abstract namespace */ + addrlen -= offsetof(struct sockaddr_un, sun_path); + else +#endif + addrlen = strlen(sa.sun_path); + + + if (addrlen >= *size) { + *size = addrlen + 1; + return UV_ENOBUFS; + } + + memcpy(buffer, sa.sun_path, addrlen); + *size = addrlen; + + /* only null-terminate if it's not an abstract socket */ + if (buffer[0] != '\0') + buffer[addrlen] = '\0'; + + return 0; +} + + +int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) { + return uv__pipe_getsockpeername(handle, getsockname, buffer, size); +} + + +int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) { + return uv__pipe_getsockpeername(handle, getpeername, buffer, size); +} + + +void uv_pipe_pending_instances(uv_pipe_t* handle, int count) { +} + + +int uv_pipe_pending_count(uv_pipe_t* handle) { + uv__stream_queued_fds_t* queued_fds; + + if (!handle->ipc) + return 0; + + if (handle->accepted_fd == -1) + return 0; + + if (handle->queued_fds == NULL) + return 1; + + queued_fds = handle->queued_fds; + return queued_fds->offset + 1; +} + + +uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) { + if (!handle->ipc) + return UV_UNKNOWN_HANDLE; + + if (handle->accepted_fd == -1) + return UV_UNKNOWN_HANDLE; + 
else + return uv__handle_type(handle->accepted_fd); +} diff --git a/deps/libuv/src/unix/poll.c b/deps/libuv/src/unix/poll.c new file mode 100644 index 00000000..4c0d478e --- /dev/null +++ b/deps/libuv/src/unix/poll.c @@ -0,0 +1,130 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include + + +static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + uv_poll_t* handle; + int pevents; + + handle = container_of(w, uv_poll_t, io_watcher); + + if (events & POLLERR) { + uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP); + uv__handle_stop(handle); + handle->poll_cb(handle, -EBADF, 0); + return; + } + + pevents = 0; + if (events & POLLIN) + pevents |= UV_READABLE; + if (events & POLLOUT) + pevents |= UV_WRITABLE; + if (events & UV__POLLRDHUP) + pevents |= UV_DISCONNECT; + + handle->poll_cb(handle, 0, pevents); +} + + +int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) { + int err; + + err = uv__io_check_fd(loop, fd); + if (err) + return err; + + /* If ioctl(FIONBIO) reports ENOTTY, try fcntl(F_GETFL) + fcntl(F_SETFL). + * Workaround for e.g. kqueue fds not supporting ioctls. 
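For reference, a minimal sketch of the fcntl()-based fallback that the comment above alludes to, i.e. toggling O_NONBLOCK when ioctl(FIONBIO) is not supported for a descriptor. This is illustrative only and not the library's internal helper; the error convention (negated errno) just mirrors the surrounding code:

    #include <errno.h>
    #include <fcntl.h>

    static int set_nonblock_fcntl(int fd, int on) {
      int flags;

      flags = fcntl(fd, F_GETFL);
      if (flags == -1)
        return -errno;

      if (on)
        flags |= O_NONBLOCK;
      else
        flags &= ~O_NONBLOCK;

      if (fcntl(fd, F_SETFL, flags) == -1)
        return -errno;

      return 0;
    }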
+ */ + err = uv__nonblock(fd, 1); + if (err == -ENOTTY) + if (uv__nonblock == uv__nonblock_ioctl) + err = uv__nonblock_fcntl(fd, 1); + + if (err) + return err; + + uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL); + uv__io_init(&handle->io_watcher, uv__poll_io, fd); + handle->poll_cb = NULL; + return 0; +} + + +int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, + uv_os_sock_t socket) { + return uv_poll_init(loop, handle, socket); +} + + +static void uv__poll_stop(uv_poll_t* handle) { + uv__io_stop(handle->loop, + &handle->io_watcher, + POLLIN | POLLOUT | UV__POLLRDHUP); + uv__handle_stop(handle); +} + + +int uv_poll_stop(uv_poll_t* handle) { + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + uv__poll_stop(handle); + return 0; +} + + +int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) { + int events; + + assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0); + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + + uv__poll_stop(handle); + + if (pevents == 0) + return 0; + + events = 0; + if (pevents & UV_READABLE) + events |= POLLIN; + if (pevents & UV_WRITABLE) + events |= POLLOUT; + if (pevents & UV_DISCONNECT) + events |= UV__POLLRDHUP; + + uv__io_start(handle->loop, &handle->io_watcher, events); + uv__handle_start(handle); + handle->poll_cb = poll_cb; + + return 0; +} + + +void uv__poll_close(uv_poll_t* handle) { + uv__poll_stop(handle); +} diff --git a/deps/libuv/src/unix/process.c b/deps/libuv/src/unix/process.c new file mode 100644 index 00000000..45f5b452 --- /dev/null +++ b/deps/libuv/src/unix/process.c @@ -0,0 +1,563 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
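The poll handle implemented in poll.c above is typically driven from user code roughly as follows. This is a hedged sketch: watch_socket and on_readable are placeholder names, and sock stands for an already connected, non-blocking socket descriptor supplied by the application:

    #include "uv.h"

    static void on_readable(uv_poll_t* handle, int status, int events) {
      if (status < 0)
        return;  /* e.g. -EBADF after the fd went away */

      if (events & UV_READABLE) {
        /* read from the underlying fd here */
      }
    }

    static int watch_socket(uv_loop_t* loop, uv_poll_t* watcher, int sock) {
      int err;

      err = uv_poll_init(loop, watcher, sock);
      if (err != 0)
        return err;

      return uv_poll_start(watcher, UV_READABLE, on_readable);
    }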
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if defined(__APPLE__) && !TARGET_OS_IPHONE +# include +# define environ (*_NSGetEnviron()) +#else +extern char **environ; +#endif + +#if defined(__linux__) || defined(__GLIBC__) +# include +#endif + + +static void uv__chld(uv_signal_t* handle, int signum) { + uv_process_t* process; + uv_loop_t* loop; + int exit_status; + int term_signal; + int status; + pid_t pid; + QUEUE pending; + QUEUE* q; + QUEUE* h; + + assert(signum == SIGCHLD); + + QUEUE_INIT(&pending); + loop = handle->loop; + + h = &loop->process_handles; + q = QUEUE_HEAD(h); + while (q != h) { + process = QUEUE_DATA(q, uv_process_t, queue); + q = QUEUE_NEXT(q); + + do + pid = waitpid(process->pid, &status, WNOHANG); + while (pid == -1 && errno == EINTR); + + if (pid == 0) + continue; + + if (pid == -1) { + if (errno != ECHILD) + abort(); + continue; + } + + process->status = status; + QUEUE_REMOVE(&process->queue); + QUEUE_INSERT_TAIL(&pending, &process->queue); + } + + h = &pending; + q = QUEUE_HEAD(h); + while (q != h) { + process = QUEUE_DATA(q, uv_process_t, queue); + q = QUEUE_NEXT(q); + + QUEUE_REMOVE(&process->queue); + QUEUE_INIT(&process->queue); + uv__handle_stop(process); + + if (process->exit_cb == NULL) + continue; + + exit_status = 0; + if (WIFEXITED(process->status)) + exit_status = WEXITSTATUS(process->status); + + term_signal = 0; + if (WIFSIGNALED(process->status)) + term_signal = WTERMSIG(process->status); + + process->exit_cb(process, exit_status, term_signal); + } + assert(QUEUE_EMPTY(&pending)); +} + + +int uv__make_socketpair(int fds[2], int flags) { +#if defined(__linux__) + static int no_cloexec; + + if (no_cloexec) + goto skip; + + if (socketpair(AF_UNIX, SOCK_STREAM | UV__SOCK_CLOEXEC | flags, 0, fds) == 0) + return 0; + + /* Retry on EINVAL, it means SOCK_CLOEXEC is not supported. + * Anything else is a genuine error. + */ + if (errno != EINVAL) + return -errno; + + no_cloexec = 1; + +skip: +#endif + + if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) + return -errno; + + uv__cloexec(fds[0], 1); + uv__cloexec(fds[1], 1); + + if (flags & UV__F_NONBLOCK) { + uv__nonblock(fds[0], 1); + uv__nonblock(fds[1], 1); + } + + return 0; +} + + +int uv__make_pipe(int fds[2], int flags) { +#if defined(__linux__) + static int no_pipe2; + + if (no_pipe2) + goto skip; + + if (uv__pipe2(fds, flags | UV__O_CLOEXEC) == 0) + return 0; + + if (errno != ENOSYS) + return -errno; + + no_pipe2 = 1; + +skip: +#endif + + if (pipe(fds)) + return -errno; + + uv__cloexec(fds[0], 1); + uv__cloexec(fds[1], 1); + + if (flags & UV__F_NONBLOCK) { + uv__nonblock(fds[0], 1); + uv__nonblock(fds[1], 1); + } + + return 0; +} + + +/* + * Used for initializing stdio streams like options.stdin_stream. Returns + * zero on success. See also the cleanup section in uv_spawn(). 
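From the caller's side, the stdio containers handled in this file are filled in roughly like this. A hedged sketch only: the command, spawn_with_pipe, and on_exit_cb are placeholders, and UV_WRITABLE_PIPE (direction from the child's perspective) comes from uv.h:

    #include <string.h>
    #include "uv.h"

    static void on_exit_cb(uv_process_t* proc,
                           int64_t exit_status,
                           int term_signal) {
      uv_close((uv_handle_t*) proc, NULL);
    }

    static int spawn_with_pipe(uv_loop_t* loop,
                               uv_process_t* proc,
                               uv_pipe_t* out_pipe) {
      uv_process_options_t options;
      uv_stdio_container_t stdio[3];
      char* args[] = { "cat", "/etc/hostname", NULL };  /* placeholder command */

      uv_pipe_init(loop, out_pipe, 0);

      memset(&options, 0, sizeof(options));
      options.file = args[0];
      options.args = args;
      options.exit_cb = on_exit_cb;

      stdio[0].flags = UV_IGNORE;                          /* child stdin  */
      stdio[1].flags = UV_CREATE_PIPE | UV_WRITABLE_PIPE;  /* child stdout */
      stdio[1].data.stream = (uv_stream_t*) out_pipe;
      stdio[2].flags = UV_INHERIT_FD;                      /* child stderr */
      stdio[2].data.fd = 2;

      options.stdio = stdio;
      options.stdio_count = 3;

      return uv_spawn(loop, proc, &options);
    }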
+ */ +static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) { + int mask; + int fd; + + mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM; + + switch (container->flags & mask) { + case UV_IGNORE: + return 0; + + case UV_CREATE_PIPE: + assert(container->data.stream != NULL); + if (container->data.stream->type != UV_NAMED_PIPE) + return -EINVAL; + else + return uv__make_socketpair(fds, 0); + + case UV_INHERIT_FD: + case UV_INHERIT_STREAM: + if (container->flags & UV_INHERIT_FD) + fd = container->data.fd; + else + fd = uv__stream_fd(container->data.stream); + + if (fd == -1) + return -EINVAL; + + fds[1] = fd; + return 0; + + default: + assert(0 && "Unexpected flags"); + return -EINVAL; + } +} + + +static int uv__process_open_stream(uv_stdio_container_t* container, + int pipefds[2], + int writable) { + int flags; + int err; + + if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0) + return 0; + + err = uv__close(pipefds[1]); + if (err != 0) + abort(); + + pipefds[1] = -1; + uv__nonblock(pipefds[0], 1); + + if (container->data.stream->type == UV_NAMED_PIPE && + ((uv_pipe_t*)container->data.stream)->ipc) + flags = UV_STREAM_READABLE | UV_STREAM_WRITABLE; + else if (writable) + flags = UV_STREAM_WRITABLE; + else + flags = UV_STREAM_READABLE; + + return uv__stream_open(container->data.stream, pipefds[0], flags); +} + + +static void uv__process_close_stream(uv_stdio_container_t* container) { + if (!(container->flags & UV_CREATE_PIPE)) return; + uv__stream_close((uv_stream_t*)container->data.stream); +} + + +static void uv__write_int(int fd, int val) { + ssize_t n; + + do + n = write(fd, &val, sizeof(val)); + while (n == -1 && errno == EINTR); + + if (n == -1 && errno == EPIPE) + return; /* parent process has quit */ + + assert(n == sizeof(val)); +} + + +#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)) +/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be + * avoided. Since this isn't called on those targets, the function + * doesn't even need to be defined for them. + */ +static void uv__process_child_init(const uv_process_options_t* options, + int stdio_count, + int (*pipes)[2], + int error_fd) { + int close_fd; + int use_fd; + int fd; + + if (options->flags & UV_PROCESS_DETACHED) + setsid(); + + /* First duplicate low numbered fds, since it's not safe to duplicate them, + * they could get replaced. Example: swapping stdout and stderr; without + * this fd 2 (stderr) would be duplicated into fd 1, thus making both + * stdout and stderr go to the same fd, which was not the intention. */ + for (fd = 0; fd < stdio_count; fd++) { + use_fd = pipes[fd][1]; + if (use_fd < 0 || use_fd >= fd) + continue; + pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count); + if (pipes[fd][1] == -1) { + uv__write_int(error_fd, -errno); + _exit(127); + } + } + + for (fd = 0; fd < stdio_count; fd++) { + close_fd = pipes[fd][0]; + use_fd = pipes[fd][1]; + + if (use_fd < 0) { + if (fd >= 3) + continue; + else { + /* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is + * set + */ + use_fd = open("/dev/null", fd == 0 ? 
O_RDONLY : O_RDWR); + close_fd = use_fd; + + if (use_fd == -1) { + uv__write_int(error_fd, -errno); + _exit(127); + } + } + } + + if (fd == use_fd) + uv__cloexec(use_fd, 0); + else + fd = dup2(use_fd, fd); + + if (fd == -1) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if (fd <= 2) + uv__nonblock(fd, 0); + + if (close_fd >= stdio_count) + uv__close(close_fd); + } + + for (fd = 0; fd < stdio_count; fd++) { + use_fd = pipes[fd][1]; + + if (use_fd >= stdio_count) + uv__close(use_fd); + } + + if (options->cwd != NULL && chdir(options->cwd)) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) { + /* When dropping privileges from root, the `setgroups` call will + * remove any extraneous groups. If we don't call this, then + * even though our uid has dropped, we may still have groups + * that enable us to do super-user things. This will fail if we + * aren't root, so don't bother checking the return value, this + * is just done as an optimistic privilege dropping function. + */ + SAVE_ERRNO(setgroups(0, NULL)); + } + + if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) { + uv__write_int(error_fd, -errno); + _exit(127); + } + + if (options->env != NULL) { + environ = options->env; + } + + execvp(options->file, options->args); + uv__write_int(error_fd, -errno); + _exit(127); +} +#endif + + +int uv_spawn(uv_loop_t* loop, + uv_process_t* process, + const uv_process_options_t* options) { +#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH) + /* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */ + return -ENOSYS; +#else + int signal_pipe[2] = { -1, -1 }; + int (*pipes)[2]; + int stdio_count; + ssize_t r; + pid_t pid; + int err; + int exec_errorno; + int i; + int status; + + assert(options->file != NULL); + assert(!(options->flags & ~(UV_PROCESS_DETACHED | + UV_PROCESS_SETGID | + UV_PROCESS_SETUID | + UV_PROCESS_WINDOWS_HIDE | + UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS))); + + uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS); + QUEUE_INIT(&process->queue); + + stdio_count = options->stdio_count; + if (stdio_count < 3) + stdio_count = 3; + + err = -ENOMEM; + pipes = uv__malloc(stdio_count * sizeof(*pipes)); + if (pipes == NULL) + goto error; + + for (i = 0; i < stdio_count; i++) { + pipes[i][0] = -1; + pipes[i][1] = -1; + } + + for (i = 0; i < options->stdio_count; i++) { + err = uv__process_init_stdio(options->stdio + i, pipes[i]); + if (err) + goto error; + } + + /* This pipe is used by the parent to wait until + * the child has called `execve()`. We need this + * to avoid the following race condition: + * + * if ((pid = fork()) > 0) { + * kill(pid, SIGTERM); + * } + * else if (pid == 0) { + * execve("/bin/cat", argp, envp); + * } + * + * The parent sends a signal immediately after forking. + * Since the child may not have called `execve()` yet, + * there is no telling what process receives the signal, + * our fork or /bin/cat. + * + * To avoid ambiguity, we create a pipe with both ends + * marked close-on-exec. Then, after the call to `fork()`, + * the parent polls the read end until it EOFs or errors with EPIPE. 
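Stripped of libuv specifics, the close-on-exec error-pipe pattern described above looks roughly like this. A hedged sketch in plain POSIX: real code would also retry on EINTR and waitpid() the failed child, as uv_spawn does below:

    #include <errno.h>
    #include <fcntl.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Returns the child's pid, or -1 with errno describing the exec failure. */
    static pid_t spawn_reporting_exec_errors(char* const argv[]) {
      int fds[2];
      int exec_errno;
      ssize_t n;
      pid_t pid;

      if (pipe(fds))
        return -1;

      /* Both ends close-on-exec: a successful execvp() closes the write end. */
      fcntl(fds[0], F_SETFD, FD_CLOEXEC);
      fcntl(fds[1], F_SETFD, FD_CLOEXEC);

      pid = fork();
      if (pid == -1) {
        close(fds[0]);
        close(fds[1]);
        return -1;
      }

      if (pid == 0) {
        execvp(argv[0], argv);
        exec_errno = errno;  /* only reached if exec failed */
        n = write(fds[1], &exec_errno, sizeof(exec_errno));
        (void) n;
        _exit(127);
      }

      close(fds[1]);
      n = read(fds[0], &exec_errno, sizeof(exec_errno));  /* EOF == success */
      close(fds[0]);

      if (n == (ssize_t) sizeof(exec_errno)) {
        errno = exec_errno;
        return -1;
      }

      return pid;
    }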
+ */ + err = uv__make_pipe(signal_pipe, 0); + if (err) + goto error; + + uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD); + + /* Acquire write lock to prevent opening new fds in worker threads */ + uv_rwlock_wrlock(&loop->cloexec_lock); + pid = fork(); + + if (pid == -1) { + err = -errno; + uv_rwlock_wrunlock(&loop->cloexec_lock); + uv__close(signal_pipe[0]); + uv__close(signal_pipe[1]); + goto error; + } + + if (pid == 0) { + uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]); + abort(); + } + + /* Release lock in parent process */ + uv_rwlock_wrunlock(&loop->cloexec_lock); + uv__close(signal_pipe[1]); + + process->status = 0; + exec_errorno = 0; + do + r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno)); + while (r == -1 && errno == EINTR); + + if (r == 0) + ; /* okay, EOF */ + else if (r == sizeof(exec_errorno)) { + do + err = waitpid(pid, &status, 0); /* okay, read errorno */ + while (err == -1 && errno == EINTR); + assert(err == pid); + } else if (r == -1 && errno == EPIPE) { + do + err = waitpid(pid, &status, 0); /* okay, got EPIPE */ + while (err == -1 && errno == EINTR); + assert(err == pid); + } else + abort(); + + uv__close_nocheckstdio(signal_pipe[0]); + + for (i = 0; i < options->stdio_count; i++) { + err = uv__process_open_stream(options->stdio + i, pipes[i], i == 0); + if (err == 0) + continue; + + while (i--) + uv__process_close_stream(options->stdio + i); + + goto error; + } + + /* Only activate this handle if exec() happened successfully */ + if (exec_errorno == 0) { + QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue); + uv__handle_start(process); + } + + process->pid = pid; + process->exit_cb = options->exit_cb; + + uv__free(pipes); + return exec_errorno; + +error: + if (pipes != NULL) { + for (i = 0; i < stdio_count; i++) { + if (i < options->stdio_count) + if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM)) + continue; + if (pipes[i][0] != -1) + uv__close_nocheckstdio(pipes[i][0]); + if (pipes[i][1] != -1) + uv__close_nocheckstdio(pipes[i][1]); + } + uv__free(pipes); + } + + return err; +#endif +} + + +int uv_process_kill(uv_process_t* process, int signum) { + return uv_kill(process->pid, signum); +} + + +int uv_kill(int pid, int signum) { + if (kill(pid, signum)) + return -errno; + else + return 0; +} + + +void uv__process_close(uv_process_t* handle) { + QUEUE_REMOVE(&handle->queue); + uv__handle_stop(handle); + if (QUEUE_EMPTY(&handle->loop->process_handles)) + uv_signal_stop(&handle->loop->child_watcher); +} diff --git a/deps/libuv/src/unix/proctitle.c b/deps/libuv/src/unix/proctitle.c new file mode 100644 index 00000000..08d875f7 --- /dev/null +++ b/deps/libuv/src/unix/proctitle.c @@ -0,0 +1,105 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include + +extern void uv__set_process_title(const char* title); + +static void* args_mem; + +static struct { + char* str; + size_t len; +} process_title; + + +char** uv_setup_args(int argc, char** argv) { + char** new_argv; + size_t size; + char* s; + int i; + + if (argc <= 0) + return argv; + + /* Calculate how much memory we need for the argv strings. */ + size = 0; + for (i = 0; i < argc; i++) + size += strlen(argv[i]) + 1; + + process_title.str = argv[0]; + process_title.len = argv[argc - 1] + strlen(argv[argc - 1]) - argv[0]; + assert(process_title.len + 1 == size); /* argv memory should be adjacent. */ + + /* Add space for the argv pointers. */ + size += (argc + 1) * sizeof(char*); + + new_argv = uv__malloc(size); + if (new_argv == NULL) + return argv; + args_mem = new_argv; + + /* Copy over the strings and set up the pointer table. */ + s = (char*) &new_argv[argc + 1]; + for (i = 0; i < argc; i++) { + size = strlen(argv[i]) + 1; + memcpy(s, argv[i], size); + new_argv[i] = s; + s += size; + } + new_argv[i] = NULL; + + return new_argv; +} + + +int uv_set_process_title(const char* title) { + if (process_title.len == 0) + return 0; + + /* No need to terminate, byte after is always '\0'. */ + strncpy(process_title.str, title, process_title.len); + uv__set_process_title(title); + + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + if (buffer == NULL || size == 0) + return -EINVAL; + else if (size <= process_title.len) + return -ENOBUFS; + + memcpy(buffer, process_title.str, process_title.len + 1); + buffer[process_title.len] = '\0'; + + return 0; +} + + +UV_DESTRUCTOR(static void free_args_mem(void)) { + uv__free(args_mem); /* Keep valgrind happy. */ + args_mem = NULL; +} diff --git a/deps/libuv/src/unix/pthread-barrier.c b/deps/libuv/src/unix/pthread-barrier.c new file mode 100644 index 00000000..f57bf250 --- /dev/null +++ b/deps/libuv/src/unix/pthread-barrier.c @@ -0,0 +1,120 @@ +/* +Copyright (c) 2016, Kari Tristan Helgason + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
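pthread-barrier.c below supplies a fallback barrier for platforms that lack pthread_barrier_t; callers drive it exactly like the native API. A minimal usage sketch (worker and NUM_WORKERS are placeholders):

    #include <pthread.h>
    #include <stdio.h>

    #define NUM_WORKERS 4

    static pthread_barrier_t barrier;

    static void* worker(void* arg) {
      (void) arg;
      /* ... per-thread setup ... */
      if (pthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD)
        printf("all %d workers reached the barrier\n", NUM_WORKERS);
      /* ... work that requires every thread to be initialized ... */
      return NULL;
    }

    int main(void) {
      pthread_t threads[NUM_WORKERS];
      int i;

      pthread_barrier_init(&barrier, NULL, NUM_WORKERS);
      for (i = 0; i < NUM_WORKERS; i++)
        pthread_create(&threads[i], NULL, worker, NULL);
      for (i = 0; i < NUM_WORKERS; i++)
        pthread_join(threads[i], NULL);
      pthread_barrier_destroy(&barrier);
      return 0;
    }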
+*/ +#include "uv-common.h" +#include "pthread-barrier.h" + +#include +#include + +/* TODO: support barrier_attr */ +int pthread_barrier_init(pthread_barrier_t* barrier, + const void* barrier_attr, + unsigned count) { + int rc; + _uv_barrier* b; + + if (barrier == NULL || count == 0) + return EINVAL; + + if (barrier_attr != NULL) + return ENOTSUP; + + b = uv__malloc(sizeof(*b)); + if (b == NULL) + return ENOMEM; + + b->in = 0; + b->out = 0; + b->threshold = count; + + if ((rc = pthread_mutex_init(&b->mutex, NULL)) != 0) + goto error2; + if ((rc = pthread_cond_init(&b->cond, NULL)) != 0) + goto error; + + barrier->b = b; + return 0; + +error: + pthread_mutex_destroy(&b->mutex); +error2: + uv__free(b); + return rc; +} + +int pthread_barrier_wait(pthread_barrier_t* barrier) { + int rc; + _uv_barrier* b; + + if (barrier == NULL || barrier->b == NULL) + return EINVAL; + + b = barrier->b; + /* Lock the mutex*/ + if ((rc = pthread_mutex_lock(&b->mutex)) != 0) + return rc; + + /* Increment the count. If this is the first thread to reach the threshold, + wake up waiters, unlock the mutex, then return + PTHREAD_BARRIER_SERIAL_THREAD. */ + if (++b->in == b->threshold) { + b->in = 0; + b->out = b->threshold - 1; + assert(pthread_cond_signal(&b->cond) == 0); + + pthread_mutex_unlock(&b->mutex); + return PTHREAD_BARRIER_SERIAL_THREAD; + } + /* Otherwise, wait for other threads until in is set to 0, + then return 0 to indicate this is not the first thread. */ + do { + if ((rc = pthread_cond_wait(&b->cond, &b->mutex)) != 0) + break; + } while (b->in != 0); + + /* mark thread exit */ + b->out--; + pthread_cond_signal(&b->cond); + pthread_mutex_unlock(&b->mutex); + return rc; +} + +int pthread_barrier_destroy(pthread_barrier_t* barrier) { + int rc; + _uv_barrier* b; + + if (barrier == NULL || barrier->b == NULL) + return EINVAL; + + b = barrier->b; + + if ((rc = pthread_mutex_lock(&b->mutex)) != 0) + return rc; + + if (b->in > 0 || b->out > 0) + rc = EBUSY; + + pthread_mutex_unlock(&b->mutex); + + if (rc) + return rc; + + pthread_cond_destroy(&b->cond); + pthread_mutex_destroy(&b->mutex); + uv__free(barrier->b); + barrier->b = NULL; + return 0; +} diff --git a/deps/libuv/src/unix/pthread-fixes.c b/deps/libuv/src/unix/pthread-fixes.c new file mode 100644 index 00000000..fb179958 --- /dev/null +++ b/deps/libuv/src/unix/pthread-fixes.c @@ -0,0 +1,56 @@ +/* Copyright (c) 2013, Sony Mobile Communications AB + * Copyright (c) 2012, Google Inc. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/* Android versions < 4.1 have a broken pthread_sigmask. */ +#include +#include +#include + +int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) { + static int workaround; + int err; + + if (workaround) { + return sigprocmask(how, set, oset); + } else { + err = pthread_sigmask(how, set, oset); + if (err) { + if (err == EINVAL && sigprocmask(how, set, oset) == 0) { + workaround = 1; + return 0; + } else { + return -1; + } + } + } + + return 0; +} diff --git a/deps/libuv/src/unix/signal.c b/deps/libuv/src/unix/signal.c new file mode 100644 index 00000000..ccc1847a --- /dev/null +++ b/deps/libuv/src/unix/signal.c @@ -0,0 +1,467 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include + + +typedef struct { + uv_signal_t* handle; + int signum; +} uv__signal_msg_t; + +RB_HEAD(uv__signal_tree_s, uv_signal_s); + + +static int uv__signal_unlock(void); +static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events); +static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2); +static void uv__signal_stop(uv_signal_t* handle); + + +static uv_once_t uv__signal_global_init_guard = UV_ONCE_INIT; +static struct uv__signal_tree_s uv__signal_tree = + RB_INITIALIZER(uv__signal_tree); +static int uv__signal_lock_pipefd[2]; + + +RB_GENERATE_STATIC(uv__signal_tree_s, + uv_signal_s, tree_entry, + uv__signal_compare) + + +static void uv__signal_global_init(void) { + if (uv__make_pipe(uv__signal_lock_pipefd, 0)) + abort(); + + if (uv__signal_unlock()) + abort(); +} + + +void uv__signal_global_once_init(void) { + uv_once(&uv__signal_global_init_guard, uv__signal_global_init); +} + + + +static int uv__signal_lock(void) { + int r; + char data; + + do { + r = read(uv__signal_lock_pipefd[0], &data, sizeof data); + } while (r < 0 && errno == EINTR); + + return (r < 0) ? -1 : 0; +} + + +static int uv__signal_unlock(void) { + int r; + char data = 42; + + do { + r = write(uv__signal_lock_pipefd[1], &data, sizeof data); + } while (r < 0 && errno == EINTR); + + return (r < 0) ? -1 : 0; +} + + +static void uv__signal_block_and_lock(sigset_t* saved_sigmask) { + sigset_t new_mask; + + if (sigfillset(&new_mask)) + abort(); + + if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask)) + abort(); + + if (uv__signal_lock()) + abort(); +} + + +static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) { + if (uv__signal_unlock()) + abort(); + + if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL)) + abort(); +} + + +static uv_signal_t* uv__signal_first_handle(int signum) { + /* This function must be called with the signal lock held. */ + uv_signal_t lookup; + uv_signal_t* handle; + + lookup.signum = signum; + lookup.loop = NULL; + + handle = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &lookup); + + if (handle != NULL && handle->signum == signum) + return handle; + + return NULL; +} + + +static void uv__signal_handler(int signum) { + uv__signal_msg_t msg; + uv_signal_t* handle; + int saved_errno; + + saved_errno = errno; + memset(&msg, 0, sizeof msg); + + if (uv__signal_lock()) { + errno = saved_errno; + return; + } + + for (handle = uv__signal_first_handle(signum); + handle != NULL && handle->signum == signum; + handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) { + int r; + + msg.signum = signum; + msg.handle = handle; + + /* write() should be atomic for small data chunks, so the entire message + * should be written at once. In theory the pipe could become full, in + * which case the user is out of luck. + */ + do { + r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg); + } while (r == -1 && errno == EINTR); + + assert(r == sizeof msg || + (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))); + + if (r != -1) + handle->caught_signals++; + } + + uv__signal_unlock(); + errno = saved_errno; +} + + +static int uv__signal_register_handler(int signum) { + /* When this function is called, the signal lock must be held. */ + struct sigaction sa; + + /* XXX use a separate signal stack? */ + memset(&sa, 0, sizeof(sa)); + if (sigfillset(&sa.sa_mask)) + abort(); + sa.sa_handler = uv__signal_handler; + + /* XXX save old action so we can restore it later on? 
*/ + if (sigaction(signum, &sa, NULL)) + return -errno; + + return 0; +} + + +static void uv__signal_unregister_handler(int signum) { + /* When this function is called, the signal lock must be held. */ + struct sigaction sa; + + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = SIG_DFL; + + /* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a + * signal implies that it was successfully registered earlier, so EINVAL + * should never happen. + */ + if (sigaction(signum, &sa, NULL)) + abort(); +} + + +static int uv__signal_loop_once_init(uv_loop_t* loop) { + int err; + + /* Return if already initialized. */ + if (loop->signal_pipefd[0] != -1) + return 0; + + err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK); + if (err) + return err; + + uv__io_init(&loop->signal_io_watcher, + uv__signal_event, + loop->signal_pipefd[0]); + uv__io_start(loop, &loop->signal_io_watcher, POLLIN); + + return 0; +} + + +void uv__signal_loop_cleanup(uv_loop_t* loop) { + QUEUE* q; + + /* Stop all the signal watchers that are still attached to this loop. This + * ensures that the (shared) signal tree doesn't contain any invalid entries + * entries, and that signal handlers are removed when appropriate. + * It's safe to use QUEUE_FOREACH here because the handles and the handle + * queue are not modified by uv__signal_stop(). + */ + QUEUE_FOREACH(q, &loop->handle_queue) { + uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue); + + if (handle->type == UV_SIGNAL) + uv__signal_stop((uv_signal_t*) handle); + } + + if (loop->signal_pipefd[0] != -1) { + uv__close(loop->signal_pipefd[0]); + loop->signal_pipefd[0] = -1; + } + + if (loop->signal_pipefd[1] != -1) { + uv__close(loop->signal_pipefd[1]); + loop->signal_pipefd[1] = -1; + } +} + + +int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) { + int err; + + err = uv__signal_loop_once_init(loop); + if (err) + return err; + + uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL); + handle->signum = 0; + handle->caught_signals = 0; + handle->dispatched_signals = 0; + + return 0; +} + + +void uv__signal_close(uv_signal_t* handle) { + + uv__signal_stop(handle); + + /* If there are any caught signals "trapped" in the signal pipe, we can't + * call the close callback yet. Otherwise, add the handle to the finish_close + * queue. + */ + if (handle->caught_signals == handle->dispatched_signals) { + uv__make_close_pending((uv_handle_t*) handle); + } +} + + +int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) { + sigset_t saved_sigmask; + int err; + + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + + /* If the user supplies signum == 0, then return an error already. If the + * signum is otherwise invalid then uv__signal_register will find out + * eventually. + */ + if (signum == 0) + return -EINVAL; + + /* Short circuit: if the signal watcher is already watching {signum} don't + * go through the process of deregistering and registering the handler. + * Additionally, this avoids pending signals getting lost in the small time + * time frame that handle->signum == 0. + */ + if (signum == handle->signum) { + handle->signal_cb = signal_cb; + return 0; + } + + /* If the signal handler was already active, stop it first. */ + if (handle->signum != 0) { + uv__signal_stop(handle); + } + + uv__signal_block_and_lock(&saved_sigmask); + + /* If at this point there are no active signal watchers for this signum (in + * any of the loops), it's time to try and register a handler for it here. 
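From the application's point of view all of this machinery reduces to a very small surface. A hedged usage sketch (on_sigint is a placeholder; the watcher stops the loop on the first SIGINT):

    #include <signal.h>
    #include <stdio.h>
    #include "uv.h"

    static void on_sigint(uv_signal_t* handle, int signum) {
      (void) signum;
      printf("got SIGINT, shutting down\n");
      uv_signal_stop(handle);
      uv_stop(handle->loop);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_signal_t sig;

      uv_signal_init(loop, &sig);
      uv_signal_start(&sig, on_sigint, SIGINT);

      return uv_run(loop, UV_RUN_DEFAULT);
    }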
+ */ + if (uv__signal_first_handle(signum) == NULL) { + err = uv__signal_register_handler(signum); + if (err) { + /* Registering the signal handler failed. Must be an invalid signal. */ + uv__signal_unlock_and_unblock(&saved_sigmask); + return err; + } + } + + handle->signum = signum; + RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle); + + uv__signal_unlock_and_unblock(&saved_sigmask); + + handle->signal_cb = signal_cb; + uv__handle_start(handle); + + return 0; +} + + +static void uv__signal_event(uv_loop_t* loop, + uv__io_t* w, + unsigned int events) { + uv__signal_msg_t* msg; + uv_signal_t* handle; + char buf[sizeof(uv__signal_msg_t) * 32]; + size_t bytes, end, i; + int r; + + bytes = 0; + end = 0; + + do { + r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes); + + if (r == -1 && errno == EINTR) + continue; + + if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) { + /* If there are bytes in the buffer already (which really is extremely + * unlikely if possible at all) we can't exit the function here. We'll + * spin until more bytes are read instead. + */ + if (bytes > 0) + continue; + + /* Otherwise, there was nothing there. */ + return; + } + + /* Other errors really should never happen. */ + if (r == -1) + abort(); + + bytes += r; + + /* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */ + end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t); + + for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) { + msg = (uv__signal_msg_t*) (buf + i); + handle = msg->handle; + + if (msg->signum == handle->signum) { + assert(!(handle->flags & UV_CLOSING)); + handle->signal_cb(handle, handle->signum); + } + + handle->dispatched_signals++; + + /* If uv_close was called while there were caught signals that were not + * yet dispatched, the uv__finish_close was deferred. Make close pending + * now if this has happened. + */ + if ((handle->flags & UV_CLOSING) && + (handle->caught_signals == handle->dispatched_signals)) { + uv__make_close_pending((uv_handle_t*) handle); + } + } + + bytes -= end; + + /* If there are any "partial" messages left, move them to the start of the + * the buffer, and spin. This should not happen. + */ + if (bytes) { + memmove(buf, buf + end, bytes); + continue; + } + } while (end == sizeof buf); +} + + +static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) { + /* Compare signums first so all watchers with the same signnum end up + * adjacent. + */ + if (w1->signum < w2->signum) return -1; + if (w1->signum > w2->signum) return 1; + + /* Sort by loop pointer, so we can easily look up the first item after + * { .signum = x, .loop = NULL }. + */ + if (w1->loop < w2->loop) return -1; + if (w1->loop > w2->loop) return 1; + + if (w1 < w2) return -1; + if (w1 > w2) return 1; + + return 0; +} + + +int uv_signal_stop(uv_signal_t* handle) { + assert(!(handle->flags & (UV_CLOSING | UV_CLOSED))); + uv__signal_stop(handle); + return 0; +} + + +static void uv__signal_stop(uv_signal_t* handle) { + uv_signal_t* removed_handle; + sigset_t saved_sigmask; + + /* If the watcher wasn't started, this is a no-op. */ + if (handle->signum == 0) + return; + + uv__signal_block_and_lock(&saved_sigmask); + + removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle); + assert(removed_handle == handle); + (void) removed_handle; + + /* Check if there are other active signal watchers observing this signal. If + * not, unregister the signal handler. 
+ */ + if (uv__signal_first_handle(handle->signum) == NULL) + uv__signal_unregister_handler(handle->signum); + + uv__signal_unlock_and_unblock(&saved_sigmask); + + handle->signum = 0; + uv__handle_stop(handle); +} diff --git a/deps/libuv/src/unix/spinlock.h b/deps/libuv/src/unix/spinlock.h new file mode 100644 index 00000000..a20c83cc --- /dev/null +++ b/deps/libuv/src/unix/spinlock.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2013, Ben Noordhuis + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef UV_SPINLOCK_H_ +#define UV_SPINLOCK_H_ + +#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */ +#include "atomic-ops.h" + +#define UV_SPINLOCK_INITIALIZER { 0 } + +typedef struct { + int lock; +} uv_spinlock_t; + +UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)); +UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)); +UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)); +UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)); + +UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) { + ACCESS_ONCE(int, spinlock->lock) = 0; +} + +UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) { + while (!uv_spinlock_trylock(spinlock)) cpu_relax(); +} + +UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) { + ACCESS_ONCE(int, spinlock->lock) = 0; +} + +UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) { + /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing. + * Not really critical until we have locks that are (frequently) contended + * for by several threads. + */ + return 0 == cmpxchgi(&spinlock->lock, 0, 1); +} + +#endif /* UV_SPINLOCK_H_ */ diff --git a/deps/libuv/src/unix/stream.c b/deps/libuv/src/unix/stream.c new file mode 100644 index 00000000..d20d0bcb --- /dev/null +++ b/deps/libuv/src/unix/stream.c @@ -0,0 +1,1638 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include /* IOV_MAX */ + +#if defined(__APPLE__) +# include +# include +# include + +/* Forward declaration */ +typedef struct uv__stream_select_s uv__stream_select_t; + +struct uv__stream_select_s { + uv_stream_t* stream; + uv_thread_t thread; + uv_sem_t close_sem; + uv_sem_t async_sem; + uv_async_t async; + int events; + int fake_fd; + int int_fd; + int fd; + fd_set* sread; + size_t sread_sz; + fd_set* swrite; + size_t swrite_sz; +}; +#endif /* defined(__APPLE__) */ + +static void uv__stream_connect(uv_stream_t*); +static void uv__write(uv_stream_t* stream); +static void uv__read(uv_stream_t* stream); +static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events); +static void uv__write_callbacks(uv_stream_t* stream); +static size_t uv__write_req_size(uv_write_t* req); + + +void uv__stream_init(uv_loop_t* loop, + uv_stream_t* stream, + uv_handle_type type) { + int err; + + uv__handle_init(loop, (uv_handle_t*)stream, type); + stream->read_cb = NULL; + stream->alloc_cb = NULL; + stream->close_cb = NULL; + stream->connection_cb = NULL; + stream->connect_req = NULL; + stream->shutdown_req = NULL; + stream->accepted_fd = -1; + stream->queued_fds = NULL; + stream->delayed_error = 0; + QUEUE_INIT(&stream->write_queue); + QUEUE_INIT(&stream->write_completed_queue); + stream->write_queue_size = 0; + + if (loop->emfile_fd == -1) { + err = uv__open_cloexec("/dev/null", O_RDONLY); + if (err < 0) + /* In the rare case that "/dev/null" isn't mounted open "/" + * instead. 
+ */ + err = uv__open_cloexec("/", O_RDONLY); + if (err >= 0) + loop->emfile_fd = err; + } + +#if defined(__APPLE__) + stream->select = NULL; +#endif /* defined(__APPLE_) */ + + uv__io_init(&stream->io_watcher, uv__stream_io, -1); +} + + +static void uv__stream_osx_interrupt_select(uv_stream_t* stream) { +#if defined(__APPLE__) + /* Notify select() thread about state change */ + uv__stream_select_t* s; + int r; + + s = stream->select; + if (s == NULL) + return; + + /* Interrupt select() loop + * NOTE: fake_fd and int_fd are socketpair(), thus writing to one will + * emit read event on other side + */ + do + r = write(s->fake_fd, "x", 1); + while (r == -1 && errno == EINTR); + + assert(r == 1); +#else /* !defined(__APPLE__) */ + /* No-op on any other platform */ +#endif /* !defined(__APPLE__) */ +} + + +#if defined(__APPLE__) +static void uv__stream_osx_select(void* arg) { + uv_stream_t* stream; + uv__stream_select_t* s; + char buf[1024]; + int events; + int fd; + int r; + int max_fd; + + stream = arg; + s = stream->select; + fd = s->fd; + + if (fd > s->int_fd) + max_fd = fd; + else + max_fd = s->int_fd; + + while (1) { + /* Terminate on semaphore */ + if (uv_sem_trywait(&s->close_sem) == 0) + break; + + /* Watch fd using select(2) */ + memset(s->sread, 0, s->sread_sz); + memset(s->swrite, 0, s->swrite_sz); + + if (uv__io_active(&stream->io_watcher, POLLIN)) + FD_SET(fd, s->sread); + if (uv__io_active(&stream->io_watcher, POLLOUT)) + FD_SET(fd, s->swrite); + FD_SET(s->int_fd, s->sread); + + /* Wait indefinitely for fd events */ + r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL); + if (r == -1) { + if (errno == EINTR) + continue; + + /* XXX: Possible?! */ + abort(); + } + + /* Ignore timeouts */ + if (r == 0) + continue; + + /* Empty socketpair's buffer in case of interruption */ + if (FD_ISSET(s->int_fd, s->sread)) + while (1) { + r = read(s->int_fd, buf, sizeof(buf)); + + if (r == sizeof(buf)) + continue; + + if (r != -1) + break; + + if (errno == EAGAIN || errno == EWOULDBLOCK) + break; + + if (errno == EINTR) + continue; + + abort(); + } + + /* Handle events */ + events = 0; + if (FD_ISSET(fd, s->sread)) + events |= POLLIN; + if (FD_ISSET(fd, s->swrite)) + events |= POLLOUT; + + assert(events != 0 || FD_ISSET(s->int_fd, s->sread)); + if (events != 0) { + ACCESS_ONCE(int, s->events) = events; + + uv_async_send(&s->async); + uv_sem_wait(&s->async_sem); + + /* Should be processed at this stage */ + assert((s->events == 0) || (stream->flags & UV_CLOSING)); + } + } +} + + +static void uv__stream_osx_select_cb(uv_async_t* handle) { + uv__stream_select_t* s; + uv_stream_t* stream; + int events; + + s = container_of(handle, uv__stream_select_t, async); + stream = s->stream; + + /* Get and reset stream's events */ + events = s->events; + ACCESS_ONCE(int, s->events) = 0; + + assert(events != 0); + assert(events == (events & (POLLIN | POLLOUT))); + + /* Invoke callback on event-loop */ + if ((events & POLLIN) && uv__io_active(&stream->io_watcher, POLLIN)) + uv__stream_io(stream->loop, &stream->io_watcher, POLLIN); + + if ((events & POLLOUT) && uv__io_active(&stream->io_watcher, POLLOUT)) + uv__stream_io(stream->loop, &stream->io_watcher, POLLOUT); + + if (stream->flags & UV_CLOSING) + return; + + /* NOTE: It is important to do it here, otherwise `select()` might be called + * before the actual `uv__read()`, leading to the blocking syscall + */ + uv_sem_post(&s->async_sem); +} + + +static void uv__stream_osx_cb_close(uv_handle_t* async) { + uv__stream_select_t* s; + + s = container_of(async, 
uv__stream_select_t, async); + uv__free(s); +} + + +int uv__stream_try_select(uv_stream_t* stream, int* fd) { + /* + * kqueue doesn't work with some files from /dev mount on osx. + * select(2) in separate thread for those fds + */ + + struct kevent filter[1]; + struct kevent events[1]; + struct timespec timeout; + uv__stream_select_t* s; + int fds[2]; + int err; + int ret; + int kq; + int old_fd; + int max_fd; + size_t sread_sz; + size_t swrite_sz; + + kq = kqueue(); + if (kq == -1) { + perror("(libuv) kqueue()"); + return -errno; + } + + EV_SET(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0); + + /* Use small timeout, because we only want to capture EINVALs */ + timeout.tv_sec = 0; + timeout.tv_nsec = 1; + + do + ret = kevent(kq, filter, 1, events, 1, &timeout); + while (ret == -1 && errno == EINTR); + + uv__close(kq); + + if (ret == -1) + return -errno; + + if (ret == 0 || (events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL) + return 0; + + /* At this point we definitely know that this fd won't work with kqueue */ + + /* + * Create fds for io watcher and to interrupt the select() loop. + * NOTE: do it ahead of malloc below to allocate enough space for fd_sets + */ + if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds)) + return -errno; + + max_fd = *fd; + if (fds[1] > max_fd) + max_fd = fds[1]; + + sread_sz = ROUND_UP(max_fd + 1, sizeof(uint32_t) * NBBY) / NBBY; + swrite_sz = sread_sz; + + s = uv__malloc(sizeof(*s) + sread_sz + swrite_sz); + if (s == NULL) { + err = -ENOMEM; + goto failed_malloc; + } + + s->events = 0; + s->fd = *fd; + s->sread = (fd_set*) ((char*) s + sizeof(*s)); + s->sread_sz = sread_sz; + s->swrite = (fd_set*) ((char*) s->sread + sread_sz); + s->swrite_sz = swrite_sz; + + err = uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb); + if (err) + goto failed_async_init; + + s->async.flags |= UV__HANDLE_INTERNAL; + uv__handle_unref(&s->async); + + err = uv_sem_init(&s->close_sem, 0); + if (err != 0) + goto failed_close_sem_init; + + err = uv_sem_init(&s->async_sem, 0); + if (err != 0) + goto failed_async_sem_init; + + s->fake_fd = fds[0]; + s->int_fd = fds[1]; + + old_fd = *fd; + s->stream = stream; + stream->select = s; + *fd = s->fake_fd; + + err = uv_thread_create(&s->thread, uv__stream_osx_select, stream); + if (err != 0) + goto failed_thread_create; + + return 0; + +failed_thread_create: + s->stream = NULL; + stream->select = NULL; + *fd = old_fd; + + uv_sem_destroy(&s->async_sem); + +failed_async_sem_init: + uv_sem_destroy(&s->close_sem); + +failed_close_sem_init: + uv__close(fds[0]); + uv__close(fds[1]); + uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close); + return err; + +failed_async_init: + uv__free(s); + +failed_malloc: + uv__close(fds[0]); + uv__close(fds[1]); + + return err; +} +#endif /* defined(__APPLE__) */ + + +int uv__stream_open(uv_stream_t* stream, int fd, int flags) { +#if defined(__APPLE__) + int enable; +#endif + + if (!(stream->io_watcher.fd == -1 || stream->io_watcher.fd == fd)) + return -EBUSY; + + assert(fd >= 0); + stream->flags |= flags; + + if (stream->type == UV_TCP) { + if ((stream->flags & UV_TCP_NODELAY) && uv__tcp_nodelay(fd, 1)) + return -errno; + + /* TODO Use delay the user passed in. 
*/ + if ((stream->flags & UV_TCP_KEEPALIVE) && uv__tcp_keepalive(fd, 1, 60)) + return -errno; + } + +#if defined(__APPLE__) + enable = 1; + if (setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &enable, sizeof(enable)) && + errno != ENOTSOCK && + errno != EINVAL) { + return -errno; + } +#endif + + stream->io_watcher.fd = fd; + + return 0; +} + + +void uv__stream_flush_write_queue(uv_stream_t* stream, int error) { + uv_write_t* req; + QUEUE* q; + while (!QUEUE_EMPTY(&stream->write_queue)) { + q = QUEUE_HEAD(&stream->write_queue); + QUEUE_REMOVE(q); + + req = QUEUE_DATA(q, uv_write_t, queue); + req->error = error; + + QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); + } +} + + +void uv__stream_destroy(uv_stream_t* stream) { + assert(!uv__io_active(&stream->io_watcher, POLLIN | POLLOUT)); + assert(stream->flags & UV_CLOSED); + + if (stream->connect_req) { + uv__req_unregister(stream->loop, stream->connect_req); + stream->connect_req->cb(stream->connect_req, -ECANCELED); + stream->connect_req = NULL; + } + + uv__stream_flush_write_queue(stream, -ECANCELED); + uv__write_callbacks(stream); + + if (stream->shutdown_req) { + /* The ECANCELED error code is a lie, the shutdown(2) syscall is a + * fait accompli at this point. Maybe we should revisit this in v0.11. + * A possible reason for leaving it unchanged is that it informs the + * callee that the handle has been destroyed. + */ + uv__req_unregister(stream->loop, stream->shutdown_req); + stream->shutdown_req->cb(stream->shutdown_req, -ECANCELED); + stream->shutdown_req = NULL; + } + + assert(stream->write_queue_size == 0); +} + + +/* Implements a best effort approach to mitigating accept() EMFILE errors. + * We have a spare file descriptor stashed away that we close to get below + * the EMFILE limit. Next, we accept all pending connections and close them + * immediately to signal the clients that we're overloaded - and we are, but + * we still keep on trucking. + * + * There is one caveat: it's not reliable in a multi-threaded environment. + * The file descriptor limit is per process. Our party trick fails if another + * thread opens a file or creates a socket in the time window between us + * calling close() and accept(). + */ +static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) { + int err; + int emfile_fd; + + if (loop->emfile_fd == -1) + return -EMFILE; + + uv__close(loop->emfile_fd); + loop->emfile_fd = -1; + + do { + err = uv__accept(accept_fd); + if (err >= 0) + uv__close(err); + } while (err >= 0 || err == -EINTR); + + emfile_fd = uv__open_cloexec("/", O_RDONLY); + if (emfile_fd >= 0) + loop->emfile_fd = emfile_fd; + + return err; +} + + +#if defined(UV_HAVE_KQUEUE) +# define UV_DEC_BACKLOG(w) w->rcount--; +#else +# define UV_DEC_BACKLOG(w) /* no-op */ +#endif /* defined(UV_HAVE_KQUEUE) */ + + +void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + uv_stream_t* stream; + int err; + + stream = container_of(w, uv_stream_t, io_watcher); + assert(events == POLLIN); + assert(stream->accepted_fd == -1); + assert(!(stream->flags & UV_CLOSING)); + + uv__io_start(stream->loop, &stream->io_watcher, POLLIN); + + /* connection_cb can close the server socket while we're + * in the loop so check it on each iteration. 
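The connection_cb referred to here is the user callback passed to uv_listen(); a typical implementation accepts the pending connection right away. A hedged sketch, assuming a TCP server (on_connection and on_close are placeholder names; a real server would follow up with uv_read_start()):

    #include <stdlib.h>
    #include "uv.h"

    static void on_close(uv_handle_t* handle) {
      free(handle);
    }

    static void on_connection(uv_stream_t* server, int status) {
      uv_tcp_t* client;

      if (status < 0)
        return;  /* e.g. -EMFILE propagated from the accept loop above */

      client = malloc(sizeof(*client));
      if (client == NULL)
        return;

      uv_tcp_init(server->loop, client);

      if (uv_accept(server, (uv_stream_t*) client) == 0) {
        /* start reading from the client, uv_read_start(...), etc. */
      } else {
        uv_close((uv_handle_t*) client, on_close);
      }
    }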
+ */ + while (uv__stream_fd(stream) != -1) { + assert(stream->accepted_fd == -1); + +#if defined(UV_HAVE_KQUEUE) + if (w->rcount <= 0) + return; +#endif /* defined(UV_HAVE_KQUEUE) */ + + err = uv__accept(uv__stream_fd(stream)); + if (err < 0) { + if (err == -EAGAIN || err == -EWOULDBLOCK) + return; /* Not an error. */ + + if (err == -ECONNABORTED) + continue; /* Ignore. Nothing we can do about that. */ + + if (err == -EMFILE || err == -ENFILE) { + err = uv__emfile_trick(loop, uv__stream_fd(stream)); + if (err == -EAGAIN || err == -EWOULDBLOCK) + break; + } + + stream->connection_cb(stream, err); + continue; + } + + UV_DEC_BACKLOG(w) + stream->accepted_fd = err; + stream->connection_cb(stream, 0); + + if (stream->accepted_fd != -1) { + /* The user hasn't yet accepted called uv_accept() */ + uv__io_stop(loop, &stream->io_watcher, POLLIN); + return; + } + + if (stream->type == UV_TCP && (stream->flags & UV_TCP_SINGLE_ACCEPT)) { + /* Give other processes a chance to accept connections. */ + struct timespec timeout = { 0, 1 }; + nanosleep(&timeout, NULL); + } + } +} + + +#undef UV_DEC_BACKLOG + + +int uv_accept(uv_stream_t* server, uv_stream_t* client) { + int err; + + assert(server->loop == client->loop); + + if (server->accepted_fd == -1) + return -EAGAIN; + + switch (client->type) { + case UV_NAMED_PIPE: + case UV_TCP: + err = uv__stream_open(client, + server->accepted_fd, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); + if (err) { + /* TODO handle error */ + uv__close(server->accepted_fd); + goto done; + } + break; + + case UV_UDP: + err = uv_udp_open((uv_udp_t*) client, server->accepted_fd); + if (err) { + uv__close(server->accepted_fd); + goto done; + } + break; + + default: + return -EINVAL; + } + + client->flags |= UV_HANDLE_BOUND; + +done: + /* Process queued fds */ + if (server->queued_fds != NULL) { + uv__stream_queued_fds_t* queued_fds; + + queued_fds = server->queued_fds; + + /* Read first */ + server->accepted_fd = queued_fds->fds[0]; + + /* All read, free */ + assert(queued_fds->offset > 0); + if (--queued_fds->offset == 0) { + uv__free(queued_fds); + server->queued_fds = NULL; + } else { + /* Shift rest */ + memmove(queued_fds->fds, + queued_fds->fds + 1, + queued_fds->offset * sizeof(*queued_fds->fds)); + } + } else { + server->accepted_fd = -1; + if (err == 0) + uv__io_start(server->loop, &server->io_watcher, POLLIN); + } + return err; +} + + +int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) { + int err; + + switch (stream->type) { + case UV_TCP: + err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb); + break; + + case UV_NAMED_PIPE: + err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb); + break; + + default: + err = -EINVAL; + } + + if (err == 0) + uv__handle_start(stream); + + return err; +} + + +static void uv__drain(uv_stream_t* stream) { + uv_shutdown_t* req; + int err; + + assert(QUEUE_EMPTY(&stream->write_queue)); + uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); + uv__stream_osx_interrupt_select(stream); + + /* Shutdown? 
*/ + if ((stream->flags & UV_STREAM_SHUTTING) && + !(stream->flags & UV_CLOSING) && + !(stream->flags & UV_STREAM_SHUT)) { + assert(stream->shutdown_req); + + req = stream->shutdown_req; + stream->shutdown_req = NULL; + stream->flags &= ~UV_STREAM_SHUTTING; + uv__req_unregister(stream->loop, req); + + err = 0; + if (shutdown(uv__stream_fd(stream), SHUT_WR)) + err = -errno; + + if (err == 0) + stream->flags |= UV_STREAM_SHUT; + + if (req->cb != NULL) + req->cb(req, err); + } +} + + +static size_t uv__write_req_size(uv_write_t* req) { + size_t size; + + assert(req->bufs != NULL); + size = uv__count_bufs(req->bufs + req->write_index, + req->nbufs - req->write_index); + assert(req->handle->write_queue_size >= size); + + return size; +} + + +static void uv__write_req_finish(uv_write_t* req) { + uv_stream_t* stream = req->handle; + + /* Pop the req off tcp->write_queue. */ + QUEUE_REMOVE(&req->queue); + + /* Only free when there was no error. On error, we touch up write_queue_size + * right before making the callback. The reason we don't do that right away + * is that a write_queue_size > 0 is our only way to signal to the user that + * they should stop writing - which they should if we got an error. Something + * to revisit in future revisions of the libuv API. + */ + if (req->error == 0) { + if (req->bufs != req->bufsml) + uv__free(req->bufs); + req->bufs = NULL; + } + + /* Add it to the write_completed_queue where it will have its + * callback called in the near future. + */ + QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); + uv__io_feed(stream->loop, &stream->io_watcher); +} + + +static int uv__handle_fd(uv_handle_t* handle) { + switch (handle->type) { + case UV_NAMED_PIPE: + case UV_TCP: + return ((uv_stream_t*) handle)->io_watcher.fd; + + case UV_UDP: + return ((uv_udp_t*) handle)->io_watcher.fd; + + default: + return -1; + } +} + +static void uv__write(uv_stream_t* stream) { + struct iovec* iov; + QUEUE* q; + uv_write_t* req; + int iovmax; + int iovcnt; + ssize_t n; + +start: + + assert(uv__stream_fd(stream) >= 0); + + if (QUEUE_EMPTY(&stream->write_queue)) + return; + + q = QUEUE_HEAD(&stream->write_queue); + req = QUEUE_DATA(q, uv_write_t, queue); + assert(req->handle == stream); + + /* + * Cast to iovec. We had to have our own uv_buf_t instead of iovec + * because Windows's WSABUF is not an iovec. + */ + assert(sizeof(uv_buf_t) == sizeof(struct iovec)); + iov = (struct iovec*) &(req->bufs[req->write_index]); + iovcnt = req->nbufs - req->write_index; + + iovmax = uv__getiovmax(); + + /* Limit iov count to avoid EINVALs from writev() */ + if (iovcnt > iovmax) + iovcnt = iovmax; + + /* + * Now do the actual writev. Note that we've been updating the pointers + * inside the iov each time we write. So there is no need to offset it. 
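That pointer adjustment is how a short writev() gets resumed without keeping a separate offset. A standalone sketch of the same bookkeeping, assuming a blocking descriptor (the helper name is invented):

#include <errno.h>
#include <sys/uio.h>
#include <unistd.h>

/* Write every byte described by iov[0..iovcnt), advancing the vector in
 * place after short writes. Returns 0 on success, -1 on error. */
static int write_all(int fd, struct iovec* iov, int iovcnt) {
  while (iovcnt > 0) {
    ssize_t n = writev(fd, iov, iovcnt);

    if (n < 0) {
      if (errno == EINTR)
        continue;
      return -1;
    }

    if (n == 0)
      break;  /* Only zero-length buffers remain. */

    while (n > 0) {
      if ((size_t) n < iov->iov_len) {
        /* Partially consumed buffer: move its base forward. */
        iov->iov_base = (char*) iov->iov_base + n;
        iov->iov_len -= (size_t) n;
        n = 0;
      } else {
        /* Fully consumed buffer: drop it from the front of the vector. */
        n -= (ssize_t) iov->iov_len;
        iov++;
        iovcnt--;
      }
    }
  }
  return 0;
}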
+ */ + + if (req->send_handle) { + struct msghdr msg; + struct cmsghdr *cmsg; + int fd_to_send = uv__handle_fd((uv_handle_t*) req->send_handle); + char scratch[64] = {0}; + + assert(fd_to_send >= 0); + + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_iov = iov; + msg.msg_iovlen = iovcnt; + msg.msg_flags = 0; + + msg.msg_control = (void*) scratch; + msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send)); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send)); + + /* silence aliasing warning */ + { + void* pv = CMSG_DATA(cmsg); + int* pi = pv; + *pi = fd_to_send; + } + + do { + n = sendmsg(uv__stream_fd(stream), &msg, 0); + } +#if defined(__APPLE__) + /* + * Due to a possible kernel bug at least in OS X 10.10 "Yosemite", + * EPROTOTYPE can be returned while trying to write to a socket that is + * shutting down. If we retry the write, we should get the expected EPIPE + * instead. + */ + while (n == -1 && (errno == EINTR || errno == EPROTOTYPE)); +#else + while (n == -1 && errno == EINTR); +#endif + } else { + do { + if (iovcnt == 1) { + n = write(uv__stream_fd(stream), iov[0].iov_base, iov[0].iov_len); + } else { + n = writev(uv__stream_fd(stream), iov, iovcnt); + } + } +#if defined(__APPLE__) + /* + * Due to a possible kernel bug at least in OS X 10.10 "Yosemite", + * EPROTOTYPE can be returned while trying to write to a socket that is + * shutting down. If we retry the write, we should get the expected EPIPE + * instead. + */ + while (n == -1 && (errno == EINTR || errno == EPROTOTYPE)); +#else + while (n == -1 && errno == EINTR); +#endif + } + + if (n < 0) { + if (errno != EAGAIN && errno != EWOULDBLOCK) { + /* Error */ + req->error = -errno; + uv__write_req_finish(req); + uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); + if (!uv__io_active(&stream->io_watcher, POLLIN)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + return; + } else if (stream->flags & UV_STREAM_BLOCKING) { + /* If this is a blocking stream, try again. */ + goto start; + } + } else { + /* Successful write */ + + while (n >= 0) { + uv_buf_t* buf = &(req->bufs[req->write_index]); + size_t len = buf->len; + + assert(req->write_index < req->nbufs); + + if ((size_t)n < len) { + buf->base += n; + buf->len -= n; + stream->write_queue_size -= n; + n = 0; + + /* There is more to write. */ + if (stream->flags & UV_STREAM_BLOCKING) { + /* + * If we're blocking then we should not be enabling the write + * watcher - instead we need to try again. + */ + goto start; + } else { + /* Break loop and ensure the watcher is pending. */ + break; + } + + } else { + /* Finished writing the buf at index req->write_index. */ + req->write_index++; + + assert((size_t)n >= len); + n -= len; + + assert(stream->write_queue_size >= len); + stream->write_queue_size -= len; + + if (req->write_index == req->nbufs) { + /* Then we're done! */ + assert(n == 0); + uv__write_req_finish(req); + /* TODO: start trying to write the next request. */ + return; + } + } + } + } + + /* Either we've counted n down to zero or we've got EAGAIN. */ + assert(n == 0 || n == -1); + + /* Only non-blocking streams should use the write_watcher. */ + assert(!(stream->flags & UV_STREAM_BLOCKING)); + + /* We're not done. 
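The msghdr/cmsghdr construction in the send_handle branch above is the standard SCM_RIGHTS mechanism for handing a descriptor to another process over an AF_UNIX socket. A minimal sending side, independent of libuv (helper name invented); the receiving side mirrors it with recvmsg() and CMSG_FIRSTHDR(), as uv__stream_recv_cmsg() below shows:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send one descriptor, plus one data byte, which many kernels require. */
static int send_fd(int sock, int fd) {
  char byte = 0;
  struct iovec iov = { &byte, 1 };
  union { struct cmsghdr hdr; char buf[CMSG_SPACE(sizeof(int))]; } ctl;
  struct msghdr msg;
  struct cmsghdr* cmsg;

  memset(&msg, 0, sizeof(msg));
  memset(&ctl, 0, sizeof(ctl));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = ctl.buf;
  msg.msg_controllen = sizeof(ctl.buf);

  cmsg = CMSG_FIRSTHDR(&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

  return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}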
*/ + uv__io_start(stream->loop, &stream->io_watcher, POLLOUT); + + /* Notify select() thread about state change */ + uv__stream_osx_interrupt_select(stream); +} + + +static void uv__write_callbacks(uv_stream_t* stream) { + uv_write_t* req; + QUEUE* q; + + while (!QUEUE_EMPTY(&stream->write_completed_queue)) { + /* Pop a req off write_completed_queue. */ + q = QUEUE_HEAD(&stream->write_completed_queue); + req = QUEUE_DATA(q, uv_write_t, queue); + QUEUE_REMOVE(q); + uv__req_unregister(stream->loop, req); + + if (req->bufs != NULL) { + stream->write_queue_size -= uv__write_req_size(req); + if (req->bufs != req->bufsml) + uv__free(req->bufs); + req->bufs = NULL; + } + + /* NOTE: call callback AFTER freeing the request data. */ + if (req->cb) + req->cb(req, req->error); + } + + assert(QUEUE_EMPTY(&stream->write_completed_queue)); +} + + +uv_handle_type uv__handle_type(int fd) { + struct sockaddr_storage ss; + socklen_t sslen; + socklen_t len; + int type; + + memset(&ss, 0, sizeof(ss)); + sslen = sizeof(ss); + + if (getsockname(fd, (struct sockaddr*)&ss, &sslen)) + return UV_UNKNOWN_HANDLE; + + len = sizeof type; + + if (getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &len)) + return UV_UNKNOWN_HANDLE; + + if (type == SOCK_STREAM) { +#if defined(_AIX) || defined(__DragonFly__) + /* on AIX/DragonFly the getsockname call returns an empty sa structure + * for sockets of type AF_UNIX. For all other types it will + * return a properly filled in structure. + */ + if (sslen == 0) + return UV_NAMED_PIPE; +#endif + switch (ss.ss_family) { + case AF_UNIX: + return UV_NAMED_PIPE; + case AF_INET: + case AF_INET6: + return UV_TCP; + } + } + + if (type == SOCK_DGRAM && + (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)) + return UV_UDP; + + return UV_UNKNOWN_HANDLE; +} + + +static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) { + stream->flags |= UV_STREAM_READ_EOF; + uv__io_stop(stream->loop, &stream->io_watcher, POLLIN); + if (!uv__io_active(&stream->io_watcher, POLLOUT)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + stream->read_cb(stream, UV_EOF, buf); + stream->flags &= ~UV_STREAM_READING; +} + + +static int uv__stream_queue_fd(uv_stream_t* stream, int fd) { + uv__stream_queued_fds_t* queued_fds; + unsigned int queue_size; + + queued_fds = stream->queued_fds; + if (queued_fds == NULL) { + queue_size = 8; + queued_fds = uv__malloc((queue_size - 1) * sizeof(*queued_fds->fds) + + sizeof(*queued_fds)); + if (queued_fds == NULL) + return -ENOMEM; + queued_fds->size = queue_size; + queued_fds->offset = 0; + stream->queued_fds = queued_fds; + + /* Grow */ + } else if (queued_fds->size == queued_fds->offset) { + queue_size = queued_fds->size + 8; + queued_fds = uv__realloc(queued_fds, + (queue_size - 1) * sizeof(*queued_fds->fds) + + sizeof(*queued_fds)); + + /* + * Allocation failure, report back. 
+ * NOTE: if it is fatal - sockets will be closed in uv__stream_close + */ + if (queued_fds == NULL) + return -ENOMEM; + queued_fds->size = queue_size; + stream->queued_fds = queued_fds; + } + + /* Put fd in a queue */ + queued_fds->fds[queued_fds->offset++] = fd; + + return 0; +} + + +#define UV__CMSG_FD_COUNT 64 +#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int)) + + +static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) { + struct cmsghdr* cmsg; + + for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { + char* start; + char* end; + int err; + void* pv; + int* pi; + unsigned int i; + unsigned int count; + + if (cmsg->cmsg_type != SCM_RIGHTS) { + fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n", + cmsg->cmsg_type); + continue; + } + + /* silence aliasing warning */ + pv = CMSG_DATA(cmsg); + pi = pv; + + /* Count available fds */ + start = (char*) cmsg; + end = (char*) cmsg + cmsg->cmsg_len; + count = 0; + while (start + CMSG_LEN(count * sizeof(*pi)) < end) + count++; + assert(start + CMSG_LEN(count * sizeof(*pi)) == end); + + for (i = 0; i < count; i++) { + /* Already has accepted fd, queue now */ + if (stream->accepted_fd != -1) { + err = uv__stream_queue_fd(stream, pi[i]); + if (err != 0) { + /* Close rest */ + for (; i < count; i++) + uv__close(pi[i]); + return err; + } + } else { + stream->accepted_fd = pi[i]; + } + } + } + + return 0; +} + + +#ifdef __clang__ +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wgnu-folding-constant" +#endif + +static void uv__read(uv_stream_t* stream) { + uv_buf_t buf; + ssize_t nread; + struct msghdr msg; + char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)]; + int count; + int err; + int is_ipc; + + stream->flags &= ~UV_STREAM_READ_PARTIAL; + + /* Prevent loop starvation when the data comes in as fast as (or faster than) + * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O. + */ + count = 32; + + is_ipc = stream->type == UV_NAMED_PIPE && ((uv_pipe_t*) stream)->ipc; + + /* XXX: Maybe instead of having UV_STREAM_READING we just test if + * tcp->read_cb is NULL or not? + */ + while (stream->read_cb + && (stream->flags & UV_STREAM_READING) + && (count-- > 0)) { + assert(stream->alloc_cb != NULL); + + buf = uv_buf_init(NULL, 0); + stream->alloc_cb((uv_handle_t*)stream, 64 * 1024, &buf); + if (buf.base == NULL || buf.len == 0) { + /* User indicates it can't or won't handle the read. */ + stream->read_cb(stream, UV_ENOBUFS, &buf); + return; + } + + assert(buf.base != NULL); + assert(uv__stream_fd(stream) >= 0); + + if (!is_ipc) { + do { + nread = read(uv__stream_fd(stream), buf.base, buf.len); + } + while (nread < 0 && errno == EINTR); + } else { + /* ipc uses recvmsg */ + msg.msg_flags = 0; + msg.msg_iov = (struct iovec*) &buf; + msg.msg_iovlen = 1; + msg.msg_name = NULL; + msg.msg_namelen = 0; + /* Set up to receive a descriptor even if one isn't in the message */ + msg.msg_controllen = sizeof(cmsg_space); + msg.msg_control = cmsg_space; + + do { + nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0); + } + while (nread < 0 && errno == EINTR); + } + + if (nread < 0) { + /* Error */ + if (errno == EAGAIN || errno == EWOULDBLOCK) { + /* Wait for the next one. */ + if (stream->flags & UV_STREAM_READING) { + uv__io_start(stream->loop, &stream->io_watcher, POLLIN); + uv__stream_osx_interrupt_select(stream); + } + stream->read_cb(stream, 0, &buf); + } else { + /* Error. User should call uv_close(). 
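The allocation size used by uv__stream_queue_fd() above packs the header and its trailing fd array into one block, with the first slot folded into the struct's declared element. The same layout in isolation, as a minimal sketch with hypothetical names:

#include <stdlib.h>

/* A growable fd queue laid out as a header plus trailing array. */
struct fd_queue {
  unsigned int size;
  unsigned int used;
  int fds[1];
};

static struct fd_queue* fd_queue_push(struct fd_queue* q, int fd) {
  if (q == NULL || q->used == q->size) {
    unsigned int size = (q == NULL) ? 8 : q->size + 8;
    struct fd_queue* nq =
        realloc(q, sizeof(*nq) + (size - 1) * sizeof(nq->fds[0]));

    if (nq == NULL)
      return NULL;
    if (q == NULL)
      nq->used = 0;
    nq->size = size;
    q = nq;
  }

  q->fds[q->used++] = fd;
  return q;
}

realloc() keeps the already-queued descriptors in place, which is why growth only needs to update the size field.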
*/ + stream->read_cb(stream, -errno, &buf); + if (stream->flags & UV_STREAM_READING) { + stream->flags &= ~UV_STREAM_READING; + uv__io_stop(stream->loop, &stream->io_watcher, POLLIN); + if (!uv__io_active(&stream->io_watcher, POLLOUT)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + } + } + return; + } else if (nread == 0) { + uv__stream_eof(stream, &buf); + return; + } else { + /* Successful read */ + ssize_t buflen = buf.len; + + if (is_ipc) { + err = uv__stream_recv_cmsg(stream, &msg); + if (err != 0) { + stream->read_cb(stream, err, &buf); + return; + } + } + stream->read_cb(stream, nread, &buf); + + /* Return if we didn't fill the buffer, there is no more data to read. */ + if (nread < buflen) { + stream->flags |= UV_STREAM_READ_PARTIAL; + return; + } + } + } +} + + +#ifdef __clang__ +# pragma clang diagnostic pop +#endif + +#undef UV__CMSG_FD_COUNT +#undef UV__CMSG_FD_SIZE + + +int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) { + assert((stream->type == UV_TCP || stream->type == UV_NAMED_PIPE) && + "uv_shutdown (unix) only supports uv_handle_t right now"); + + if (!(stream->flags & UV_STREAM_WRITABLE) || + stream->flags & UV_STREAM_SHUT || + stream->flags & UV_STREAM_SHUTTING || + stream->flags & UV_CLOSED || + stream->flags & UV_CLOSING) { + return -ENOTCONN; + } + + assert(uv__stream_fd(stream) >= 0); + + /* Initialize request */ + uv__req_init(stream->loop, req, UV_SHUTDOWN); + req->handle = stream; + req->cb = cb; + stream->shutdown_req = req; + stream->flags |= UV_STREAM_SHUTTING; + + uv__io_start(stream->loop, &stream->io_watcher, POLLOUT); + uv__stream_osx_interrupt_select(stream); + + return 0; +} + + +static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { + uv_stream_t* stream; + + stream = container_of(w, uv_stream_t, io_watcher); + + assert(stream->type == UV_TCP || + stream->type == UV_NAMED_PIPE || + stream->type == UV_TTY); + assert(!(stream->flags & UV_CLOSING)); + + if (stream->connect_req) { + uv__stream_connect(stream); + return; + } + + assert(uv__stream_fd(stream) >= 0); + + /* Ignore POLLHUP here. Even it it's set, there may still be data to read. */ + if (events & (POLLIN | POLLERR | POLLHUP)) + uv__read(stream); + + if (uv__stream_fd(stream) == -1) + return; /* read_cb closed stream. */ + + /* Short-circuit iff POLLHUP is set, the user is still interested in read + * events and uv__read() reported a partial read but not EOF. If the EOF + * flag is set, uv__read() called read_cb with err=UV_EOF and we don't + * have to do anything. If the partial read flag is not set, we can't + * report the EOF yet because there is still data to read. + */ + if ((events & POLLHUP) && + (stream->flags & UV_STREAM_READING) && + (stream->flags & UV_STREAM_READ_PARTIAL) && + !(stream->flags & UV_STREAM_READ_EOF)) { + uv_buf_t buf = { NULL, 0 }; + uv__stream_eof(stream, &buf); + } + + if (uv__stream_fd(stream) == -1) + return; /* read_cb closed stream. */ + + if (events & (POLLOUT | POLLERR | POLLHUP)) { + uv__write(stream); + uv__write_callbacks(stream); + + /* Write queue drained. */ + if (QUEUE_EMPTY(&stream->write_queue)) + uv__drain(stream); + } +} + + +/** + * We get called here from directly following a call to connect(2). + * In order to determine if we've errored out or succeeded must call + * getsockopt. 
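In plain BSD-socket terms that sequence is: issue a non-blocking connect(), wait for the descriptor to become writable, then read the outcome with getsockopt(SO_ERROR). A hedged standalone sketch (name invented):

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* After connect() on a non-blocking socket returned EINPROGRESS: wait for
 * writability, then fetch the final result. Returns 0 or a negative errno. */
static int finish_connect(int fd, int timeout_ms) {
  struct pollfd pfd;
  socklen_t len;
  int err;
  int r;

  pfd.fd = fd;
  pfd.events = POLLOUT;
  pfd.revents = 0;

  r = poll(&pfd, 1, timeout_ms);
  if (r == 0)
    return -ETIMEDOUT;
  if (r < 0)
    return -errno;

  err = 0;
  len = sizeof(err);
  if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len))
    return -errno;

  return -err;  /* 0 on success, e.g. -ECONNREFUSED on failure. */
}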
+ */ +static void uv__stream_connect(uv_stream_t* stream) { + int error; + uv_connect_t* req = stream->connect_req; + socklen_t errorsize = sizeof(int); + + assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE); + assert(req); + + if (stream->delayed_error) { + /* To smooth over the differences between unixes errors that + * were reported synchronously on the first connect can be delayed + * until the next tick--which is now. + */ + error = stream->delayed_error; + stream->delayed_error = 0; + } else { + /* Normal situation: we need to get the socket error from the kernel. */ + assert(uv__stream_fd(stream) >= 0); + getsockopt(uv__stream_fd(stream), + SOL_SOCKET, + SO_ERROR, + &error, + &errorsize); + error = -error; + } + + if (error == -EINPROGRESS) + return; + + stream->connect_req = NULL; + uv__req_unregister(stream->loop, req); + + if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) { + uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); + } + + if (req->cb) + req->cb(req, error); + + if (uv__stream_fd(stream) == -1) + return; + + if (error < 0) { + uv__stream_flush_write_queue(stream, -ECANCELED); + uv__write_callbacks(stream); + } +} + + +int uv_write2(uv_write_t* req, + uv_stream_t* stream, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_stream_t* send_handle, + uv_write_cb cb) { + int empty_queue; + + assert(nbufs > 0); + assert((stream->type == UV_TCP || + stream->type == UV_NAMED_PIPE || + stream->type == UV_TTY) && + "uv_write (unix) does not yet support other types of streams"); + + if (uv__stream_fd(stream) < 0) + return -EBADF; + + if (send_handle) { + if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc) + return -EINVAL; + + /* XXX We abuse uv_write2() to send over UDP handles to child processes. + * Don't call uv__stream_fd() on those handles, it's a macro that on OS X + * evaluates to a function that operates on a uv_stream_t with a couple of + * OS X specific fields. On other Unices it does (handle)->io_watcher.fd, + * which works but only by accident. + */ + if (uv__handle_fd((uv_handle_t*) send_handle) < 0) + return -EBADF; + } + + /* It's legal for write_queue_size > 0 even when the write_queue is empty; + * it means there are error-state requests in the write_completed_queue that + * will touch up write_queue_size later, see also uv__write_req_finish(). + * We could check that write_queue is empty instead but that implies making + * a write() syscall when we know that the handle is in error mode. + */ + empty_queue = (stream->write_queue_size == 0); + + /* Initialize the req */ + uv__req_init(stream->loop, req, UV_WRITE); + req->cb = cb; + req->handle = stream; + req->error = 0; + req->send_handle = send_handle; + QUEUE_INIT(&req->queue); + + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(bufs[0])); + + if (req->bufs == NULL) + return -ENOMEM; + + memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); + req->nbufs = nbufs; + req->write_index = 0; + stream->write_queue_size += uv__count_bufs(bufs, nbufs); + + /* Append the request to write_queue. */ + QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue); + + /* If the queue was empty when this function began, we should attempt to + * do the write immediately. Otherwise start the write_watcher and wait + * for the fd to become writable. + */ + if (stream->connect_req) { + /* Still connecting, do nothing. */ + } + else if (empty_queue) { + uv__write(stream); + } + else { + /* + * blocking streams should never have anything in the queue. 
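From the caller's side, the queueing in uv_write2() above means the request and the memory its buffers point to must stay alive until the write callback runs. A typical usage sketch against the public API, with error handling trimmed and helper names invented; it assumes the stream is already connected:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <uv.h>

typedef struct {
  uv_write_t req;  /* First member, so the callback can cast back to the ctx. */
  uv_buf_t buf;
} write_ctx_t;

static void on_write(uv_write_t* req, int status) {
  write_ctx_t* ctx = (write_ctx_t*) req;

  if (status < 0)
    fprintf(stderr, "write failed: %s\n", uv_strerror(status));

  free(ctx->buf.base);
  free(ctx);
}

/* Queue a line of text on an already-connected stream. */
static void send_line(uv_stream_t* stream, const char* text) {
  write_ctx_t* ctx = malloc(sizeof(*ctx));

  ctx->buf = uv_buf_init(strdup(text), (unsigned int) strlen(text));
  uv_write(&ctx->req, stream, &ctx->buf, 1, on_write);
}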
+     * if this assert fires then somehow the blocking stream isn't being
+     * sufficiently flushed in uv__write.
+     */
+    assert(!(stream->flags & UV_STREAM_BLOCKING));
+    uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+    uv__stream_osx_interrupt_select(stream);
+  }
+
+  return 0;
+}
+
+
+/* The buffers to be written must remain valid until the callback is called.
+ * This is not required for the uv_buf_t array.
+ */
+int uv_write(uv_write_t* req,
+             uv_stream_t* handle,
+             const uv_buf_t bufs[],
+             unsigned int nbufs,
+             uv_write_cb cb) {
+  return uv_write2(req, handle, bufs, nbufs, NULL, cb);
+}
+
+
+void uv_try_write_cb(uv_write_t* req, int status) {
+  /* Should not be called */
+  abort();
+}
+
+
+int uv_try_write(uv_stream_t* stream,
+                 const uv_buf_t bufs[],
+                 unsigned int nbufs) {
+  int r;
+  int has_pollout;
+  size_t written;
+  size_t req_size;
+  uv_write_t req;
+
+  /* Connecting or already writing some data */
+  if (stream->connect_req != NULL || stream->write_queue_size != 0)
+    return -EAGAIN;
+
+  has_pollout = uv__io_active(&stream->io_watcher, POLLOUT);
+
+  r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
+  if (r != 0)
+    return r;
+
+  /* Remove the bytes that were not written from the write queue size */
+  written = uv__count_bufs(bufs, nbufs);
+  if (req.bufs != NULL)
+    req_size = uv__write_req_size(&req);
+  else
+    req_size = 0;
+  written -= req_size;
+  stream->write_queue_size -= req_size;
+
+  /* Unqueue request, regardless of whether it completed immediately */
+  QUEUE_REMOVE(&req.queue);
+  uv__req_unregister(stream->loop, &req);
+  if (req.bufs != req.bufsml)
+    uv__free(req.bufs);
+  req.bufs = NULL;
+
+  /* Do not poll for writable if we weren't before calling this */
+  if (!has_pollout) {
+    uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+    uv__stream_osx_interrupt_select(stream);
+  }
+
+  if (written == 0 && req_size != 0)
+    return -EAGAIN;
+  else
+    return written;
+}
+
+
+int uv_read_start(uv_stream_t* stream,
+                  uv_alloc_cb alloc_cb,
+                  uv_read_cb read_cb) {
+  assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
+      stream->type == UV_TTY);
+
+  if (stream->flags & UV_CLOSING)
+    return -EINVAL;
+
+  /* The UV_STREAM_READING flag is independent of the state of the tcp - it
+   * just expresses the desired state of the user.
+   */
+  stream->flags |= UV_STREAM_READING;
+
+  /* TODO: try to do the read inline? */
+  /* TODO: keep track of tcp state. If we've gotten an EOF then we should
+   * not start the IO watcher.
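On the calling side, uv_read_start() pairs an allocation callback with a read callback; a negative nread (UV_EOF included) ends the read, and the buffer handed out by the allocation callback is the caller's to release. A usage sketch with invented helper names:

#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  buf->base = malloc(suggested);
  buf->len = (buf->base != NULL) ? suggested : 0;
}

static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
  if (nread > 0)
    fwrite(buf->base, 1, (size_t) nread, stdout);
  else if (nread < 0)  /* UV_EOF or a real error. */
    uv_close((uv_handle_t*) stream, NULL);

  free(buf->base);  /* Safe even when the callback got an empty buffer. */
}

/* After the stream has been connected or accepted: */
/*   uv_read_start(stream, on_alloc, on_read);      */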
+ */ + assert(uv__stream_fd(stream) >= 0); + assert(alloc_cb); + + stream->read_cb = read_cb; + stream->alloc_cb = alloc_cb; + + uv__io_start(stream->loop, &stream->io_watcher, POLLIN); + uv__handle_start(stream); + uv__stream_osx_interrupt_select(stream); + + return 0; +} + + +int uv_read_stop(uv_stream_t* stream) { + if (!(stream->flags & UV_STREAM_READING)) + return 0; + + stream->flags &= ~UV_STREAM_READING; + uv__io_stop(stream->loop, &stream->io_watcher, POLLIN); + if (!uv__io_active(&stream->io_watcher, POLLOUT)) + uv__handle_stop(stream); + uv__stream_osx_interrupt_select(stream); + + stream->read_cb = NULL; + stream->alloc_cb = NULL; + return 0; +} + + +int uv_is_readable(const uv_stream_t* stream) { + return !!(stream->flags & UV_STREAM_READABLE); +} + + +int uv_is_writable(const uv_stream_t* stream) { + return !!(stream->flags & UV_STREAM_WRITABLE); +} + + +#if defined(__APPLE__) +int uv___stream_fd(const uv_stream_t* handle) { + const uv__stream_select_t* s; + + assert(handle->type == UV_TCP || + handle->type == UV_TTY || + handle->type == UV_NAMED_PIPE); + + s = handle->select; + if (s != NULL) + return s->fd; + + return handle->io_watcher.fd; +} +#endif /* defined(__APPLE__) */ + + +void uv__stream_close(uv_stream_t* handle) { + unsigned int i; + uv__stream_queued_fds_t* queued_fds; + +#if defined(__APPLE__) + /* Terminate select loop first */ + if (handle->select != NULL) { + uv__stream_select_t* s; + + s = handle->select; + + uv_sem_post(&s->close_sem); + uv_sem_post(&s->async_sem); + uv__stream_osx_interrupt_select(handle); + uv_thread_join(&s->thread); + uv_sem_destroy(&s->close_sem); + uv_sem_destroy(&s->async_sem); + uv__close(s->fake_fd); + uv__close(s->int_fd); + uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close); + + handle->select = NULL; + } +#endif /* defined(__APPLE__) */ + + uv__io_close(handle->loop, &handle->io_watcher); + uv_read_stop(handle); + uv__handle_stop(handle); + + if (handle->io_watcher.fd != -1) { + /* Don't close stdio file descriptors. Nothing good comes from it. */ + if (handle->io_watcher.fd > STDERR_FILENO) + uv__close(handle->io_watcher.fd); + handle->io_watcher.fd = -1; + } + + if (handle->accepted_fd != -1) { + uv__close(handle->accepted_fd); + handle->accepted_fd = -1; + } + + /* Close all queued fds */ + if (handle->queued_fds != NULL) { + queued_fds = handle->queued_fds; + for (i = 0; i < queued_fds->offset; i++) + uv__close(queued_fds->fds[i]); + uv__free(handle->queued_fds); + handle->queued_fds = NULL; + } + + assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT)); +} + + +int uv_stream_set_blocking(uv_stream_t* handle, int blocking) { + /* Don't need to check the file descriptor, uv__nonblock() + * will fail with EBADF if it's not valid. + */ + return uv__nonblock(uv__stream_fd(handle), !blocking); +} diff --git a/deps/libuv/src/unix/sunos.c b/deps/libuv/src/unix/sunos.c new file mode 100644 index 00000000..3e7a7592 --- /dev/null +++ b/deps/libuv/src/unix/sunos.c @@ -0,0 +1,821 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include + +#ifndef SUNOS_NO_IFADDRS +# include +#endif +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#define PORT_FIRED 0x69 +#define PORT_UNUSED 0x0 +#define PORT_LOADED 0x99 +#define PORT_DELETED -1 + +#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64) +#define PROCFS_FILE_OFFSET_BITS_HACK 1 +#undef _FILE_OFFSET_BITS +#else +#define PROCFS_FILE_OFFSET_BITS_HACK 0 +#endif + +#include + +#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1) +#define _FILE_OFFSET_BITS 64 +#endif + + +int uv__platform_loop_init(uv_loop_t* loop) { + int err; + int fd; + + loop->fs_fd = -1; + loop->backend_fd = -1; + + fd = port_create(); + if (fd == -1) + return -errno; + + err = uv__cloexec(fd, 1); + if (err) { + uv__close(fd); + return err; + } + loop->backend_fd = fd; + + return 0; +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + if (loop->fs_fd != -1) { + uv__close(loop->fs_fd); + loop->fs_fd = -1; + } + + if (loop->backend_fd != -1) { + uv__close(loop->backend_fd); + loop->backend_fd = -1; + } +} + + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + struct port_event* events; + uintptr_t i; + uintptr_t nfds; + + assert(loop->watchers != NULL); + + events = (struct port_event*) loop->watchers[loop->nwatchers]; + nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; + if (events == NULL) + return; + + /* Invalidate events with same file descriptor */ + for (i = 0; i < nfds; i++) + if ((int) events[i].portev_object == fd) + events[i].portev_object = -1; +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0)) + return -errno; + + if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) + abort(); + + return 0; +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + struct port_event events[1024]; + struct port_event* pe; + struct timespec spec; + QUEUE* q; + uv__io_t* w; + sigset_t* pset; + sigset_t set; + uint64_t base; + uint64_t diff; + unsigned int nfds; + unsigned int i; + int saved_errno; + int have_signals; + int nevents; + int count; + int err; + int fd; + + if (loop->nfds == 0) { + assert(QUEUE_EMPTY(&loop->watcher_queue)); + return; + } + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + w = 
QUEUE_DATA(q, uv__io_t, watcher_queue); + assert(w->pevents != 0); + + if (port_associate(loop->backend_fd, PORT_SOURCE_FD, w->fd, w->pevents, 0)) + abort(); + + w->events = w->pevents; + } + + pset = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + pset = &set; + sigemptyset(pset); + sigaddset(pset, SIGPROF); + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + + for (;;) { + if (timeout != -1) { + spec.tv_sec = timeout / 1000; + spec.tv_nsec = (timeout % 1000) * 1000000; + } + + /* Work around a kernel bug where nfds is not updated. */ + events[0].portev_source = 0; + + nfds = 1; + saved_errno = 0; + + if (pset != NULL) + pthread_sigmask(SIG_BLOCK, pset, NULL); + + err = port_getn(loop->backend_fd, + events, + ARRAY_SIZE(events), + &nfds, + timeout == -1 ? NULL : &spec); + + if (pset != NULL) + pthread_sigmask(SIG_UNBLOCK, pset, NULL); + + if (err) { + /* Work around another kernel bug: port_getn() may return events even + * on error. + */ + if (errno == EINTR || errno == ETIME) + saved_errno = errno; + else + abort(); + } + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (events[0].portev_source == 0) { + if (timeout == 0) + return; + + if (timeout == -1) + continue; + + goto update_timeout; + } + + if (nfds == 0) { + assert(timeout != -1); + return; + } + + have_signals = 0; + nevents = 0; + + assert(loop->watchers != NULL); + loop->watchers[loop->nwatchers] = (void*) events; + loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; + for (i = 0; i < nfds; i++) { + pe = events + i; + fd = pe->portev_object; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (fd == -1) + continue; + + assert(fd >= 0); + assert((unsigned) fd < loop->nwatchers); + + w = loop->watchers[fd]; + + /* File descriptor that we've stopped watching, ignore. */ + if (w == NULL) + continue; + + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) + have_signals = 1; + else + w->cb(loop, w, pe->portev_events); + + nevents++; + + if (w != loop->watchers[fd]) + continue; /* Disabled by callback. */ + + /* Events Ports operates in oneshot mode, rearm timer on next run. */ + if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) + QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); + } + + if (have_signals != 0) + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + + loop->watchers[loop->nwatchers] = NULL; + loop->watchers[loop->nwatchers + 1] = NULL; + + if (have_signals != 0) + return; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. 
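Event ports deliver an association exactly once, which is why the loop above re-associates every watcher after handling its event. A minimal Solaris-only sketch of that one-shot cycle, given a port obtained from port_create(), with error handling trimmed and names invented:

#include <poll.h>
#include <port.h>
#include <stdint.h>
#include <sys/types.h>

/* Arm exactly one delivery of POLLIN for fd on the given event port. */
static void watch_fd(int port, int fd) {
  port_associate(port, PORT_SOURCE_FD, (uintptr_t) fd, POLLIN, NULL);
}

/* Block for one event, handle it, and rearm the consumed association. */
static void poll_once(int port) {
  port_event_t ev;
  uint_t n = 1;

  if (port_getn(port, &ev, 1, &n, NULL) == 0 && n == 1) {
    int fd = (int) ev.portev_object;

    /* ... read from fd here ... */

    watch_fd(port, fd);  /* The association was consumed by the delivery. */
  }
}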
*/ + timeout = 0; + continue; + } + return; + } + + if (saved_errno == ETIME) { + assert(timeout != -1); + return; + } + + if (timeout == 0) + return; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + diff = loop->time - base; + if (diff >= (uint64_t) timeout) + return; + + timeout -= diff; + } +} + + +uint64_t uv__hrtime(uv_clocktype_t type) { + return gethrtime(); +} + + +/* + * We could use a static buffer for the path manipulations that we need outside + * of the function, but this function could be called by multiple consumers and + * we don't want to potentially create a race condition in the use of snprintf. + */ +int uv_exepath(char* buffer, size_t* size) { + ssize_t res; + char buf[128]; + + if (buffer == NULL || size == NULL || *size == 0) + return -EINVAL; + + snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid()); + + res = *size - 1; + if (res > 0) + res = readlink(buf, buffer, res); + + if (res == -1) + return -errno; + + buffer[res] = '\0'; + *size = res; + return 0; +} + + +uint64_t uv_get_free_memory(void) { + return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES); +} + + +uint64_t uv_get_total_memory(void) { + return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES); +} + + +void uv_loadavg(double avg[3]) { + (void) getloadavg(avg, 3); +} + + +#if defined(PORT_SOURCE_FILE) + +static int uv__fs_event_rearm(uv_fs_event_t *handle) { + if (handle->fd == -1) + return -EBADF; + + if (port_associate(handle->loop->fs_fd, + PORT_SOURCE_FILE, + (uintptr_t) &handle->fo, + FILE_ATTRIB | FILE_MODIFIED, + handle) == -1) { + return -errno; + } + handle->fd = PORT_LOADED; + + return 0; +} + + +static void uv__fs_event_read(uv_loop_t* loop, + uv__io_t* w, + unsigned int revents) { + uv_fs_event_t *handle = NULL; + timespec_t timeout; + port_event_t pe; + int events; + int r; + + (void) w; + (void) revents; + + do { + uint_t n = 1; + + /* + * Note that our use of port_getn() here (and not port_get()) is deliberate: + * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout + * causes port_get() to return success instead of ETIME when there aren't + * actually any events (!); by using port_getn() in lieu of port_get(), + * we can at least workaround the bug by checking for zero returned events + * and treating it as we would ETIME. 
+ */ + do { + memset(&timeout, 0, sizeof timeout); + r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout); + } + while (r == -1 && errno == EINTR); + + if ((r == -1 && errno == ETIME) || n == 0) + break; + + handle = (uv_fs_event_t*) pe.portev_user; + assert((r == 0) && "unexpected port_get() error"); + + events = 0; + if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED)) + events |= UV_CHANGE; + if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED)) + events |= UV_RENAME; + assert(events != 0); + handle->fd = PORT_FIRED; + handle->cb(handle, NULL, events, 0); + + if (handle->fd != PORT_DELETED) { + r = uv__fs_event_rearm(handle); + if (r != 0) + handle->cb(handle, NULL, 0, r); + } + } + while (handle->fd != PORT_DELETED); +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { + int portfd; + int first_run; + int err; + + if (uv__is_active(handle)) + return -EINVAL; + + first_run = 0; + if (handle->loop->fs_fd == -1) { + portfd = port_create(); + if (portfd == -1) + return -errno; + handle->loop->fs_fd = portfd; + first_run = 1; + } + + uv__handle_start(handle); + handle->path = uv__strdup(path); + handle->fd = PORT_UNUSED; + handle->cb = cb; + + memset(&handle->fo, 0, sizeof handle->fo); + handle->fo.fo_name = handle->path; + err = uv__fs_event_rearm(handle); + if (err != 0) + return err; + + if (first_run) { + uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd); + uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN); + } + + return 0; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + if (!uv__is_active(handle)) + return 0; + + if (handle->fd == PORT_FIRED || handle->fd == PORT_LOADED) { + port_dissociate(handle->loop->fs_fd, + PORT_SOURCE_FILE, + (uintptr_t) &handle->fo); + } + + handle->fd = PORT_DELETED; + uv__free(handle->path); + handle->path = NULL; + handle->fo.fo_name = NULL; + uv__handle_stop(handle); + + return 0; +} + +void uv__fs_event_close(uv_fs_event_t* handle) { + uv_fs_event_stop(handle); +} + +#else /* !defined(PORT_SOURCE_FILE) */ + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + return -ENOSYS; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* filename, + unsigned int flags) { + return -ENOSYS; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + return -ENOSYS; +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { + UNREACHABLE(); +} + +#endif /* defined(PORT_SOURCE_FILE) */ + + +char** uv_setup_args(int argc, char** argv) { + return argv; +} + + +int uv_set_process_title(const char* title) { + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + if (buffer == NULL || size == 0) + return -EINVAL; + + buffer[0] = '\0'; + return 0; +} + + +int uv_resident_set_memory(size_t* rss) { + psinfo_t psinfo; + int err; + int fd; + + fd = open("/proc/self/psinfo", O_RDONLY); + if (fd == -1) + return -errno; + + /* FIXME(bnoordhuis) Handle EINTR. 
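The retry that the FIXME above asks for is the usual EINTR loop around read(); a small sketch of reading a fixed-size record that way (helper name invented):

#include <errno.h>
#include <unistd.h>

/* Read exactly len bytes, retrying on EINTR and short reads.
 * Returns 0 on success, -1 on error or premature EOF. */
static int read_full(int fd, void* dst, size_t len) {
  char* p = dst;

  while (len > 0) {
    ssize_t n = read(fd, p, len);

    if (n < 0) {
      if (errno == EINTR)
        continue;
      return -1;
    }
    if (n == 0)
      return -1;  /* EOF before the full record arrived. */

    p += n;
    len -= (size_t) n;
  }
  return 0;
}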
*/ + err = -EINVAL; + if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) { + *rss = (size_t)psinfo.pr_rssize * 1024; + err = 0; + } + uv__close(fd); + + return err; +} + + +int uv_uptime(double* uptime) { + kstat_ctl_t *kc; + kstat_t *ksp; + kstat_named_t *knp; + + long hz = sysconf(_SC_CLK_TCK); + + kc = kstat_open(); + if (kc == NULL) + return -EPERM; + + ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc"); + if (kstat_read(kc, ksp, NULL) == -1) { + *uptime = -1; + } else { + knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr"); + *uptime = knp->value.ul / hz; + } + kstat_close(kc); + + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { + int lookup_instance; + kstat_ctl_t *kc; + kstat_t *ksp; + kstat_named_t *knp; + uv_cpu_info_t* cpu_info; + + kc = kstat_open(); + if (kc == NULL) + return -EPERM; + + /* Get count of cpus */ + lookup_instance = 0; + while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) { + lookup_instance++; + } + + *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos)); + if (!(*cpu_infos)) { + kstat_close(kc); + return -ENOMEM; + } + + *count = lookup_instance; + + cpu_info = *cpu_infos; + lookup_instance = 0; + while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) { + if (kstat_read(kc, ksp, NULL) == -1) { + cpu_info->speed = 0; + cpu_info->model = NULL; + } else { + knp = kstat_data_lookup(ksp, (char*) "clock_MHz"); + assert(knp->data_type == KSTAT_DATA_INT32 || + knp->data_type == KSTAT_DATA_INT64); + cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32 + : knp->value.i64; + + knp = kstat_data_lookup(ksp, (char*) "brand"); + assert(knp->data_type == KSTAT_DATA_STRING); + cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp)); + } + + lookup_instance++; + cpu_info++; + } + + cpu_info = *cpu_infos; + lookup_instance = 0; + for (;;) { + ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys"); + + if (ksp == NULL) + break; + + if (kstat_read(kc, ksp, NULL) == -1) { + cpu_info->cpu_times.user = 0; + cpu_info->cpu_times.nice = 0; + cpu_info->cpu_times.sys = 0; + cpu_info->cpu_times.idle = 0; + cpu_info->cpu_times.irq = 0; + } else { + knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.user = knp->value.ui64; + + knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.sys = knp->value.ui64; + + knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.idle = knp->value.ui64; + + knp = kstat_data_lookup(ksp, (char*) "intr"); + assert(knp->data_type == KSTAT_DATA_UINT64); + cpu_info->cpu_times.irq = knp->value.ui64; + cpu_info->cpu_times.nice = 0; + } + + lookup_instance++; + cpu_info++; + } + + kstat_close(kc); + + return 0; +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + +/* + * Inspired By: + * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris + * http://www.pauliesworld.org/project/getmac.c + */ +static int uv__set_phys_addr(uv_interface_address_t* address, + struct ifaddrs* ent) { + + struct sockaddr_dl* sa_addr; + int sockfd; + int i; + struct arpreq arpreq; + + /* This appears to only work as root */ + sa_addr = (struct sockaddr_dl*)(ent->ifa_addr); + 
memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr)); + for (i = 0; i < sizeof(address->phys_addr); i++) { + if (address->phys_addr[i] != 0) + return 0; + } + memset(&arpreq, 0, sizeof(arpreq)); + if (address->address.address4.sin_family == AF_INET) { + struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa); + sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr; + } else if (address->address.address4.sin_family == AF_INET6) { + struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa); + memcpy(sin->sin6_addr.s6_addr, + address->address.address6.sin6_addr.s6_addr, + sizeof(address->address.address6.sin6_addr.s6_addr)); + } else { + return 0; + } + + sockfd = socket(AF_INET, SOCK_DGRAM, 0); + if (sockfd < 0) + return -errno; + + if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) { + uv__close(sockfd); + return -errno; + } + memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr)); + uv__close(sockfd); + return 0; +} + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { +#ifdef SUNOS_NO_IFADDRS + return -ENOSYS; +#else + uv_interface_address_t* address; + struct ifaddrs* addrs; + struct ifaddrs* ent; + int i; + + if (getifaddrs(&addrs)) + return -errno; + + *count = 0; + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)) || + (ent->ifa_addr == NULL) || + (ent->ifa_addr->sa_family == PF_PACKET)) { + continue; + } + + (*count)++; + } + + *addresses = uv__malloc(*count * sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return -ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + continue; + + if (ent->ifa_addr == NULL) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) || + (ent->ifa_flags & IFF_LOOPBACK)); + + uv__set_phys_addr(address, ent); + address++; + } + + freeifaddrs(addrs); + + return 0; +#endif /* SUNOS_NO_IFADDRS */ +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} diff --git a/deps/libuv/src/unix/tcp.c b/deps/libuv/src/unix/tcp.c new file mode 100644 index 00000000..c423dcb1 --- /dev/null +++ b/deps/libuv/src/unix/tcp.c @@ -0,0 +1,395 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include + + +static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) { + int sockfd; + int err; + + if (domain == AF_UNSPEC || uv__stream_fd(handle) != -1) { + handle->flags |= flags; + return 0; + } + + err = uv__socket(domain, SOCK_STREAM, 0); + if (err < 0) + return err; + sockfd = err; + + err = uv__stream_open((uv_stream_t*) handle, sockfd, flags); + if (err) { + uv__close(sockfd); + return err; + } + + return 0; +} + + +int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { + int domain; + + /* Use the lower 8 bits for the domain */ + domain = flags & 0xFF; + if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) + return -EINVAL; + + if (flags & ~0xFF) + return -EINVAL; + + uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP); + + /* If anything fails beyond this point we need to remove the handle from + * the handle queue, since it was added by uv__handle_init in uv_stream_init. + */ + + if (domain != AF_UNSPEC) { + int err = maybe_new_socket(tcp, domain, 0); + if (err) { + QUEUE_REMOVE(&tcp->handle_queue); + return err; + } + } + + return 0; +} + + +int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) { + return uv_tcp_init_ex(loop, tcp, AF_UNSPEC); +} + + +int uv__tcp_bind(uv_tcp_t* tcp, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + int err; + int on; + + /* Cannot set IPv6-only mode on non-IPv6 socket. */ + if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6) + return -EINVAL; + + err = maybe_new_socket(tcp, + addr->sa_family, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); + if (err) + return err; + + on = 1; + if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on))) + return -errno; + +#ifdef IPV6_V6ONLY + if (addr->sa_family == AF_INET6) { + on = (flags & UV_TCP_IPV6ONLY) != 0; + if (setsockopt(tcp->io_watcher.fd, + IPPROTO_IPV6, + IPV6_V6ONLY, + &on, + sizeof on) == -1) { +#if defined(__MVS__) + if (errno == EOPNOTSUPP) + return -EINVAL; +#endif + return -errno; + } + } +#endif + + errno = 0; + if (bind(tcp->io_watcher.fd, addr, addrlen) && errno != EADDRINUSE) { + if (errno == EAFNOSUPPORT) + /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a + * socket created with AF_INET to an AF_INET6 address or vice versa. 
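In plain socket terms, the option handling in uv__tcp_bind() above is the usual pre-bind setup: SO_REUSEADDR so a restarted server can rebind its address, and IPV6_V6ONLY to choose whether an AF_INET6 listener also accepts IPv4-mapped connections. A standalone sketch with an invented name:

#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Bind an IPv6 TCP socket on port. Returns the fd or a negative errno. */
static int bind_tcp6(unsigned short port, int ipv6_only) {
  struct sockaddr_in6 addr;
  int on = 1;
  int fd = socket(AF_INET6, SOCK_STREAM, 0);

  if (fd < 0)
    return -errno;

  setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
#ifdef IPV6_V6ONLY
  setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &ipv6_only, sizeof(ipv6_only));
#endif

  memset(&addr, 0, sizeof(addr));
  addr.sin6_family = AF_INET6;
  addr.sin6_addr = in6addr_any;
  addr.sin6_port = htons(port);

  if (bind(fd, (struct sockaddr*) &addr, sizeof(addr))) {
    int err = -errno;
    close(fd);
    return err;
  }

  return fd;
}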
*/ + return -EINVAL; + return -errno; + } + tcp->delayed_error = -errno; + + tcp->flags |= UV_HANDLE_BOUND; + if (addr->sa_family == AF_INET6) + tcp->flags |= UV_HANDLE_IPV6; + + return 0; +} + + +int uv__tcp_connect(uv_connect_t* req, + uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + uv_connect_cb cb) { + int err; + int r; + + assert(handle->type == UV_TCP); + + if (handle->connect_req != NULL) + return -EALREADY; /* FIXME(bnoordhuis) -EINVAL or maybe -EBUSY. */ + + err = maybe_new_socket(handle, + addr->sa_family, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); + if (err) + return err; + + handle->delayed_error = 0; + + do { + errno = 0; + r = connect(uv__stream_fd(handle), addr, addrlen); + } while (r == -1 && errno == EINTR); + + /* We not only check the return value, but also check the errno != 0. + * Because in rare cases connect() will return -1 but the errno + * is 0 (for example, on Android 4.3, OnePlus phone A0001_12_150227) + * and actually the tcp three-way handshake is completed. + */ + if (r == -1 && errno != 0) { + if (errno == EINPROGRESS) + ; /* not an error */ + else if (errno == ECONNREFUSED) + /* If we get a ECONNREFUSED wait until the next tick to report the + * error. Solaris wants to report immediately--other unixes want to + * wait. + */ + handle->delayed_error = -errno; + else + return -errno; + } + + uv__req_init(handle->loop, req, UV_CONNECT); + req->cb = cb; + req->handle = (uv_stream_t*) handle; + QUEUE_INIT(&req->queue); + handle->connect_req = req; + + uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); + + if (handle->delayed_error) + uv__io_feed(handle->loop, &handle->io_watcher); + + return 0; +} + + +int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) { + int err; + + err = uv__nonblock(sock, 1); + if (err) + return err; + + return uv__stream_open((uv_stream_t*)handle, + sock, + UV_STREAM_READABLE | UV_STREAM_WRITABLE); +} + + +int uv_tcp_getsockname(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen) { + socklen_t socklen; + + if (handle->delayed_error) + return handle->delayed_error; + + if (uv__stream_fd(handle) < 0) + return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ + + /* sizeof(socklen_t) != sizeof(int) on some systems. */ + socklen = (socklen_t) *namelen; + + if (getsockname(uv__stream_fd(handle), name, &socklen)) + return -errno; + + *namelen = (int) socklen; + return 0; +} + + +int uv_tcp_getpeername(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen) { + socklen_t socklen; + + if (handle->delayed_error) + return handle->delayed_error; + + if (uv__stream_fd(handle) < 0) + return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ + + /* sizeof(socklen_t) != sizeof(int) on some systems. */ + socklen = (socklen_t) *namelen; + + if (getpeername(uv__stream_fd(handle), name, &socklen)) + return -errno; + + *namelen = (int) socklen; + return 0; +} + + +int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) { + static int single_accept = -1; + int err; + + if (tcp->delayed_error) + return tcp->delayed_error; + + if (single_accept == -1) { + const char* val = getenv("UV_TCP_SINGLE_ACCEPT"); + single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */ + } + + if (single_accept) + tcp->flags |= UV_TCP_SINGLE_ACCEPT; + + err = maybe_new_socket(tcp, AF_INET, UV_STREAM_READABLE); + if (err) + return err; + +#ifdef __MVS__ + /* on zOS the listen call does not bind automatically + if the socket is unbound. 
Hence the manual binding to + an arbitrary port is required to be done manually + */ + + if (!(tcp->flags & UV_HANDLE_BOUND)) { + struct sockaddr_storage saddr; + socklen_t slen = sizeof(saddr); + memset(&saddr, 0, sizeof(saddr)); + + if (getsockname(tcp->io_watcher.fd, (struct sockaddr*) &saddr, &slen)) + return -errno; + + if (bind(tcp->io_watcher.fd, (struct sockaddr*) &saddr, slen)) + return -errno; + + tcp->flags |= UV_HANDLE_BOUND; + } +#endif + + if (listen(tcp->io_watcher.fd, backlog)) + return -errno; + + tcp->connection_cb = cb; + tcp->flags |= UV_HANDLE_BOUND; + + /* Start listening for connections. */ + tcp->io_watcher.cb = uv__server_io; + uv__io_start(tcp->loop, &tcp->io_watcher, POLLIN); + + return 0; +} + + +int uv__tcp_nodelay(int fd, int on) { + if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on))) + return -errno; + return 0; +} + + +int uv__tcp_keepalive(int fd, int on, unsigned int delay) { + if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on))) + return -errno; + +#ifdef TCP_KEEPIDLE + if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay))) + return -errno; +#endif + + /* Solaris/SmartOS, if you don't support keep-alive, + * then don't advertise it in your system headers... + */ + /* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */ +#if defined(TCP_KEEPALIVE) && !defined(__sun) + if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay))) + return -errno; +#endif + + return 0; +} + + +int uv_tcp_nodelay(uv_tcp_t* handle, int on) { + int err; + + if (uv__stream_fd(handle) != -1) { + err = uv__tcp_nodelay(uv__stream_fd(handle), on); + if (err) + return err; + } + + if (on) + handle->flags |= UV_TCP_NODELAY; + else + handle->flags &= ~UV_TCP_NODELAY; + + return 0; +} + + +int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) { + int err; + + if (uv__stream_fd(handle) != -1) { + err =uv__tcp_keepalive(uv__stream_fd(handle), on, delay); + if (err) + return err; + } + + if (on) + handle->flags |= UV_TCP_KEEPALIVE; + else + handle->flags &= ~UV_TCP_KEEPALIVE; + + /* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge + * uv_tcp_t with an int that's almost never used... + */ + + return 0; +} + + +int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) { + if (enable) + handle->flags &= ~UV_TCP_SINGLE_ACCEPT; + else + handle->flags |= UV_TCP_SINGLE_ACCEPT; + return 0; +} + + +void uv__tcp_close(uv_tcp_t* handle) { + uv__stream_close((uv_stream_t*)handle); +} diff --git a/deps/libuv/src/unix/thread.c b/deps/libuv/src/unix/thread.c new file mode 100644 index 00000000..a9b5e4c0 --- /dev/null +++ b/deps/libuv/src/unix/thread.c @@ -0,0 +1,575 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include + +#include +#include /* getrlimit() */ +#include /* getpagesize() */ + +#include + +#ifdef __MVS__ +#include +#include +#endif + +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) + + +int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { + int err; + pthread_attr_t* attr; +#if defined(__APPLE__) + pthread_attr_t attr_storage; + struct rlimit lim; +#endif + + /* On OSX threads other than the main thread are created with a reduced stack + * size by default, adjust it to RLIMIT_STACK. + */ +#if defined(__APPLE__) + if (getrlimit(RLIMIT_STACK, &lim)) + abort(); + + attr = &attr_storage; + if (pthread_attr_init(attr)) + abort(); + + if (lim.rlim_cur != RLIM_INFINITY) { + /* pthread_attr_setstacksize() expects page-aligned values. */ + lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize(); + + if (lim.rlim_cur >= PTHREAD_STACK_MIN) + if (pthread_attr_setstacksize(attr, lim.rlim_cur)) + abort(); + } +#else + attr = NULL; +#endif + + err = pthread_create(tid, attr, (void*(*)(void*)) entry, arg); + + if (attr != NULL) + pthread_attr_destroy(attr); + + return -err; +} + + +uv_thread_t uv_thread_self(void) { + return pthread_self(); +} + +int uv_thread_join(uv_thread_t *tid) { + return -pthread_join(*tid, NULL); +} + + +int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) { + return pthread_equal(*t1, *t2); +} + + +int uv_mutex_init(uv_mutex_t* mutex) { +#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK) + return -pthread_mutex_init(mutex, NULL); +#else + pthread_mutexattr_t attr; + int err; + + if (pthread_mutexattr_init(&attr)) + abort(); + + if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)) + abort(); + + err = pthread_mutex_init(mutex, &attr); + + if (pthread_mutexattr_destroy(&attr)) + abort(); + + return -err; +#endif +} + + +void uv_mutex_destroy(uv_mutex_t* mutex) { + if (pthread_mutex_destroy(mutex)) + abort(); +} + + +void uv_mutex_lock(uv_mutex_t* mutex) { + if (pthread_mutex_lock(mutex)) + abort(); +} + + +int uv_mutex_trylock(uv_mutex_t* mutex) { + int err; + + err = pthread_mutex_trylock(mutex); + if (err) { + if (err != EBUSY && err != EAGAIN) + abort(); + return -EBUSY; + } + + return 0; +} + + +void uv_mutex_unlock(uv_mutex_t* mutex) { + if (pthread_mutex_unlock(mutex)) + abort(); +} + + +int uv_rwlock_init(uv_rwlock_t* rwlock) { + return -pthread_rwlock_init(rwlock, NULL); +} + + +void uv_rwlock_destroy(uv_rwlock_t* rwlock) { + if (pthread_rwlock_destroy(rwlock)) + abort(); +} + + +void uv_rwlock_rdlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_rdlock(rwlock)) + abort(); +} + + +int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) { + int err; + + err = pthread_rwlock_tryrdlock(rwlock); + if (err) { + if (err != EBUSY && err != EAGAIN) + abort(); + return -EBUSY; + } + + return 0; +} + + +void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_unlock(rwlock)) + abort(); +} + + +void uv_rwlock_wrlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_wrlock(rwlock)) 
+ abort(); +} + + +int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) { + int err; + + err = pthread_rwlock_trywrlock(rwlock); + if (err) { + if (err != EBUSY && err != EAGAIN) + abort(); + return -EBUSY; + } + + return 0; +} + + +void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) { + if (pthread_rwlock_unlock(rwlock)) + abort(); +} + + +void uv_once(uv_once_t* guard, void (*callback)(void)) { + if (pthread_once(guard, callback)) + abort(); +} + +#if defined(__APPLE__) && defined(__MACH__) + +int uv_sem_init(uv_sem_t* sem, unsigned int value) { + kern_return_t err; + + err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value); + if (err == KERN_SUCCESS) + return 0; + if (err == KERN_INVALID_ARGUMENT) + return -EINVAL; + if (err == KERN_RESOURCE_SHORTAGE) + return -ENOMEM; + + abort(); + return -EINVAL; /* Satisfy the compiler. */ +} + + +void uv_sem_destroy(uv_sem_t* sem) { + if (semaphore_destroy(mach_task_self(), *sem)) + abort(); +} + + +void uv_sem_post(uv_sem_t* sem) { + if (semaphore_signal(*sem)) + abort(); +} + + +void uv_sem_wait(uv_sem_t* sem) { + int r; + + do + r = semaphore_wait(*sem); + while (r == KERN_ABORTED); + + if (r != KERN_SUCCESS) + abort(); +} + + +int uv_sem_trywait(uv_sem_t* sem) { + mach_timespec_t interval; + kern_return_t err; + + interval.tv_sec = 0; + interval.tv_nsec = 0; + + err = semaphore_timedwait(*sem, interval); + if (err == KERN_SUCCESS) + return 0; + if (err == KERN_OPERATION_TIMED_OUT) + return -EAGAIN; + + abort(); + return -EINVAL; /* Satisfy the compiler. */ +} + +#elif defined(__MVS__) + +int uv_sem_init(uv_sem_t* sem, unsigned int value) { + uv_sem_t semid; + struct sembuf buf; + int err; + + buf.sem_num = 0; + buf.sem_op = value; + buf.sem_flg = 0; + + semid = semget(IPC_PRIVATE, 1, S_IRUSR | S_IWUSR); + if (semid == -1) + return -errno; + + if (-1 == semop(semid, &buf, 1)) { + err = errno; + if (-1 == semctl(*sem, 0, IPC_RMID)) + abort(); + return -err; + } + + *sem = semid; + return 0; +} + +void uv_sem_destroy(uv_sem_t* sem) { + if (-1 == semctl(*sem, 0, IPC_RMID)) + abort(); +} + +void uv_sem_post(uv_sem_t* sem) { + struct sembuf buf; + + buf.sem_num = 0; + buf.sem_op = 1; + buf.sem_flg = 0; + + if (-1 == semop(*sem, &buf, 1)) + abort(); +} + +void uv_sem_wait(uv_sem_t* sem) { + struct sembuf buf; + int op_status; + + buf.sem_num = 0; + buf.sem_op = -1; + buf.sem_flg = 0; + + do + op_status = semop(*sem, &buf, 1); + while (op_status == -1 && errno == EINTR); + + if (op_status) + abort(); +} + +int uv_sem_trywait(uv_sem_t* sem) { + struct sembuf buf; + int op_status; + + buf.sem_num = 0; + buf.sem_op = -1; + buf.sem_flg = IPC_NOWAIT; + + do + op_status = semop(*sem, &buf, 1); + while (op_status == -1 && errno == EINTR); + + if (op_status) { + if (errno == EAGAIN) + return -EAGAIN; + abort(); + } + + return 0; +} + +#else /* !(defined(__APPLE__) && defined(__MACH__)) */ + +int uv_sem_init(uv_sem_t* sem, unsigned int value) { + if (sem_init(sem, 0, value)) + return -errno; + return 0; +} + + +void uv_sem_destroy(uv_sem_t* sem) { + if (sem_destroy(sem)) + abort(); +} + + +void uv_sem_post(uv_sem_t* sem) { + if (sem_post(sem)) + abort(); +} + + +void uv_sem_wait(uv_sem_t* sem) { + int r; + + do + r = sem_wait(sem); + while (r == -1 && errno == EINTR); + + if (r) + abort(); +} + + +int uv_sem_trywait(uv_sem_t* sem) { + int r; + + do + r = sem_trywait(sem); + while (r == -1 && errno == EINTR); + + if (r) { + if (errno == EAGAIN) + return -EAGAIN; + abort(); + } + + return 0; +} + +#endif /* defined(__APPLE__) && defined(__MACH__) */ + + +#if 
defined(__APPLE__) && defined(__MACH__) || defined(__MVS__) + +int uv_cond_init(uv_cond_t* cond) { + return -pthread_cond_init(cond, NULL); +} + +#else /* !(defined(__APPLE__) && defined(__MACH__)) */ + +int uv_cond_init(uv_cond_t* cond) { + pthread_condattr_t attr; + int err; + + err = pthread_condattr_init(&attr); + if (err) + return -err; + +#if !(defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)) + err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC); + if (err) + goto error2; +#endif + + err = pthread_cond_init(cond, &attr); + if (err) + goto error2; + + err = pthread_condattr_destroy(&attr); + if (err) + goto error; + + return 0; + +error: + pthread_cond_destroy(cond); +error2: + pthread_condattr_destroy(&attr); + return -err; +} + +#endif /* defined(__APPLE__) && defined(__MACH__) */ + +void uv_cond_destroy(uv_cond_t* cond) { +#if defined(__APPLE__) && defined(__MACH__) + /* It has been reported that destroying condition variables that have been + * signalled but not waited on can sometimes result in application crashes. + * See https://codereview.chromium.org/1323293005. + */ + pthread_mutex_t mutex; + struct timespec ts; + int err; + + if (pthread_mutex_init(&mutex, NULL)) + abort(); + + if (pthread_mutex_lock(&mutex)) + abort(); + + ts.tv_sec = 0; + ts.tv_nsec = 1; + + err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts); + if (err != 0 && err != ETIMEDOUT) + abort(); + + if (pthread_mutex_unlock(&mutex)) + abort(); + + if (pthread_mutex_destroy(&mutex)) + abort(); +#endif /* defined(__APPLE__) && defined(__MACH__) */ + + if (pthread_cond_destroy(cond)) + abort(); +} + +void uv_cond_signal(uv_cond_t* cond) { + if (pthread_cond_signal(cond)) + abort(); +} + +void uv_cond_broadcast(uv_cond_t* cond) { + if (pthread_cond_broadcast(cond)) + abort(); +} + +void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) { + if (pthread_cond_wait(cond, mutex)) + abort(); +} + + +int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) { + int r; + struct timespec ts; + +#if defined(__APPLE__) && defined(__MACH__) + ts.tv_sec = timeout / NANOSEC; + ts.tv_nsec = timeout % NANOSEC; + r = pthread_cond_timedwait_relative_np(cond, mutex, &ts); +#else + timeout += uv__hrtime(UV_CLOCK_PRECISE); + ts.tv_sec = timeout / NANOSEC; + ts.tv_nsec = timeout % NANOSEC; +#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC) + /* + * The bionic pthread implementation doesn't support CLOCK_MONOTONIC, + * but has this alternative function instead. + */ + r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts); +#else + r = pthread_cond_timedwait(cond, mutex, &ts); +#endif /* __ANDROID__ */ +#endif + + + if (r == 0) + return 0; + + if (r == ETIMEDOUT) + return -ETIMEDOUT; + + abort(); + return -EINVAL; /* Satisfy the compiler. 
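// Illustrative sketch, not part of the vendored libuv sources: the timeout
// passed to uv_cond_timedwait above is relative and expressed in nanoseconds.
// wait_for_flag and the 100 ms bound below are invented for this example.

#include <stdint.h>
#include "uv.h"

static int wait_for_flag(uv_mutex_t* lock, uv_cond_t* cond, const int* flag) {
  int rc = 0;

  uv_mutex_lock(lock);
  while (*flag == 0 && rc == 0)
    rc = uv_cond_timedwait(cond, lock, (uint64_t) 100 * 1000 * 1000);  // 100 ms
  uv_mutex_unlock(lock);

  return *flag != 0 ? 0 : rc;   // 0 on success, negated ETIMEDOUT on timeout
}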
*/ +} + + +int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { + return -pthread_barrier_init(barrier, NULL, count); +} + + +void uv_barrier_destroy(uv_barrier_t* barrier) { + if (pthread_barrier_destroy(barrier)) + abort(); +} + + +int uv_barrier_wait(uv_barrier_t* barrier) { + int r = pthread_barrier_wait(barrier); + if (r && r != PTHREAD_BARRIER_SERIAL_THREAD) + abort(); + return r == PTHREAD_BARRIER_SERIAL_THREAD; +} + + +int uv_key_create(uv_key_t* key) { + return -pthread_key_create(key, NULL); +} + + +void uv_key_delete(uv_key_t* key) { + if (pthread_key_delete(*key)) + abort(); +} + + +void* uv_key_get(uv_key_t* key) { + return pthread_getspecific(*key); +} + + +void uv_key_set(uv_key_t* key, void* value) { + if (pthread_setspecific(*key, value)) + abort(); +} diff --git a/deps/libuv/src/unix/timer.c b/deps/libuv/src/unix/timer.c new file mode 100644 index 00000000..f46bdf4b --- /dev/null +++ b/deps/libuv/src/unix/timer.c @@ -0,0 +1,172 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" +#include "heap-inl.h" + +#include +#include + + +static int timer_less_than(const struct heap_node* ha, + const struct heap_node* hb) { + const uv_timer_t* a; + const uv_timer_t* b; + + a = container_of(ha, uv_timer_t, heap_node); + b = container_of(hb, uv_timer_t, heap_node); + + if (a->timeout < b->timeout) + return 1; + if (b->timeout < a->timeout) + return 0; + + /* Compare start_id when both have the same timeout. start_id is + * allocated with loop->timer_counter in uv_timer_start(). 
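// Illustrative sketch, not part of the vendored libuv sources: because of the
// start_id tie-break described above, timers armed with identical timeouts
// expire in the order they were started. The handle names and the 10 ms
// timeout below are invented for this example.

#include <stdio.h>
#include "uv.h"

static void on_first(uv_timer_t* t)  { (void) t; printf("first\n");  }
static void on_second(uv_timer_t* t) { (void) t; printf("second\n"); }

static int example_timer_order(void) {
  uv_loop_t loop;
  uv_timer_t a;
  uv_timer_t b;

  if (uv_loop_init(&loop))
    return -1;
  uv_timer_init(&loop, &a);
  uv_timer_init(&loop, &b);
  uv_timer_start(&a, on_first, 10, 0);    // same timeout for both handles...
  uv_timer_start(&b, on_second, 10, 0);   // ...so a's callback runs before b's
  uv_run(&loop, UV_RUN_DEFAULT);          // prints "first", then "second"
  uv_close((uv_handle_t*) &a, NULL);
  uv_close((uv_handle_t*) &b, NULL);
  uv_run(&loop, UV_RUN_DEFAULT);          // drain the close callbacks
  return uv_loop_close(&loop);
}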
+ */ + if (a->start_id < b->start_id) + return 1; + if (b->start_id < a->start_id) + return 0; + + return 0; +} + + +int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER); + handle->timer_cb = NULL; + handle->repeat = 0; + return 0; +} + + +int uv_timer_start(uv_timer_t* handle, + uv_timer_cb cb, + uint64_t timeout, + uint64_t repeat) { + uint64_t clamped_timeout; + + if (cb == NULL) + return -EINVAL; + + if (uv__is_active(handle)) + uv_timer_stop(handle); + + clamped_timeout = handle->loop->time + timeout; + if (clamped_timeout < timeout) + clamped_timeout = (uint64_t) -1; + + handle->timer_cb = cb; + handle->timeout = clamped_timeout; + handle->repeat = repeat; + /* start_id is the second index to be compared in uv__timer_cmp() */ + handle->start_id = handle->loop->timer_counter++; + + heap_insert((struct heap*) &handle->loop->timer_heap, + (struct heap_node*) &handle->heap_node, + timer_less_than); + uv__handle_start(handle); + + return 0; +} + + +int uv_timer_stop(uv_timer_t* handle) { + if (!uv__is_active(handle)) + return 0; + + heap_remove((struct heap*) &handle->loop->timer_heap, + (struct heap_node*) &handle->heap_node, + timer_less_than); + uv__handle_stop(handle); + + return 0; +} + + +int uv_timer_again(uv_timer_t* handle) { + if (handle->timer_cb == NULL) + return -EINVAL; + + if (handle->repeat) { + uv_timer_stop(handle); + uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat); + } + + return 0; +} + + +void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) { + handle->repeat = repeat; +} + + +uint64_t uv_timer_get_repeat(const uv_timer_t* handle) { + return handle->repeat; +} + + +int uv__next_timeout(const uv_loop_t* loop) { + const struct heap_node* heap_node; + const uv_timer_t* handle; + uint64_t diff; + + heap_node = heap_min((const struct heap*) &loop->timer_heap); + if (heap_node == NULL) + return -1; /* block indefinitely */ + + handle = container_of(heap_node, uv_timer_t, heap_node); + if (handle->timeout <= loop->time) + return 0; + + diff = handle->timeout - loop->time; + if (diff > INT_MAX) + diff = INT_MAX; + + return diff; +} + + +void uv__run_timers(uv_loop_t* loop) { + struct heap_node* heap_node; + uv_timer_t* handle; + + for (;;) { + heap_node = heap_min((struct heap*) &loop->timer_heap); + if (heap_node == NULL) + break; + + handle = container_of(heap_node, uv_timer_t, heap_node); + if (handle->timeout > loop->time) + break; + + uv_timer_stop(handle); + uv_timer_again(handle); + handle->timer_cb(handle); + } +} + + +void uv__timer_close(uv_timer_t* handle) { + uv_timer_stop(handle); +} diff --git a/deps/libuv/src/unix/tty.c b/deps/libuv/src/unix/tty.c new file mode 100644 index 00000000..b2d37f4c --- /dev/null +++ b/deps/libuv/src/unix/tty.c @@ -0,0 +1,336 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" +#include "spinlock.h" + +#include +#include +#include +#include +#include +#include + +#if defined(__MVS__) && !defined(IMAXBEL) +#define IMAXBEL 0 +#endif + +static int orig_termios_fd = -1; +static struct termios orig_termios; +static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER; + +static int uv__tty_is_slave(const int fd) { + int result; +#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + int dummy; + + result = ioctl(fd, TIOCGPTN, &dummy) != 0; +#elif defined(__APPLE__) + char dummy[256]; + + result = ioctl(fd, TIOCPTYGNAME, &dummy) != 0; +#else + /* Fallback to ptsname + */ + result = ptsname(fd) == NULL; +#endif + return result; +} + +int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int readable) { + uv_handle_type type; + int flags; + int newfd; + int r; + int saved_flags; + char path[256]; + + /* File descriptors that refer to files cannot be monitored with epoll. + * That restriction also applies to character devices like /dev/random + * (but obviously not /dev/tty.) + */ + type = uv_guess_handle(fd); + if (type == UV_FILE || type == UV_UNKNOWN_HANDLE) + return -EINVAL; + + flags = 0; + newfd = -1; + + /* Reopen the file descriptor when it refers to a tty. This lets us put the + * tty in non-blocking mode without affecting other processes that share it + * with us. + * + * Example: `node | cat` - if we put our fd 0 in non-blocking mode, it also + * affects fd 1 of `cat` because both file descriptors refer to the same + * struct file in the kernel. When we reopen our fd 0, it points to a + * different struct file, hence changing its properties doesn't affect + * other processes. + */ + if (type == UV_TTY) { + /* Reopening a pty in master mode won't work either because the reopened + * pty will be in slave mode (*BSD) or reopening will allocate a new + * master/slave pair (Linux). Therefore check if the fd points to a + * slave device. + */ + if (uv__tty_is_slave(fd) && ttyname_r(fd, path, sizeof(path)) == 0) + r = uv__open_cloexec(path, O_RDWR); + else + r = -1; + + if (r < 0) { + /* fallback to using blocking writes */ + if (!readable) + flags |= UV_STREAM_BLOCKING; + goto skip; + } + + newfd = r; + + r = uv__dup2_cloexec(newfd, fd); + if (r < 0 && r != -EINVAL) { + /* EINVAL means newfd == fd which could conceivably happen if another + * thread called close(fd) between our calls to isatty() and open(). + * That's a rather unlikely event but let's handle it anyway. + */ + uv__close(newfd); + return r; + } + + fd = newfd; + } + +#if defined(__APPLE__) + /* Save the fd flags in case we need to restore them due to an error. */ + do + saved_flags = fcntl(fd, F_GETFL); + while (saved_flags == -1 && errno == EINTR); + + if (saved_flags == -1) { + if (newfd != -1) + uv__close(newfd); + return -errno; + } +#endif + + /* Pacify the compiler. 
*/ + (void) &saved_flags; + +skip: + uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY); + + /* If anything fails beyond this point we need to remove the handle from + * the handle queue, since it was added by uv__handle_init in uv_stream_init. + */ + + if (!(flags & UV_STREAM_BLOCKING)) + uv__nonblock(fd, 1); + +#if defined(__APPLE__) + r = uv__stream_try_select((uv_stream_t*) tty, &fd); + if (r) { + int rc = r; + if (newfd != -1) + uv__close(newfd); + QUEUE_REMOVE(&tty->handle_queue); + do + r = fcntl(fd, F_SETFL, saved_flags); + while (r == -1 && errno == EINTR); + return rc; + } +#endif + + if (readable) + flags |= UV_STREAM_READABLE; + else + flags |= UV_STREAM_WRITABLE; + + uv__stream_open((uv_stream_t*) tty, fd, flags); + tty->mode = UV_TTY_MODE_NORMAL; + + return 0; +} + +static void uv__tty_make_raw(struct termios* tio) { + assert(tio != NULL); + +#if defined __sun || defined __MVS__ + /* + * This implementation of cfmakeraw for Solaris and derivatives is taken from + * http://www.perkin.org.uk/posts/solaris-portability-cfmakeraw.html. + */ + tio->c_iflag &= ~(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | + IGNCR | ICRNL | IXON); + tio->c_oflag &= ~OPOST; + tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); + tio->c_cflag &= ~(CSIZE | PARENB); + tio->c_cflag |= CS8; +#else + cfmakeraw(tio); +#endif /* #ifdef __sun */ +} + +int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) { + struct termios tmp; + int fd; + + if (tty->mode == (int) mode) + return 0; + + fd = uv__stream_fd(tty); + if (tty->mode == UV_TTY_MODE_NORMAL && mode != UV_TTY_MODE_NORMAL) { + if (tcgetattr(fd, &tty->orig_termios)) + return -errno; + + /* This is used for uv_tty_reset_mode() */ + uv_spinlock_lock(&termios_spinlock); + if (orig_termios_fd == -1) { + orig_termios = tty->orig_termios; + orig_termios_fd = fd; + } + uv_spinlock_unlock(&termios_spinlock); + } + + tmp = tty->orig_termios; + switch (mode) { + case UV_TTY_MODE_NORMAL: + break; + case UV_TTY_MODE_RAW: + tmp.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON); + tmp.c_oflag |= (ONLCR); + tmp.c_cflag |= (CS8); + tmp.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG); + tmp.c_cc[VMIN] = 1; + tmp.c_cc[VTIME] = 0; + break; + case UV_TTY_MODE_IO: + uv__tty_make_raw(&tmp); + break; + } + + /* Apply changes after draining */ + if (tcsetattr(fd, TCSADRAIN, &tmp)) + return -errno; + + tty->mode = mode; + return 0; +} + + +int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) { + struct winsize ws; + int err; + + do + err = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws); + while (err == -1 && errno == EINTR); + + if (err == -1) + return -errno; + + *width = ws.ws_col; + *height = ws.ws_row; + + return 0; +} + + +uv_handle_type uv_guess_handle(uv_file file) { + struct sockaddr sa; + struct stat s; + socklen_t len; + int type; + + if (file < 0) + return UV_UNKNOWN_HANDLE; + + if (isatty(file)) + return UV_TTY; + + if (fstat(file, &s)) + return UV_UNKNOWN_HANDLE; + + if (S_ISREG(s.st_mode)) + return UV_FILE; + + if (S_ISCHR(s.st_mode)) + return UV_FILE; /* XXX UV_NAMED_PIPE? 
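// Illustrative sketch, not part of the vendored libuv sources: uv_guess_handle
// is commonly used to decide how to wire up the standard streams.
// describe_stdin is invented for this example.

#include "uv.h"

static const char* describe_stdin(void) {
  switch (uv_guess_handle(0)) {           // 0 == stdin
    case UV_TTY:        return "tty";
    case UV_NAMED_PIPE: return "pipe";
    case UV_FILE:       return "file";
    case UV_TCP:        return "tcp socket";
    case UV_UDP:        return "udp socket";
    default:            return "unknown";
  }
}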
*/ + + if (S_ISFIFO(s.st_mode)) + return UV_NAMED_PIPE; + + if (!S_ISSOCK(s.st_mode)) + return UV_UNKNOWN_HANDLE; + + len = sizeof(type); + if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len)) + return UV_UNKNOWN_HANDLE; + + len = sizeof(sa); + if (getsockname(file, &sa, &len)) + return UV_UNKNOWN_HANDLE; + + if (type == SOCK_DGRAM) + if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6) + return UV_UDP; + + if (type == SOCK_STREAM) { +#if defined(_AIX) || defined(__DragonFly__) + /* on AIX/DragonFly the getsockname call returns an empty sa structure + * for sockets of type AF_UNIX. For all other types it will + * return a properly filled in structure. + */ + if (len == 0) + return UV_NAMED_PIPE; +#endif /* defined(_AIX) || defined(__DragonFly__) */ + + if (sa.sa_family == AF_INET || sa.sa_family == AF_INET6) + return UV_TCP; + if (sa.sa_family == AF_UNIX) + return UV_NAMED_PIPE; + } + + return UV_UNKNOWN_HANDLE; +} + + +/* This function is async signal-safe, meaning that it's safe to call from + * inside a signal handler _unless_ execution was inside uv_tty_set_mode()'s + * critical section when the signal was raised. + */ +int uv_tty_reset_mode(void) { + int saved_errno; + int err; + + saved_errno = errno; + if (!uv_spinlock_trylock(&termios_spinlock)) + return -EBUSY; /* In uv_tty_set_mode(). */ + + err = 0; + if (orig_termios_fd != -1) + if (tcsetattr(orig_termios_fd, TCSANOW, &orig_termios)) + err = -errno; + + uv_spinlock_unlock(&termios_spinlock); + errno = saved_errno; + + return err; +} diff --git a/deps/libuv/src/unix/udp.c b/deps/libuv/src/unix/udp.c new file mode 100644 index 00000000..1cd49257 --- /dev/null +++ b/deps/libuv/src/unix/udp.c @@ -0,0 +1,895 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
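// Illustrative sketch, not part of the vendored libuv sources: one way to use
// the async-signal-safe uv_tty_reset_mode() shown above is to restore the
// terminal from a signal handler before re-raising the signal. on_sigint and
// install_tty_cleanup are invented for this example.

#include <signal.h>
#include "uv.h"

static void on_sigint(int signum) {
  uv_tty_reset_mode();          // best effort; returns -EBUSY if it races
  signal(signum, SIG_DFL);      // restore the default disposition...
  raise(signum);                // ...and deliver the signal again
}

static void install_tty_cleanup(void) {
  signal(SIGINT, on_sigint);
}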
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#if defined(__MVS__) +#include +#endif + +#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP) +# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP +#endif + +#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP) +# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP +#endif + + +static void uv__udp_run_completed(uv_udp_t* handle); +static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents); +static void uv__udp_recvmsg(uv_udp_t* handle); +static void uv__udp_sendmsg(uv_udp_t* handle); +static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, + int domain, + unsigned int flags); + + +void uv__udp_close(uv_udp_t* handle) { + uv__io_close(handle->loop, &handle->io_watcher); + uv__handle_stop(handle); + + if (handle->io_watcher.fd != -1) { + uv__close(handle->io_watcher.fd); + handle->io_watcher.fd = -1; + } +} + + +void uv__udp_finish_close(uv_udp_t* handle) { + uv_udp_send_t* req; + QUEUE* q; + + assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT)); + assert(handle->io_watcher.fd == -1); + + while (!QUEUE_EMPTY(&handle->write_queue)) { + q = QUEUE_HEAD(&handle->write_queue); + QUEUE_REMOVE(q); + + req = QUEUE_DATA(q, uv_udp_send_t, queue); + req->status = -ECANCELED; + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); + } + + uv__udp_run_completed(handle); + + assert(handle->send_queue_size == 0); + assert(handle->send_queue_count == 0); + + /* Now tear down the handle. */ + handle->recv_cb = NULL; + handle->alloc_cb = NULL; + /* but _do not_ touch close_cb */ +} + + +static void uv__udp_run_completed(uv_udp_t* handle) { + uv_udp_send_t* req; + QUEUE* q; + + assert(!(handle->flags & UV_UDP_PROCESSING)); + handle->flags |= UV_UDP_PROCESSING; + + while (!QUEUE_EMPTY(&handle->write_completed_queue)) { + q = QUEUE_HEAD(&handle->write_completed_queue); + QUEUE_REMOVE(q); + + req = QUEUE_DATA(q, uv_udp_send_t, queue); + uv__req_unregister(handle->loop, req); + + handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); + handle->send_queue_count--; + + if (req->bufs != req->bufsml) + uv__free(req->bufs); + req->bufs = NULL; + + if (req->send_cb == NULL) + continue; + + /* req->status >= 0 == bytes written + * req->status < 0 == errno + */ + if (req->status >= 0) + req->send_cb(req, 0); + else + req->send_cb(req, req->status); + } + + if (QUEUE_EMPTY(&handle->write_queue)) { + /* Pending queue and completion queue empty, stop watcher. */ + uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT); + if (!uv__io_active(&handle->io_watcher, POLLIN)) + uv__handle_stop(handle); + } + + handle->flags &= ~UV_UDP_PROCESSING; +} + + +static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) { + uv_udp_t* handle; + + handle = container_of(w, uv_udp_t, io_watcher); + assert(handle->type == UV_UDP); + + if (revents & POLLIN) + uv__udp_recvmsg(handle); + + if (revents & POLLOUT) { + uv__udp_sendmsg(handle); + uv__udp_run_completed(handle); + } +} + + +static void uv__udp_recvmsg(uv_udp_t* handle) { + struct sockaddr_storage peer; + struct msghdr h; + ssize_t nread; + uv_buf_t buf; + int flags; + int count; + + assert(handle->recv_cb != NULL); + assert(handle->alloc_cb != NULL); + + /* Prevent loop starvation when the data comes in as fast as (or faster than) + * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O. 
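// Illustrative sketch, not part of the vendored libuv sources: the alloc/recv
// callback pair that this read path drives once uv_udp_recv_start() has been
// called. A zero nread with a NULL addr simply means "nothing left to read".
// on_alloc and on_recv are invented for this example.

#include <stdio.h>
#include <stdlib.h>
#include "uv.h"

static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  (void) handle;
  buf->base = malloc(suggested);          // 64 KB is suggested by the reader
  buf->len = buf->base == NULL ? 0 : suggested;
}

static void on_recv(uv_udp_t* handle,
                    ssize_t nread,
                    const uv_buf_t* buf,
                    const struct sockaddr* addr,
                    unsigned flags) {
  (void) handle;
  if (nread > 0 && addr != NULL)
    printf("received %zd byte datagram%s\n",
           nread,
           (flags & UV_UDP_PARTIAL) ? " (truncated)" : "");
  free(buf->base);
}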
+ */ + count = 32; + + memset(&h, 0, sizeof(h)); + h.msg_name = &peer; + + do { + buf = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 64 * 1024, &buf); + if (buf.base == NULL || buf.len == 0) { + handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0); + return; + } + assert(buf.base != NULL); + + h.msg_namelen = sizeof(peer); + h.msg_iov = (void*) &buf; + h.msg_iovlen = 1; + + do { + nread = recvmsg(handle->io_watcher.fd, &h, 0); + } + while (nread == -1 && errno == EINTR); + + if (nread == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + handle->recv_cb(handle, 0, &buf, NULL, 0); + else + handle->recv_cb(handle, -errno, &buf, NULL, 0); + } + else { + const struct sockaddr *addr; + if (h.msg_namelen == 0) + addr = NULL; + else + addr = (const struct sockaddr*) &peer; + + flags = 0; + if (h.msg_flags & MSG_TRUNC) + flags |= UV_UDP_PARTIAL; + + handle->recv_cb(handle, nread, &buf, addr, flags); + } + } + /* recv_cb callback may decide to pause or close the handle */ + while (nread != -1 + && count-- > 0 + && handle->io_watcher.fd != -1 + && handle->recv_cb != NULL); +} + + +static void uv__udp_sendmsg(uv_udp_t* handle) { + uv_udp_send_t* req; + QUEUE* q; + struct msghdr h; + ssize_t size; + + while (!QUEUE_EMPTY(&handle->write_queue)) { + q = QUEUE_HEAD(&handle->write_queue); + assert(q != NULL); + + req = QUEUE_DATA(q, uv_udp_send_t, queue); + assert(req != NULL); + + memset(&h, 0, sizeof h); + h.msg_name = &req->addr; + h.msg_namelen = (req->addr.ss_family == AF_INET6 ? + sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in)); + h.msg_iov = (struct iovec*) req->bufs; + h.msg_iovlen = req->nbufs; + + do { + size = sendmsg(handle->io_watcher.fd, &h, 0); + } while (size == -1 && errno == EINTR); + + if (size == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) + break; + + req->status = (size == -1 ? -errno : size); + + /* Sending a datagram is an atomic operation: either all data + * is written or nothing is (and EMSGSIZE is raised). That is + * why we don't handle partial writes. Just pop the request + * off the write queue and onto the completed queue, done. + */ + QUEUE_REMOVE(&req->queue); + QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); + uv__io_feed(handle->loop, &handle->io_watcher); + } +} + + +/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional + * refinements for programs that use multicast. + * + * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that + * are different from the BSDs: it _shares_ the port rather than steal it + * from the current listener. While useful, it's not something we can emulate + * on other platforms so we don't enable it. + */ +static int uv__set_reuse(int fd) { + int yes; + +#if defined(SO_REUSEPORT) && !defined(__linux__) + yes = 1; + if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes))) + return -errno; +#else + yes = 1; + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes))) + return -errno; +#endif + + return 0; +} + + +int uv__udp_bind(uv_udp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + int err; + int yes; + int fd; + + /* Check for bad flags. */ + if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR)) + return -EINVAL; + + /* Cannot set IPv6-only mode on non-IPv6 socket. 
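// Illustrative sketch, not part of the vendored libuv sources: the public call
// chain that reaches uv__udp_bind, binding an IPv4 handle with address reuse
// enabled. bind_udp_any is invented for this example.

#include <netinet/in.h>
#include "uv.h"

static int bind_udp_any(uv_loop_t* loop, uv_udp_t* handle, int port) {
  struct sockaddr_in addr;
  int err;

  err = uv_udp_init(loop, handle);
  if (err)
    return err;
  err = uv_ip4_addr("0.0.0.0", port, &addr);
  if (err)
    return err;
  return uv_udp_bind(handle, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);
}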
*/ + if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) + return -EINVAL; + + fd = handle->io_watcher.fd; + if (fd == -1) { + err = uv__socket(addr->sa_family, SOCK_DGRAM, 0); + if (err < 0) + return err; + fd = err; + handle->io_watcher.fd = fd; + } + + if (flags & UV_UDP_REUSEADDR) { + err = uv__set_reuse(fd); + if (err) + goto out; + } + + if (flags & UV_UDP_IPV6ONLY) { +#ifdef IPV6_V6ONLY + yes = 1; + if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) { + err = -errno; + goto out; + } +#else + err = -ENOTSUP; + goto out; +#endif + } + + if (bind(fd, addr, addrlen)) { + err = -errno; + if (errno == EAFNOSUPPORT) + /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a + * socket created with AF_INET to an AF_INET6 address or vice versa. */ + err = -EINVAL; + goto out; + } + + if (addr->sa_family == AF_INET6) + handle->flags |= UV_HANDLE_IPV6; + + handle->flags |= UV_HANDLE_BOUND; + + return 0; + +out: + uv__close(handle->io_watcher.fd); + handle->io_watcher.fd = -1; + return err; +} + + +static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, + int domain, + unsigned int flags) { + unsigned char taddr[sizeof(struct sockaddr_in6)]; + socklen_t addrlen; + + if (handle->io_watcher.fd != -1) + return 0; + + switch (domain) { + case AF_INET: + { + struct sockaddr_in* addr = (void*)&taddr; + memset(addr, 0, sizeof *addr); + addr->sin_family = AF_INET; + addr->sin_addr.s_addr = INADDR_ANY; + addrlen = sizeof *addr; + break; + } + case AF_INET6: + { + struct sockaddr_in6* addr = (void*)&taddr; + memset(addr, 0, sizeof *addr); + addr->sin6_family = AF_INET6; + addr->sin6_addr = in6addr_any; + addrlen = sizeof *addr; + break; + } + default: + assert(0 && "unsupported address family"); + abort(); + } + + return uv__udp_bind(handle, (const struct sockaddr*) &taddr, addrlen, flags); +} + + +int uv__udp_send(uv_udp_send_t* req, + uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen, + uv_udp_send_cb send_cb) { + int err; + int empty_queue; + + assert(nbufs > 0); + + err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0); + if (err) + return err; + + /* It's legal for send_queue_count > 0 even when the write_queue is empty; + * it means there are error-state requests in the write_completed_queue that + * will touch up send_queue_size/count later. 
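// Illustrative sketch, not part of the vendored libuv sources: queueing a
// datagram through the public uv_udp_send, which ends up in uv__udp_send
// below. send_hello and on_send are invented for this example.

#include <netinet/in.h>
#include "uv.h"

static void on_send(uv_udp_send_t* req, int status) {
  (void) req;
  (void) status;                      // negated errno from the send path, or 0
}

static int send_hello(uv_udp_t* handle, const struct sockaddr_in* dest) {
  static uv_udp_send_t req;           // must stay valid until on_send runs
  static char payload[] = "hello";    // the payload itself is not copied
  uv_buf_t buf = uv_buf_init(payload, sizeof(payload) - 1);

  return uv_udp_send(&req, handle, &buf, 1,
                     (const struct sockaddr*) dest, on_send);
}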
+ */ + empty_queue = (handle->send_queue_count == 0); + + uv__req_init(handle->loop, req, UV_UDP_SEND); + assert(addrlen <= sizeof(req->addr)); + memcpy(&req->addr, addr, addrlen); + req->send_cb = send_cb; + req->handle = handle; + req->nbufs = nbufs; + + req->bufs = req->bufsml; + if (nbufs > ARRAY_SIZE(req->bufsml)) + req->bufs = uv__malloc(nbufs * sizeof(bufs[0])); + + if (req->bufs == NULL) { + uv__req_unregister(handle->loop, req); + return -ENOMEM; + } + + memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); + handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs); + handle->send_queue_count++; + QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue); + uv__handle_start(handle); + + if (empty_queue && !(handle->flags & UV_UDP_PROCESSING)) { + uv__udp_sendmsg(handle); + } else { + uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); + } + + return 0; +} + + +int uv__udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen) { + int err; + struct msghdr h; + ssize_t size; + + assert(nbufs > 0); + + /* already sending a message */ + if (handle->send_queue_count != 0) + return -EAGAIN; + + err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0); + if (err) + return err; + + memset(&h, 0, sizeof h); + h.msg_name = (struct sockaddr*) addr; + h.msg_namelen = addrlen; + h.msg_iov = (struct iovec*) bufs; + h.msg_iovlen = nbufs; + + do { + size = sendmsg(handle->io_watcher.fd, &h, 0); + } while (size == -1 && errno == EINTR); + + if (size == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return -EAGAIN; + else + return -errno; + } + + return size; +} + + +static int uv__udp_set_membership4(uv_udp_t* handle, + const struct sockaddr_in* multicast_addr, + const char* interface_addr, + uv_membership membership) { + struct ip_mreq mreq; + int optname; + int err; + + memset(&mreq, 0, sizeof mreq); + + if (interface_addr) { + err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr); + if (err) + return err; + } else { + mreq.imr_interface.s_addr = htonl(INADDR_ANY); + } + + mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr; + + switch (membership) { + case UV_JOIN_GROUP: + optname = IP_ADD_MEMBERSHIP; + break; + case UV_LEAVE_GROUP: + optname = IP_DROP_MEMBERSHIP; + break; + default: + return -EINVAL; + } + + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IP, + optname, + &mreq, + sizeof(mreq))) { +#if defined(__MVS__) + if (errno == ENXIO) + return -ENODEV; +#endif + return -errno; + } + + return 0; +} + + +static int uv__udp_set_membership6(uv_udp_t* handle, + const struct sockaddr_in6* multicast_addr, + const char* interface_addr, + uv_membership membership) { + int optname; + struct ipv6_mreq mreq; + struct sockaddr_in6 addr6; + + memset(&mreq, 0, sizeof mreq); + + if (interface_addr) { + if (uv_ip6_addr(interface_addr, 0, &addr6)) + return -EINVAL; + mreq.ipv6mr_interface = addr6.sin6_scope_id; + } else { + mreq.ipv6mr_interface = 0; + } + + mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr; + + switch (membership) { + case UV_JOIN_GROUP: + optname = IPV6_ADD_MEMBERSHIP; + break; + case UV_LEAVE_GROUP: + optname = IPV6_DROP_MEMBERSHIP; + break; + default: + return -EINVAL; + } + + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IPV6, + optname, + &mreq, + sizeof(mreq))) { +#if defined(__MVS__) + if (errno == ENXIO) + return -ENODEV; +#endif + return -errno; + } + + return 0; +} + + +int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) { + int domain; + 
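// Illustrative sketch, not part of the vendored libuv sources: joining an IPv4
// multicast group on an unbound handle. uv_udp_set_membership (below) relies
// on uv__udp_maybe_deferred_bind above to bind to INADDR_ANY with
// UV_UDP_REUSEADDR first. join_group and the group address are invented for
// this example.

#include "uv.h"

static int join_group(uv_loop_t* loop, uv_udp_t* handle) {
  int err;

  err = uv_udp_init(loop, handle);
  if (err)
    return err;
  // A NULL interface address lets the kernel pick the default interface.
  return uv_udp_set_membership(handle, "239.255.0.1", NULL, UV_JOIN_GROUP);
}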
int err; + int fd; + + /* Use the lower 8 bits for the domain */ + domain = flags & 0xFF; + if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) + return -EINVAL; + + if (flags & ~0xFF) + return -EINVAL; + + if (domain != AF_UNSPEC) { + err = uv__socket(domain, SOCK_DGRAM, 0); + if (err < 0) + return err; + fd = err; + } else { + fd = -1; + } + + uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP); + handle->alloc_cb = NULL; + handle->recv_cb = NULL; + handle->send_queue_size = 0; + handle->send_queue_count = 0; + uv__io_init(&handle->io_watcher, uv__udp_io, fd); + QUEUE_INIT(&handle->write_queue); + QUEUE_INIT(&handle->write_completed_queue); + return 0; +} + + +int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) { + return uv_udp_init_ex(loop, handle, AF_UNSPEC); +} + + +int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) { + int err; + + /* Check for already active socket. */ + if (handle->io_watcher.fd != -1) + return -EBUSY; + + err = uv__nonblock(sock, 1); + if (err) + return err; + + err = uv__set_reuse(sock); + if (err) + return err; + + handle->io_watcher.fd = sock; + return 0; +} + + +int uv_udp_set_membership(uv_udp_t* handle, + const char* multicast_addr, + const char* interface_addr, + uv_membership membership) { + int err; + struct sockaddr_in addr4; + struct sockaddr_in6 addr6; + + if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) { + err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR); + if (err) + return err; + return uv__udp_set_membership4(handle, &addr4, interface_addr, membership); + } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) { + err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR); + if (err) + return err; + return uv__udp_set_membership6(handle, &addr6, interface_addr, membership); + } else { + return -EINVAL; + } +} + +static int uv__setsockopt(uv_udp_t* handle, + int option4, + int option6, + const void* val, + size_t size) { + int r; + + if (handle->flags & UV_HANDLE_IPV6) + r = setsockopt(handle->io_watcher.fd, + IPPROTO_IPV6, + option6, + val, + size); + else + r = setsockopt(handle->io_watcher.fd, + IPPROTO_IP, + option4, + val, + size); + if (r) + return -errno; + + return 0; +} + +static int uv__setsockopt_maybe_char(uv_udp_t* handle, + int option4, + int option6, + int val) { +#if defined(__sun) || defined(_AIX) || defined(__MVS__) + char arg = val; +#elif defined(__OpenBSD__) + unsigned char arg = val; +#else + int arg = val; +#endif + + if (val < 0 || val > 255) + return -EINVAL; + + return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg)); +} + + +int uv_udp_set_broadcast(uv_udp_t* handle, int on) { + if (setsockopt(handle->io_watcher.fd, + SOL_SOCKET, + SO_BROADCAST, + &on, + sizeof(on))) { + return -errno; + } + + return 0; +} + + +int uv_udp_set_ttl(uv_udp_t* handle, int ttl) { + if (ttl < 1 || ttl > 255) + return -EINVAL; + +#if defined(__MVS__) + if (!(handle->flags & UV_HANDLE_IPV6)) + return -ENOTSUP; /* zOS does not support setting ttl for IPv4 */ +#endif + +/* + * On Solaris and derivatives such as SmartOS, the length of socket options + * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS, + * so hardcode the size of these options on this platform, + * and use the general uv__setsockopt_maybe_char call on other platforms. 
+ */ +#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \ + defined(__MVS__) + + return uv__setsockopt(handle, + IP_TTL, + IPV6_UNICAST_HOPS, + &ttl, + sizeof(ttl)); +#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) || + defined(__MVS__) */ + + return uv__setsockopt_maybe_char(handle, + IP_TTL, + IPV6_UNICAST_HOPS, + ttl); +} + + +int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) { +/* + * On Solaris and derivatives such as SmartOS, the length of socket options + * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for + * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case, + * and use the general uv__setsockopt_maybe_char call otherwise. + */ +#if defined(__sun) || defined(_AIX) || defined(__MVS__) + if (handle->flags & UV_HANDLE_IPV6) + return uv__setsockopt(handle, + IP_MULTICAST_TTL, + IPV6_MULTICAST_HOPS, + &ttl, + sizeof(ttl)); +#endif /* defined(__sun) || defined(_AIX) || defined(__MVS__) */ + + return uv__setsockopt_maybe_char(handle, + IP_MULTICAST_TTL, + IPV6_MULTICAST_HOPS, + ttl); +} + + +int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) { +/* + * On Solaris and derivatives such as SmartOS, the length of socket options + * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for + * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case, + * and use the general uv__setsockopt_maybe_char call otherwise. + */ +#if defined(__sun) || defined(_AIX) || defined(__MVS__) + if (handle->flags & UV_HANDLE_IPV6) + return uv__setsockopt(handle, + IP_MULTICAST_LOOP, + IPV6_MULTICAST_LOOP, + &on, + sizeof(on)); +#endif /* defined(__sun) || defined(_AIX) || defined(__MVS__) */ + + return uv__setsockopt_maybe_char(handle, + IP_MULTICAST_LOOP, + IPV6_MULTICAST_LOOP, + on); +} + +int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) { + struct sockaddr_storage addr_st; + struct sockaddr_in* addr4; + struct sockaddr_in6* addr6; + + addr4 = (struct sockaddr_in*) &addr_st; + addr6 = (struct sockaddr_in6*) &addr_st; + + if (!interface_addr) { + memset(&addr_st, 0, sizeof addr_st); + if (handle->flags & UV_HANDLE_IPV6) { + addr_st.ss_family = AF_INET6; + addr6->sin6_scope_id = 0; + } else { + addr_st.ss_family = AF_INET; + addr4->sin_addr.s_addr = htonl(INADDR_ANY); + } + } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) { + /* nothing, address was parsed */ + } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) { + /* nothing, address was parsed */ + } else { + return -EINVAL; + } + + if (addr_st.ss_family == AF_INET) { + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IP, + IP_MULTICAST_IF, + (void*) &addr4->sin_addr, + sizeof(addr4->sin_addr)) == -1) { + return -errno; + } + } else if (addr_st.ss_family == AF_INET6) { + if (setsockopt(handle->io_watcher.fd, + IPPROTO_IPV6, + IPV6_MULTICAST_IF, + &addr6->sin6_scope_id, + sizeof(addr6->sin6_scope_id)) == -1) { + return -errno; + } + } else { + assert(0 && "unexpected address family"); + abort(); + } + + return 0; +} + + +int uv_udp_getsockname(const uv_udp_t* handle, + struct sockaddr* name, + int* namelen) { + socklen_t socklen; + + if (handle->io_watcher.fd == -1) + return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ + + /* sizeof(socklen_t) != sizeof(int) on some systems. 
*/ + socklen = (socklen_t) *namelen; + + if (getsockname(handle->io_watcher.fd, name, &socklen)) + return -errno; + + *namelen = (int) socklen; + return 0; +} + + +int uv__udp_recv_start(uv_udp_t* handle, + uv_alloc_cb alloc_cb, + uv_udp_recv_cb recv_cb) { + int err; + + if (alloc_cb == NULL || recv_cb == NULL) + return -EINVAL; + + if (uv__io_active(&handle->io_watcher, POLLIN)) + return -EALREADY; /* FIXME(bnoordhuis) Should be -EBUSY. */ + + err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0); + if (err) + return err; + + handle->alloc_cb = alloc_cb; + handle->recv_cb = recv_cb; + + uv__io_start(handle->loop, &handle->io_watcher, POLLIN); + uv__handle_start(handle); + + return 0; +} + + +int uv__udp_recv_stop(uv_udp_t* handle) { + uv__io_stop(handle->loop, &handle->io_watcher, POLLIN); + + if (!uv__io_active(&handle->io_watcher, POLLOUT)) + uv__handle_stop(handle); + + handle->alloc_cb = NULL; + handle->recv_cb = NULL; + + return 0; +} diff --git a/deps/libuv/src/uv-common.c b/deps/libuv/src/uv-common.c new file mode 100644 index 00000000..46d95467 --- /dev/null +++ b/deps/libuv/src/uv-common.c @@ -0,0 +1,654 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "uv-common.h" + +#include +#include +#include +#include /* NULL */ +#include +#include /* malloc */ +#include /* memset */ + +#if defined(_WIN32) +# include /* malloc */ +#else +# include /* if_nametoindex */ +#endif + + +typedef struct { + uv_malloc_func local_malloc; + uv_realloc_func local_realloc; + uv_calloc_func local_calloc; + uv_free_func local_free; +} uv__allocator_t; + +static uv__allocator_t uv__allocator = { + malloc, + realloc, + calloc, + free, +}; + +char* uv__strdup(const char* s) { + size_t len = strlen(s) + 1; + char* m = uv__malloc(len); + if (m == NULL) + return NULL; + return memcpy(m, s, len); +} + +char* uv__strndup(const char* s, size_t n) { + char* m; + size_t len = strlen(s); + if (n < len) + len = n; + m = uv__malloc(len + 1); + if (m == NULL) + return NULL; + m[len] = '\0'; + return memcpy(m, s, len); +} + +void* uv__malloc(size_t size) { + return uv__allocator.local_malloc(size); +} + +void uv__free(void* ptr) { + int saved_errno; + + /* Libuv expects that free() does not clobber errno. The system allocator + * honors that assumption but custom allocators may not be so careful. 
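// Illustrative sketch, not part of the vendored libuv sources: a replacement
// allocator installed through uv_replace_allocator (below). Per the note
// above, the free function must leave errno untouched, which the standard
// allocator already guarantees. The app_* names are invented for this example.

#include <stdlib.h>
#include "uv.h"

static void* app_malloc(size_t size) { return malloc(size); }
static void* app_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
static void* app_calloc(size_t count, size_t size) { return calloc(count, size); }
static void app_free(void* ptr) { free(ptr); }

static int install_allocator(void) {
  // All four functions are required; call this before any other libuv API.
  return uv_replace_allocator(app_malloc, app_realloc, app_calloc, app_free);
}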
+ */ + saved_errno = errno; + uv__allocator.local_free(ptr); + errno = saved_errno; +} + +void* uv__calloc(size_t count, size_t size) { + return uv__allocator.local_calloc(count, size); +} + +void* uv__realloc(void* ptr, size_t size) { + return uv__allocator.local_realloc(ptr, size); +} + +int uv_replace_allocator(uv_malloc_func malloc_func, + uv_realloc_func realloc_func, + uv_calloc_func calloc_func, + uv_free_func free_func) { + if (malloc_func == NULL || realloc_func == NULL || + calloc_func == NULL || free_func == NULL) { + return UV_EINVAL; + } + + uv__allocator.local_malloc = malloc_func; + uv__allocator.local_realloc = realloc_func; + uv__allocator.local_calloc = calloc_func; + uv__allocator.local_free = free_func; + + return 0; +} + +#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t); + +size_t uv_handle_size(uv_handle_type type) { + switch (type) { + UV_HANDLE_TYPE_MAP(XX) + default: + return -1; + } +} + +size_t uv_req_size(uv_req_type type) { + switch(type) { + UV_REQ_TYPE_MAP(XX) + default: + return -1; + } +} + +#undef XX + + +size_t uv_loop_size(void) { + return sizeof(uv_loop_t); +} + + +uv_buf_t uv_buf_init(char* base, unsigned int len) { + uv_buf_t buf; + buf.base = base; + buf.len = len; + return buf; +} + + +static const char* uv__unknown_err_code(int err) { + char buf[32]; + char* copy; + + snprintf(buf, sizeof(buf), "Unknown system error %d", err); + copy = uv__strdup(buf); + + return copy != NULL ? copy : "Unknown system error"; +} + + +#define UV_ERR_NAME_GEN(name, _) case UV_ ## name: return #name; +const char* uv_err_name(int err) { + switch (err) { + UV_ERRNO_MAP(UV_ERR_NAME_GEN) + } + return uv__unknown_err_code(err); +} +#undef UV_ERR_NAME_GEN + + +#define UV_STRERROR_GEN(name, msg) case UV_ ## name: return msg; +const char* uv_strerror(int err) { + switch (err) { + UV_ERRNO_MAP(UV_STRERROR_GEN) + } + return uv__unknown_err_code(err); +} +#undef UV_STRERROR_GEN + + +int uv_ip4_addr(const char* ip, int port, struct sockaddr_in* addr) { + memset(addr, 0, sizeof(*addr)); + addr->sin_family = AF_INET; + addr->sin_port = htons(port); + return uv_inet_pton(AF_INET, ip, &(addr->sin_addr.s_addr)); +} + + +int uv_ip6_addr(const char* ip, int port, struct sockaddr_in6* addr) { + char address_part[40]; + size_t address_part_size; + const char* zone_index; + + memset(addr, 0, sizeof(*addr)); + addr->sin6_family = AF_INET6; + addr->sin6_port = htons(port); + + zone_index = strchr(ip, '%'); + if (zone_index != NULL) { + address_part_size = zone_index - ip; + if (address_part_size >= sizeof(address_part)) + address_part_size = sizeof(address_part) - 1; + + memcpy(address_part, ip, address_part_size); + address_part[address_part_size] = '\0'; + ip = address_part; + + zone_index++; /* skip '%' */ + /* NOTE: unknown interface (id=0) is silently ignored */ +#ifdef _WIN32 + addr->sin6_scope_id = atoi(zone_index); +#else + addr->sin6_scope_id = if_nametoindex(zone_index); +#endif + } + + return uv_inet_pton(AF_INET6, ip, &addr->sin6_addr); +} + + +int uv_ip4_name(const struct sockaddr_in* src, char* dst, size_t size) { + return uv_inet_ntop(AF_INET, &src->sin_addr, dst, size); +} + + +int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size) { + return uv_inet_ntop(AF_INET6, &src->sin6_addr, dst, size); +} + + +int uv_tcp_bind(uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int flags) { + unsigned int addrlen; + + if (handle->type != UV_TCP) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) + addrlen = sizeof(struct sockaddr_in); + else if 
(addr->sa_family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); + else + return UV_EINVAL; + + return uv__tcp_bind(handle, addr, addrlen, flags); +} + + +int uv_udp_bind(uv_udp_t* handle, + const struct sockaddr* addr, + unsigned int flags) { + unsigned int addrlen; + + if (handle->type != UV_UDP) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) + addrlen = sizeof(struct sockaddr_in); + else if (addr->sa_family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); + else + return UV_EINVAL; + + return uv__udp_bind(handle, addr, addrlen, flags); +} + + +int uv_tcp_connect(uv_connect_t* req, + uv_tcp_t* handle, + const struct sockaddr* addr, + uv_connect_cb cb) { + unsigned int addrlen; + + if (handle->type != UV_TCP) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) + addrlen = sizeof(struct sockaddr_in); + else if (addr->sa_family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); + else + return UV_EINVAL; + + return uv__tcp_connect(req, handle, addr, addrlen, cb); +} + + +int uv_udp_send(uv_udp_send_t* req, + uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + uv_udp_send_cb send_cb) { + unsigned int addrlen; + + if (handle->type != UV_UDP) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) + addrlen = sizeof(struct sockaddr_in); + else if (addr->sa_family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); + else + return UV_EINVAL; + + return uv__udp_send(req, handle, bufs, nbufs, addr, addrlen, send_cb); +} + + +int uv_udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr) { + unsigned int addrlen; + + if (handle->type != UV_UDP) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) + addrlen = sizeof(struct sockaddr_in); + else if (addr->sa_family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); + else + return UV_EINVAL; + + return uv__udp_try_send(handle, bufs, nbufs, addr, addrlen); +} + + +int uv_udp_recv_start(uv_udp_t* handle, + uv_alloc_cb alloc_cb, + uv_udp_recv_cb recv_cb) { + if (handle->type != UV_UDP || alloc_cb == NULL || recv_cb == NULL) + return UV_EINVAL; + else + return uv__udp_recv_start(handle, alloc_cb, recv_cb); +} + + +int uv_udp_recv_stop(uv_udp_t* handle) { + if (handle->type != UV_UDP) + return UV_EINVAL; + else + return uv__udp_recv_stop(handle); +} + + +void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) { + QUEUE queue; + QUEUE* q; + uv_handle_t* h; + + QUEUE_MOVE(&loop->handle_queue, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_handle_t, handle_queue); + + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&loop->handle_queue, q); + + if (h->flags & UV__HANDLE_INTERNAL) continue; + walk_cb(h, arg); + } +} + + +static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) { + const char* type; + QUEUE* q; + uv_handle_t* h; + + if (loop == NULL) + loop = uv_default_loop(); + + QUEUE_FOREACH(q, &loop->handle_queue) { + h = QUEUE_DATA(q, uv_handle_t, handle_queue); + + if (only_active && !uv__is_active(h)) + continue; + + switch (h->type) { +#define X(uc, lc) case UV_##uc: type = #lc; break; + UV_HANDLE_TYPE_MAP(X) +#undef X + default: type = ""; + } + + fprintf(stream, + "[%c%c%c] %-8s %p\n", + "R-"[!(h->flags & UV__HANDLE_REF)], + "A-"[!(h->flags & UV__HANDLE_ACTIVE)], + "I-"[!(h->flags & UV__HANDLE_INTERNAL)], + type, + (void*)h); + } +} + + +void uv_print_all_handles(uv_loop_t* loop, FILE* stream) { + uv__print_handles(loop, 0, stream); +} + + +void 
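// Illustrative sketch, not part of the vendored libuv sources: the usual
// shutdown pattern built on uv_walk above -- close every handle that is not
// already closing, then let the loop run the close callbacks.
// close_all_handles is invented for this example.

#include "uv.h"

static void close_walk_cb(uv_handle_t* handle, void* arg) {
  (void) arg;
  if (!uv_is_closing(handle))
    uv_close(handle, NULL);
}

static void close_all_handles(uv_loop_t* loop) {
  uv_walk(loop, close_walk_cb, NULL);
  uv_run(loop, UV_RUN_DEFAULT);     // drive the pending close callbacks
}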
uv_print_active_handles(uv_loop_t* loop, FILE* stream) { + uv__print_handles(loop, 1, stream); +} + + +void uv_ref(uv_handle_t* handle) { + uv__handle_ref(handle); +} + + +void uv_unref(uv_handle_t* handle) { + uv__handle_unref(handle); +} + + +int uv_has_ref(const uv_handle_t* handle) { + return uv__has_ref(handle); +} + + +void uv_stop(uv_loop_t* loop) { + loop->stop_flag = 1; +} + + +uint64_t uv_now(const uv_loop_t* loop) { + return loop->time; +} + + + +size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs) { + unsigned int i; + size_t bytes; + + bytes = 0; + for (i = 0; i < nbufs; i++) + bytes += (size_t) bufs[i].len; + + return bytes; +} + +int uv_recv_buffer_size(uv_handle_t* handle, int* value) { + return uv__socket_sockopt(handle, SO_RCVBUF, value); +} + +int uv_send_buffer_size(uv_handle_t* handle, int *value) { + return uv__socket_sockopt(handle, SO_SNDBUF, value); +} + +int uv_fs_event_getpath(uv_fs_event_t* handle, char* buffer, size_t* size) { + size_t required_len; + + if (!uv__is_active(handle)) { + *size = 0; + return UV_EINVAL; + } + + required_len = strlen(handle->path); + if (required_len >= *size) { + *size = required_len + 1; + return UV_ENOBUFS; + } + + memcpy(buffer, handle->path, required_len); + *size = required_len; + buffer[required_len] = '\0'; + + return 0; +} + +/* The windows implementation does not have the same structure layout as + * the unix implementation (nbufs is not directly inside req but is + * contained in a nested union/struct) so this function locates it. +*/ +static unsigned int* uv__get_nbufs(uv_fs_t* req) { +#ifdef _WIN32 + return &req->fs.info.nbufs; +#else + return &req->nbufs; +#endif +} + +/* uv_fs_scandir() uses the system allocator to allocate memory on non-Windows + * systems. So, the memory should be released using free(). On Windows, + * uv__malloc() is used, so use uv__free() to free memory. +*/ +#ifdef _WIN32 +# define uv__fs_scandir_free uv__free +#else +# define uv__fs_scandir_free free +#endif + +void uv__fs_scandir_cleanup(uv_fs_t* req) { + uv__dirent_t** dents; + + unsigned int* nbufs = uv__get_nbufs(req); + + dents = req->ptr; + if (*nbufs > 0 && *nbufs != (unsigned int) req->result) + (*nbufs)--; + for (; *nbufs < (unsigned int) req->result; (*nbufs)++) + uv__fs_scandir_free(dents[*nbufs]); + + uv__fs_scandir_free(req->ptr); + req->ptr = NULL; +} + + +int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent) { + uv__dirent_t** dents; + uv__dirent_t* dent; + + unsigned int* nbufs = uv__get_nbufs(req); + + dents = req->ptr; + + /* Free previous entity */ + if (*nbufs > 0) + uv__fs_scandir_free(dents[*nbufs - 1]); + + /* End was already reached */ + if (*nbufs == (unsigned int) req->result) { + uv__fs_scandir_free(dents); + req->ptr = NULL; + return UV_EOF; + } + + dent = dents[(*nbufs)++]; + + ent->name = dent->d_name; +#ifdef HAVE_DIRENT_TYPES + switch (dent->d_type) { + case UV__DT_DIR: + ent->type = UV_DIRENT_DIR; + break; + case UV__DT_FILE: + ent->type = UV_DIRENT_FILE; + break; + case UV__DT_LINK: + ent->type = UV_DIRENT_LINK; + break; + case UV__DT_FIFO: + ent->type = UV_DIRENT_FIFO; + break; + case UV__DT_SOCKET: + ent->type = UV_DIRENT_SOCKET; + break; + case UV__DT_CHAR: + ent->type = UV_DIRENT_CHAR; + break; + case UV__DT_BLOCK: + ent->type = UV_DIRENT_BLOCK; + break; + default: + ent->type = UV_DIRENT_UNKNOWN; + } +#else + ent->type = UV_DIRENT_UNKNOWN; +#endif + + return 0; +} + + +int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...) 
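// Illustrative sketch, not part of the vendored libuv sources: iterating a
// directory with the scandir helpers above, using the synchronous form
// (NULL callback). list_directory is invented for this example.

#include <stdio.h>
#include "uv.h"

static int list_directory(uv_loop_t* loop, const char* path) {
  uv_fs_t req;
  uv_dirent_t ent;
  int err;

  err = uv_fs_scandir(loop, &req, path, 0, NULL);   // NULL cb: run synchronously
  if (err < 0)
    return err;

  while (uv_fs_scandir_next(&req, &ent) != UV_EOF)
    printf("%s%s\n", ent.name, ent.type == UV_DIRENT_DIR ? "/" : "");

  uv_fs_req_cleanup(&req);
  return 0;
}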
{ + va_list ap; + int err; + + va_start(ap, option); + /* Any platform-agnostic options should be handled here. */ + err = uv__loop_configure(loop, option, ap); + va_end(ap); + + return err; +} + + +static uv_loop_t default_loop_struct; +static uv_loop_t* default_loop_ptr; + + +uv_loop_t* uv_default_loop(void) { + if (default_loop_ptr != NULL) + return default_loop_ptr; + + if (uv_loop_init(&default_loop_struct)) + return NULL; + + default_loop_ptr = &default_loop_struct; + return default_loop_ptr; +} + + +uv_loop_t* uv_loop_new(void) { + uv_loop_t* loop; + + loop = uv__malloc(sizeof(*loop)); + if (loop == NULL) + return NULL; + + if (uv_loop_init(loop)) { + uv__free(loop); + return NULL; + } + + return loop; +} + + +int uv_loop_close(uv_loop_t* loop) { + QUEUE* q; + uv_handle_t* h; +#ifndef NDEBUG + void* saved_data; +#endif + + if (!QUEUE_EMPTY(&(loop)->active_reqs)) + return UV_EBUSY; + + QUEUE_FOREACH(q, &loop->handle_queue) { + h = QUEUE_DATA(q, uv_handle_t, handle_queue); + if (!(h->flags & UV__HANDLE_INTERNAL)) + return UV_EBUSY; + } + + uv__loop_close(loop); + +#ifndef NDEBUG + saved_data = loop->data; + memset(loop, -1, sizeof(*loop)); + loop->data = saved_data; +#endif + if (loop == default_loop_ptr) + default_loop_ptr = NULL; + + return 0; +} + + +void uv_loop_delete(uv_loop_t* loop) { + uv_loop_t* default_loop; + int err; + + default_loop = default_loop_ptr; + + err = uv_loop_close(loop); + (void) err; /* Squelch compiler warnings. */ + assert(err == 0); + if (loop != default_loop) + uv__free(loop); +} diff --git a/deps/libuv/src/uv-common.h b/deps/libuv/src/uv-common.h new file mode 100644 index 00000000..27902fdf --- /dev/null +++ b/deps/libuv/src/uv-common.h @@ -0,0 +1,227 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* + * This file is private to libuv. It provides common functionality to both + * Windows and Unix backends. 
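// Illustrative sketch, not part of the vendored libuv sources: the loop
// lifecycle that the functions above implement. The UV_LOOP_BLOCK_SIGNAL
// option (with SIGPROF as its only supported signal) is assumed to be
// available in this libuv snapshot; treat that line as an assumption.
// run_and_tear_down is invented for this example.

#include <signal.h>
#include "uv.h"

static int run_and_tear_down(void) {
  uv_loop_t loop;
  int err;

  err = uv_loop_init(&loop);
  if (err)
    return err;
  uv_loop_configure(&loop, UV_LOOP_BLOCK_SIGNAL, SIGPROF);  // see note above
  uv_run(&loop, UV_RUN_DEFAULT);    // returns immediately: nothing is active
  return uv_loop_close(&loop);      // 0 here; UV_EBUSY if handles remained
}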
+ */ + +#ifndef UV_COMMON_H_ +#define UV_COMMON_H_ + +#include +#include +#include + +#if defined(_MSC_VER) && _MSC_VER < 1600 +# include "stdint-msvc2008.h" +#else +# include +#endif + +#include "uv.h" +#include "tree.h" +#include "queue.h" + +#if !defined(snprintf) && defined(_MSC_VER) && _MSC_VER < 1900 +extern int snprintf(char*, size_t, const char*, ...); +#endif + +#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) + +#define container_of(ptr, type, member) \ + ((type *) ((char *) (ptr) - offsetof(type, member))) + +#define STATIC_ASSERT(expr) \ + void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)]) + +#ifndef _WIN32 +enum { + UV__HANDLE_INTERNAL = 0x8000, + UV__HANDLE_ACTIVE = 0x4000, + UV__HANDLE_REF = 0x2000, + UV__HANDLE_CLOSING = 0 /* no-op on unix */ +}; +#else +# define UV__HANDLE_INTERNAL 0x80 +# define UV__HANDLE_ACTIVE 0x40 +# define UV__HANDLE_REF 0x20 +# define UV__HANDLE_CLOSING 0x01 +#endif + +int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap); + +void uv__loop_close(uv_loop_t* loop); + +int uv__tcp_bind(uv_tcp_t* tcp, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags); + +int uv__tcp_connect(uv_connect_t* req, + uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + uv_connect_cb cb); + +int uv__udp_bind(uv_udp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags); + +int uv__udp_send(uv_udp_send_t* req, + uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen, + uv_udp_send_cb send_cb); + +int uv__udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen); + +int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloccb, + uv_udp_recv_cb recv_cb); + +int uv__udp_recv_stop(uv_udp_t* handle); + +void uv__fs_poll_close(uv_fs_poll_t* handle); + +int uv__getaddrinfo_translate_error(int sys_err); /* EAI_* error. 
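/* Standalone sketch (not libuv code) of the container_of() idiom defined
 * above: given a pointer to an embedded member, subtract the member's offset
 * to recover the enclosing struct. libuv uses this to map queue nodes and
 * embedded requests back to their owning handles. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
  ((type *) ((char *) (ptr) - offsetof(type, member)))

struct watcher {
  int id;
  struct { void* prev; void* next; } queue_node;  /* embedded link, as in QUEUE */
};

int main(void) {
  struct watcher w = { 42, { NULL, NULL } };
  void* node = &w.queue_node;        /* only the member pointer is known */
  struct watcher* back = container_of(node, struct watcher, queue_node);
  printf("%d\n", back->id);          /* prints 42 */
  return 0;
}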
*/ + +void uv__work_submit(uv_loop_t* loop, + struct uv__work *w, + void (*work)(struct uv__work *w), + void (*done)(struct uv__work *w, int status)); + +void uv__work_done(uv_async_t* handle); + +size_t uv__count_bufs(const uv_buf_t bufs[], unsigned int nbufs); + +int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value); + +void uv__fs_scandir_cleanup(uv_fs_t* req); + +#define uv__has_active_reqs(loop) \ + (QUEUE_EMPTY(&(loop)->active_reqs) == 0) + +#define uv__req_register(loop, req) \ + do { \ + QUEUE_INSERT_TAIL(&(loop)->active_reqs, &(req)->active_queue); \ + } \ + while (0) + +#define uv__req_unregister(loop, req) \ + do { \ + assert(uv__has_active_reqs(loop)); \ + QUEUE_REMOVE(&(req)->active_queue); \ + } \ + while (0) + +#define uv__has_active_handles(loop) \ + ((loop)->active_handles > 0) + +#define uv__active_handle_add(h) \ + do { \ + (h)->loop->active_handles++; \ + } \ + while (0) + +#define uv__active_handle_rm(h) \ + do { \ + (h)->loop->active_handles--; \ + } \ + while (0) + +#define uv__is_active(h) \ + (((h)->flags & UV__HANDLE_ACTIVE) != 0) + +#define uv__is_closing(h) \ + (((h)->flags & (UV_CLOSING | UV_CLOSED)) != 0) + +#define uv__handle_start(h) \ + do { \ + assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \ + if (((h)->flags & UV__HANDLE_ACTIVE) != 0) break; \ + (h)->flags |= UV__HANDLE_ACTIVE; \ + if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_add(h); \ + } \ + while (0) + +#define uv__handle_stop(h) \ + do { \ + assert(((h)->flags & UV__HANDLE_CLOSING) == 0); \ + if (((h)->flags & UV__HANDLE_ACTIVE) == 0) break; \ + (h)->flags &= ~UV__HANDLE_ACTIVE; \ + if (((h)->flags & UV__HANDLE_REF) != 0) uv__active_handle_rm(h); \ + } \ + while (0) + +#define uv__handle_ref(h) \ + do { \ + if (((h)->flags & UV__HANDLE_REF) != 0) break; \ + (h)->flags |= UV__HANDLE_REF; \ + if (((h)->flags & UV__HANDLE_CLOSING) != 0) break; \ + if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_add(h); \ + } \ + while (0) + +#define uv__handle_unref(h) \ + do { \ + if (((h)->flags & UV__HANDLE_REF) == 0) break; \ + (h)->flags &= ~UV__HANDLE_REF; \ + if (((h)->flags & UV__HANDLE_CLOSING) != 0) break; \ + if (((h)->flags & UV__HANDLE_ACTIVE) != 0) uv__active_handle_rm(h); \ + } \ + while (0) + +#define uv__has_ref(h) \ + (((h)->flags & UV__HANDLE_REF) != 0) + +#if defined(_WIN32) +# define uv__handle_platform_init(h) ((h)->u.fd = -1) +#else +# define uv__handle_platform_init(h) ((h)->next_closing = NULL) +#endif + +#define uv__handle_init(loop_, h, type_) \ + do { \ + (h)->loop = (loop_); \ + (h)->type = (type_); \ + (h)->flags = UV__HANDLE_REF; /* Ref the loop when active. */ \ + QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue); \ + uv__handle_platform_init(h); \ + } \ + while (0) + + +/* Allocator prototypes */ +void *uv__calloc(size_t count, size_t size); +char *uv__strdup(const char* s); +char *uv__strndup(const char* s, size_t n); +void* uv__malloc(size_t size); +void uv__free(void* ptr); +void* uv__realloc(void* ptr, size_t size); + +#endif /* UV_COMMON_H_ */ diff --git a/deps/libuv/src/version.c b/deps/libuv/src/version.c new file mode 100644 index 00000000..686dedd9 --- /dev/null +++ b/deps/libuv/src/version.c @@ -0,0 +1,45 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
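/* Hedged sketch of what the REF/ACTIVE bookkeeping above means at the public
 * API level (not libuv source): an active handle only keeps uv_run() alive
 * while it is referenced, so uv_unref() lets the loop exit with the timer
 * still running. The timer handle is deliberately left open in this sketch. */
#include <stdio.h>
#include <uv.h>

static void on_timer(uv_timer_t* t) {
  printf("tick\n");                              /* never reached in this sketch */
}

int main(void) {
  uv_timer_t timer;

  uv_timer_init(uv_default_loop(), &timer);
  uv_timer_start(&timer, on_timer, 1000, 1000);  /* ACTIVE + REF: counted in active_handles */
  uv_unref((uv_handle_t*) &timer);               /* drops REF: no longer counted */
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);     /* returns immediately; nothing references the loop */
  return 0;
}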
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" + +#define UV_STRINGIFY(v) UV_STRINGIFY_HELPER(v) +#define UV_STRINGIFY_HELPER(v) #v + +#define UV_VERSION_STRING_BASE UV_STRINGIFY(UV_VERSION_MAJOR) "." \ + UV_STRINGIFY(UV_VERSION_MINOR) "." \ + UV_STRINGIFY(UV_VERSION_PATCH) + +#if UV_VERSION_IS_RELEASE +# define UV_VERSION_STRING UV_VERSION_STRING_BASE +#else +# define UV_VERSION_STRING UV_VERSION_STRING_BASE "-" UV_VERSION_SUFFIX +#endif + + +unsigned int uv_version(void) { + return UV_VERSION_HEX; +} + + +const char* uv_version_string(void) { + return UV_VERSION_STRING; +} diff --git a/deps/libuv/src/win/async.c b/deps/libuv/src/win/async.c new file mode 100644 index 00000000..ad240ab8 --- /dev/null +++ b/deps/libuv/src/win/async.c @@ -0,0 +1,99 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
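/* Usage sketch for the version helpers defined above (assumes a program
 * linked against libuv; not library code). UV_VERSION_HEX packs the major,
 * minor and patch numbers one byte each, which is what uv_version() returns. */
#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned int v = uv_version();
  printf("libuv %s (major=%u minor=%u patch=%u)\n",
         uv_version_string(),
         (v >> 16) & 0xff,
         (v >> 8) & 0xff,
         v & 0xff);
  return 0;
}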
+ */ + +#include + +#include "uv.h" +#include "internal.h" +#include "atomicops-inl.h" +#include "handle-inl.h" +#include "req-inl.h" + + +void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle) { + if (handle->flags & UV__HANDLE_CLOSING && + !handle->async_sent) { + assert(!(handle->flags & UV_HANDLE_CLOSED)); + uv__handle_close(handle); + } +} + + +int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { + uv_req_t* req; + + uv__handle_init(loop, (uv_handle_t*) handle, UV_ASYNC); + handle->async_sent = 0; + handle->async_cb = async_cb; + + req = &handle->async_req; + uv_req_init(loop, req); + req->type = UV_WAKEUP; + req->data = handle; + + uv__handle_start(handle); + + return 0; +} + + +void uv_async_close(uv_loop_t* loop, uv_async_t* handle) { + if (!((uv_async_t*)handle)->async_sent) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } + + uv__handle_closing(handle); +} + + +int uv_async_send(uv_async_t* handle) { + uv_loop_t* loop = handle->loop; + + if (handle->type != UV_ASYNC) { + /* Can't set errno because that's not thread-safe. */ + return -1; + } + + /* The user should make sure never to call uv_async_send to a closing */ + /* or closed handle. */ + assert(!(handle->flags & UV__HANDLE_CLOSING)); + + if (!uv__atomic_exchange_set(&handle->async_sent)) { + POST_COMPLETION_FOR_REQ(loop, &handle->async_req); + } + + return 0; +} + + +void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle, + uv_req_t* req) { + assert(handle->type == UV_ASYNC); + assert(req->type == UV_WAKEUP); + + handle->async_sent = 0; + + if (handle->flags & UV__HANDLE_CLOSING) { + uv_want_endgame(loop, (uv_handle_t*)handle); + } else if (handle->async_cb != NULL) { + handle->async_cb(handle); + } +} diff --git a/deps/libuv/src/win/atomicops-inl.h b/deps/libuv/src/win/atomicops-inl.h new file mode 100644 index 00000000..61e00602 --- /dev/null +++ b/deps/libuv/src/win/atomicops-inl.h @@ -0,0 +1,56 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_WIN_ATOMICOPS_INL_H_ +#define UV_WIN_ATOMICOPS_INL_H_ + +#include "uv.h" + + +/* Atomic set operation on char */ +#ifdef _MSC_VER /* MSVC */ + +/* _InterlockedOr8 is supported by MSVC on x32 and x64. It is slightly less */ +/* efficient than InterlockedExchange, but InterlockedExchange8 does not */ +/* exist, and interlocked operations on larger targets might require the */ +/* target to be aligned. 
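/* Usage sketch for uv_async_t (not libuv source): a worker thread wakes the
 * loop with uv_async_send(). Because of the async_sent test-and-set above,
 * several sends issued before the callback runs coalesce into a single
 * callback. Error checks are omitted; names are local to the sketch. */
#include <stdio.h>
#include <uv.h>

static uv_async_t async;

static void on_async(uv_async_t* handle) {
  printf("woken by worker\n");
  uv_close((uv_handle_t*) handle, NULL);  /* let uv_run() return */
}

static void worker(void* arg) {
  uv_async_send(&async);                  /* safe to call from any thread */
}

int main(void) {
  uv_thread_t tid;

  uv_async_init(uv_default_loop(), &async, on_async);
  uv_thread_create(&tid, worker, NULL);
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);
  uv_thread_join(&tid);
  return 0;
}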
*/ +#pragma intrinsic(_InterlockedOr8) + +static char __declspec(inline) uv__atomic_exchange_set(char volatile* target) { + return _InterlockedOr8(target, 1); +} + +#else /* GCC */ + +/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */ +static inline char uv__atomic_exchange_set(char volatile* target) { + const char one = 1; + char old_value; + __asm__ __volatile__ ("lock xchgb %0, %1\n\t" + : "=r"(old_value), "=m"(*target) + : "0"(one), "m"(*target) + : "memory"); + return old_value; +} + +#endif + +#endif /* UV_WIN_ATOMICOPS_INL_H_ */ diff --git a/deps/libuv/src/win/core.c b/deps/libuv/src/win/core.c new file mode 100644 index 00000000..e84186d4 --- /dev/null +++ b/deps/libuv/src/win/core.c @@ -0,0 +1,598 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR) +#include +#endif + +#include "uv.h" +#include "internal.h" +#include "queue.h" +#include "handle-inl.h" +#include "req-inl.h" + +/* uv_once initialization guards */ +static uv_once_t uv_init_guard_ = UV_ONCE_INIT; + + +#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)) +/* Our crt debug report handler allows us to temporarily disable asserts + * just for the current thread. + */ + +UV_THREAD_LOCAL int uv__crt_assert_enabled = TRUE; + +static int uv__crt_dbg_report_handler(int report_type, char *message, int *ret_val) { + if (uv__crt_assert_enabled || report_type != _CRT_ASSERT) + return FALSE; + + if (ret_val) { + /* Set ret_val to 0 to continue with normal execution. + * Set ret_val to 1 to trigger a breakpoint. + */ + + if(IsDebuggerPresent()) + *ret_val = 1; + else + *ret_val = 0; + } + + /* Don't call _CrtDbgReport. */ + return TRUE; +} +#else +UV_THREAD_LOCAL int uv__crt_assert_enabled = FALSE; +#endif + + +#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800 +static void uv__crt_invalid_parameter_handler(const wchar_t* expression, + const wchar_t* function, const wchar_t * file, unsigned int line, + uintptr_t reserved) { + /* No-op. 
*/ +} +#endif + +static uv_loop_t** uv__loops; +static int uv__loops_size; +static int uv__loops_capacity; +#define UV__LOOPS_CHUNK_SIZE 8 +static uv_mutex_t uv__loops_lock; + +static void uv__loops_init() { + uv_mutex_init(&uv__loops_lock); + uv__loops = uv__calloc(UV__LOOPS_CHUNK_SIZE, sizeof(uv_loop_t*)); + if (!uv__loops) + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + uv__loops_size = 0; + uv__loops_capacity = UV__LOOPS_CHUNK_SIZE; +} + +static int uv__loops_add(uv_loop_t* loop) { + uv_loop_t** new_loops; + int new_capacity, i; + + uv_mutex_lock(&uv__loops_lock); + + if (uv__loops_size == uv__loops_capacity) { + new_capacity = uv__loops_capacity + UV__LOOPS_CHUNK_SIZE; + new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * new_capacity); + if (!new_loops) + goto failed_loops_realloc; + uv__loops = new_loops; + for (i = uv__loops_capacity; i < new_capacity; ++i) + uv__loops[i] = NULL; + uv__loops_capacity = new_capacity; + } + uv__loops[uv__loops_size] = loop; + ++uv__loops_size; + + uv_mutex_unlock(&uv__loops_lock); + return 0; + +failed_loops_realloc: + uv_mutex_unlock(&uv__loops_lock); + return ERROR_OUTOFMEMORY; +} + +static void uv__loops_remove(uv_loop_t* loop) { + int loop_index; + int smaller_capacity; + uv_loop_t** new_loops; + + uv_mutex_lock(&uv__loops_lock); + + for (loop_index = 0; loop_index < uv__loops_size; ++loop_index) { + if (uv__loops[loop_index] == loop) + break; + } + /* If loop was not found, ignore */ + if (loop_index == uv__loops_size) + goto loop_removed; + + uv__loops[loop_index] = uv__loops[uv__loops_size - 1]; + uv__loops[uv__loops_size - 1] = NULL; + --uv__loops_size; + + /* If we didn't grow to big skip downsizing */ + if (uv__loops_capacity < 4 * UV__LOOPS_CHUNK_SIZE) + goto loop_removed; + + /* Downsize only if more than half of buffer is free */ + smaller_capacity = uv__loops_capacity / 2; + if (uv__loops_size >= smaller_capacity) + goto loop_removed; + new_loops = uv__realloc(uv__loops, sizeof(uv_loop_t*) * smaller_capacity); + if (!new_loops) + goto loop_removed; + uv__loops = new_loops; + uv__loops_capacity = smaller_capacity; + +loop_removed: + uv_mutex_unlock(&uv__loops_lock); +} + +void uv__wake_all_loops() { + int i; + uv_loop_t* loop; + + uv_mutex_lock(&uv__loops_lock); + for (i = 0; i < uv__loops_size; ++i) { + loop = uv__loops[i]; + assert(loop); + if (loop->iocp != INVALID_HANDLE_VALUE) + PostQueuedCompletionStatus(loop->iocp, 0, 0, NULL); + } + uv_mutex_unlock(&uv__loops_lock); +} + +static void uv_init(void) { + /* Tell Windows that we will handle critical errors. */ + SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | + SEM_NOOPENFILEERRORBOX); + + /* Tell the CRT to not exit the application when an invalid parameter is + * passed. The main issue is that invalid FDs will trigger this behavior. + */ +#if !defined(__MINGW32__) || __MSVCRT_VERSION__ >= 0x800 + _set_invalid_parameter_handler(uv__crt_invalid_parameter_handler); +#endif + + /* We also need to setup our debug report handler because some CRT + * functions (eg _get_osfhandle) raise an assert when called with invalid + * FDs even though they return the proper error code in the release build. + */ +#if defined(_DEBUG) && (defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR)) + _CrtSetReportHook(uv__crt_dbg_report_handler); +#endif + + /* Initialize tracking of all uv loops */ + uv__loops_init(); + + /* Fetch winapi function pointers. This must be done first because other + * initialization code might need these function pointers to be loaded. 
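/* Standalone Win32 sketch (not libuv code) of the wakeup idiom used by
 * uv__wake_all_loops() above and recognized later in the poll loop: a
 * completion packet posted with a NULL OVERLAPPED pointer wakes a
 * GetQueuedCompletionStatus() waiter without describing any real I/O.
 * Error checks are omitted. */
#include <stdio.h>
#include <windows.h>

int main(void) {
  HANDLE iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
  DWORD bytes;
  ULONG_PTR key;
  OVERLAPPED* overlapped;

  PostQueuedCompletionStatus(iocp, 0, 0, NULL);   /* the "wake up" packet */
  GetQueuedCompletionStatus(iocp, &bytes, &key, &overlapped, INFINITE);
  printf("overlapped is %s\n",
         overlapped == NULL ? "NULL (pure wakeup)" : "a real I/O packet");

  CloseHandle(iocp);
  return 0;
}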
+ */ + uv_winapi_init(); + + /* Initialize winsock */ + uv_winsock_init(); + + /* Initialize FS */ + uv_fs_init(); + + /* Initialize signal stuff */ + uv_signals_init(); + + /* Initialize console */ + uv_console_init(); + + /* Initialize utilities */ + uv__util_init(); + + /* Initialize system wakeup detection */ + uv__init_detect_system_wakeup(); +} + + +int uv_loop_init(uv_loop_t* loop) { + int err; + + /* Initialize libuv itself first */ + uv__once_init(); + + /* Create an I/O completion port */ + loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1); + if (loop->iocp == NULL) + return uv_translate_sys_error(GetLastError()); + + /* To prevent uninitialized memory access, loop->time must be initialized + * to zero before calling uv_update_time for the first time. + */ + loop->time = 0; + uv_update_time(loop); + + QUEUE_INIT(&loop->wq); + QUEUE_INIT(&loop->handle_queue); + QUEUE_INIT(&loop->active_reqs); + loop->active_handles = 0; + + loop->pending_reqs_tail = NULL; + + loop->endgame_handles = NULL; + + RB_INIT(&loop->timers); + + loop->check_handles = NULL; + loop->prepare_handles = NULL; + loop->idle_handles = NULL; + + loop->next_prepare_handle = NULL; + loop->next_check_handle = NULL; + loop->next_idle_handle = NULL; + + memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets); + + loop->active_tcp_streams = 0; + loop->active_udp_streams = 0; + + loop->timer_counter = 0; + loop->stop_flag = 0; + + err = uv_mutex_init(&loop->wq_mutex); + if (err) + goto fail_mutex_init; + + err = uv_async_init(loop, &loop->wq_async, uv__work_done); + if (err) + goto fail_async_init; + + uv__handle_unref(&loop->wq_async); + loop->wq_async.flags |= UV__HANDLE_INTERNAL; + + err = uv__loops_add(loop); + if (err) + goto fail_async_init; + + return 0; + +fail_async_init: + uv_mutex_destroy(&loop->wq_mutex); + +fail_mutex_init: + CloseHandle(loop->iocp); + loop->iocp = INVALID_HANDLE_VALUE; + + return err; +} + + +void uv__once_init(void) { + uv_once(&uv_init_guard_, uv_init); +} + + +void uv__loop_close(uv_loop_t* loop) { + size_t i; + + uv__loops_remove(loop); + + /* close the async handle without needing an extra loop iteration */ + assert(!loop->wq_async.async_sent); + loop->wq_async.close_cb = NULL; + uv__handle_closing(&loop->wq_async); + uv__handle_close(&loop->wq_async); + + for (i = 0; i < ARRAY_SIZE(loop->poll_peer_sockets); i++) { + SOCKET sock = loop->poll_peer_sockets[i]; + if (sock != 0 && sock != INVALID_SOCKET) + closesocket(sock); + } + + uv_mutex_lock(&loop->wq_mutex); + assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); + assert(!uv__has_active_reqs(loop)); + uv_mutex_unlock(&loop->wq_mutex); + uv_mutex_destroy(&loop->wq_mutex); + + CloseHandle(loop->iocp); +} + + +int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) { + return UV_ENOSYS; +} + + +int uv_backend_fd(const uv_loop_t* loop) { + return -1; +} + + +int uv_backend_timeout(const uv_loop_t* loop) { + if (loop->stop_flag != 0) + return 0; + + if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop)) + return 0; + + if (loop->pending_reqs_tail) + return 0; + + if (loop->endgame_handles) + return 0; + + if (loop->idle_handles) + return 0; + + return uv__next_timeout(loop); +} + + +static void uv_poll(uv_loop_t* loop, DWORD timeout) { + DWORD bytes; + ULONG_PTR key; + OVERLAPPED* overlapped; + uv_req_t* req; + int repeat; + uint64_t timeout_time; + + timeout_time = loop->time + timeout; + + for (repeat = 0; ; repeat++) { + GetQueuedCompletionStatus(loop->iocp, 
+ &bytes, + &key, + &overlapped, + timeout); + + if (overlapped) { + /* Package was dequeued */ + req = uv_overlapped_to_req(overlapped); + uv_insert_pending_req(loop, req); + + /* Some time might have passed waiting for I/O, + * so update the loop time here. + */ + uv_update_time(loop); + } else if (GetLastError() != WAIT_TIMEOUT) { + /* Serious error */ + uv_fatal_error(GetLastError(), "GetQueuedCompletionStatus"); + } else if (timeout > 0) { + /* GetQueuedCompletionStatus can occasionally return a little early. + * Make sure that the desired timeout target time is reached. + */ + uv_update_time(loop); + if (timeout_time > loop->time) { + timeout = (DWORD)(timeout_time - loop->time); + /* The first call to GetQueuedCompletionStatus should return very + * close to the target time and the second should reach it, but + * this is not stated in the documentation. To make sure a busy + * loop cannot happen, the timeout is increased exponentially + * starting on the third round. + */ + timeout += repeat ? (1 << (repeat - 1)) : 0; + continue; + } + } + break; + } +} + + +static void uv_poll_ex(uv_loop_t* loop, DWORD timeout) { + BOOL success; + uv_req_t* req; + OVERLAPPED_ENTRY overlappeds[128]; + ULONG count; + ULONG i; + int repeat; + uint64_t timeout_time; + + timeout_time = loop->time + timeout; + + for (repeat = 0; ; repeat++) { + success = pGetQueuedCompletionStatusEx(loop->iocp, + overlappeds, + ARRAY_SIZE(overlappeds), + &count, + timeout, + FALSE); + + if (success) { + for (i = 0; i < count; i++) { + /* Package was dequeued, but see if it is not a empty package + * meant only to wake us up. + */ + if (overlappeds[i].lpOverlapped) { + req = uv_overlapped_to_req(overlappeds[i].lpOverlapped); + uv_insert_pending_req(loop, req); + } + } + + /* Some time might have passed waiting for I/O, + * so update the loop time here. + */ + uv_update_time(loop); + } else if (GetLastError() != WAIT_TIMEOUT) { + /* Serious error */ + uv_fatal_error(GetLastError(), "GetQueuedCompletionStatusEx"); + } else if (timeout > 0) { + /* GetQueuedCompletionStatus can occasionally return a little early. + * Make sure that the desired timeout target time is reached. + */ + uv_update_time(loop); + if (timeout_time > loop->time) { + timeout = (DWORD)(timeout_time - loop->time); + /* The first call to GetQueuedCompletionStatus should return very + * close to the target time and the second should reach it, but + * this is not stated in the documentation. To make sure a busy + * loop cannot happen, the timeout is increased exponentially + * starting on the third round. + */ + timeout += repeat ? 
(1 << (repeat - 1)) : 0; + continue; + } + } + break; + } +} + + +static int uv__loop_alive(const uv_loop_t* loop) { + return loop->active_handles > 0 || + !QUEUE_EMPTY(&loop->active_reqs) || + loop->endgame_handles != NULL; +} + + +int uv_loop_alive(const uv_loop_t* loop) { + return uv__loop_alive(loop); +} + + +int uv_run(uv_loop_t *loop, uv_run_mode mode) { + DWORD timeout; + int r; + int ran_pending; + void (*poll)(uv_loop_t* loop, DWORD timeout); + + if (pGetQueuedCompletionStatusEx) + poll = &uv_poll_ex; + else + poll = &uv_poll; + + r = uv__loop_alive(loop); + if (!r) + uv_update_time(loop); + + while (r != 0 && loop->stop_flag == 0) { + uv_update_time(loop); + uv_process_timers(loop); + + ran_pending = uv_process_reqs(loop); + uv_idle_invoke(loop); + uv_prepare_invoke(loop); + + timeout = 0; + if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT) + timeout = uv_backend_timeout(loop); + + (*poll)(loop, timeout); + + uv_check_invoke(loop); + uv_process_endgames(loop); + + if (mode == UV_RUN_ONCE) { + /* UV_RUN_ONCE implies forward progress: at least one callback must have + * been invoked when it returns. uv__io_poll() can return without doing + * I/O (meaning: no callbacks) when its timeout expires - which means we + * have pending timers that satisfy the forward progress constraint. + * + * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from + * the check. + */ + uv_process_timers(loop); + } + + r = uv__loop_alive(loop); + if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) + break; + } + + /* The if statement lets the compiler compile it to a conditional store. + * Avoids dirtying a cache line. + */ + if (loop->stop_flag != 0) + loop->stop_flag = 0; + + return r; +} + + +int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) { + uv_os_fd_t fd_out; + + switch (handle->type) { + case UV_TCP: + fd_out = (uv_os_fd_t)((uv_tcp_t*) handle)->socket; + break; + + case UV_NAMED_PIPE: + fd_out = ((uv_pipe_t*) handle)->handle; + break; + + case UV_TTY: + fd_out = ((uv_tty_t*) handle)->handle; + break; + + case UV_UDP: + fd_out = (uv_os_fd_t)((uv_udp_t*) handle)->socket; + break; + + case UV_POLL: + fd_out = (uv_os_fd_t)((uv_poll_t*) handle)->socket; + break; + + default: + return UV_EINVAL; + } + + if (uv_is_closing(handle) || fd_out == INVALID_HANDLE_VALUE) + return UV_EBADF; + + *fd = fd_out; + return 0; +} + + +int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) { + int r; + int len; + SOCKET socket; + + if (handle == NULL || value == NULL) + return UV_EINVAL; + + if (handle->type == UV_TCP) + socket = ((uv_tcp_t*) handle)->socket; + else if (handle->type == UV_UDP) + socket = ((uv_udp_t*) handle)->socket; + else + return UV_ENOTSUP; + + len = sizeof(*value); + + if (*value == 0) + r = getsockopt(socket, SOL_SOCKET, optname, (char*) value, &len); + else + r = setsockopt(socket, SOL_SOCKET, optname, (const char*) value, len); + + if (r == SOCKET_ERROR) + return uv_translate_sys_error(WSAGetLastError()); + + return 0; +} diff --git a/deps/libuv/src/win/detect-wakeup.c b/deps/libuv/src/win/detect-wakeup.c new file mode 100644 index 00000000..a12179f7 --- /dev/null +++ b/deps/libuv/src/win/detect-wakeup.c @@ -0,0 +1,35 @@ +#include "uv.h" +#include "internal.h" +#include "winapi.h" + +static void uv__register_system_resume_callback(); + +void uv__init_detect_system_wakeup() { + /* Try registering system power event callback. This is the cleanest + * method, but it will only work on Win8 and above. 
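/* Usage sketch (not libuv source) of the get-or-set convention implemented
 * by uv__socket_sockopt() above, via the public wrappers: a value of 0
 * queries the current size, a non-zero value sets it. The bind to port 0 is
 * only there so the handle owns a socket; error handling is omitted. */
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_tcp_t tcp;
  struct sockaddr_in addr;
  int value;

  uv_tcp_init(uv_default_loop(), &tcp);
  uv_ip4_addr("127.0.0.1", 0, &addr);
  uv_tcp_bind(&tcp, (const struct sockaddr*) &addr, 0);

  value = 0;                                        /* 0 means "get" */
  uv_recv_buffer_size((uv_handle_t*) &tcp, &value);
  printf("SO_RCVBUF is %d bytes\n", value);

  value = 64 * 1024;                                /* non-zero means "set" */
  uv_send_buffer_size((uv_handle_t*) &tcp, &value);

  uv_close((uv_handle_t*) &tcp, NULL);
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);
  return 0;
}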
+ */ + uv__register_system_resume_callback(); +} + +static ULONG CALLBACK uv__system_resume_callback(PVOID Context, + ULONG Type, + PVOID Setting) { + if (Type == PBT_APMRESUMESUSPEND || Type == PBT_APMRESUMEAUTOMATIC) + uv__wake_all_loops(); + + return 0; +} + +static void uv__register_system_resume_callback() { + _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS recipient; + _HPOWERNOTIFY registration_handle; + + if (pPowerRegisterSuspendResumeNotification == NULL) + return; + + recipient.Callback = uv__system_resume_callback; + recipient.Context = NULL; + (*pPowerRegisterSuspendResumeNotification)(DEVICE_NOTIFY_CALLBACK, + &recipient, + ®istration_handle); +} diff --git a/deps/libuv/src/win/dl.c b/deps/libuv/src/win/dl.c new file mode 100644 index 00000000..39e400ab --- /dev/null +++ b/deps/libuv/src/win/dl.c @@ -0,0 +1,118 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "internal.h" + +static int uv__dlerror(uv_lib_t* lib, int errorno); + + +int uv_dlopen(const char* filename, uv_lib_t* lib) { + WCHAR filename_w[32768]; + + lib->handle = NULL; + lib->errmsg = NULL; + + if (!MultiByteToWideChar(CP_UTF8, + 0, + filename, + -1, + filename_w, + ARRAY_SIZE(filename_w))) { + return uv__dlerror(lib, GetLastError()); + } + + lib->handle = LoadLibraryExW(filename_w, NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + if (lib->handle == NULL) { + return uv__dlerror(lib, GetLastError()); + } + + return 0; +} + + +void uv_dlclose(uv_lib_t* lib) { + if (lib->errmsg) { + LocalFree((void*)lib->errmsg); + lib->errmsg = NULL; + } + + if (lib->handle) { + /* Ignore errors. No good way to signal them without leaking memory. */ + FreeLibrary(lib->handle); + lib->handle = NULL; + } +} + + +int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) { + *ptr = (void*) GetProcAddress(lib->handle, name); + return uv__dlerror(lib, *ptr ? 0 : GetLastError()); +} + + +const char* uv_dlerror(const uv_lib_t* lib) { + return lib->errmsg ? 
lib->errmsg : "no error"; +} + + +static void uv__format_fallback_error(uv_lib_t* lib, int errorno){ + DWORD_PTR args[1] = { (DWORD_PTR) errorno }; + LPSTR fallback_error = "error: %1!d!"; + + FormatMessageA(FORMAT_MESSAGE_FROM_STRING | + FORMAT_MESSAGE_ARGUMENT_ARRAY | + FORMAT_MESSAGE_ALLOCATE_BUFFER, + fallback_error, 0, 0, + (LPSTR) &lib->errmsg, + 0, (va_list*) args); +} + + + +static int uv__dlerror(uv_lib_t* lib, int errorno) { + DWORD res; + + if (lib->errmsg) { + LocalFree((void*)lib->errmsg); + lib->errmsg = NULL; + } + + if (errorno) { + res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno, + MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US), + (LPSTR) &lib->errmsg, 0, NULL); + if (!res && GetLastError() == ERROR_MUI_FILE_NOT_FOUND) { + res = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno, + 0, (LPSTR) &lib->errmsg, 0, NULL); + } + + if (!res) { + uv__format_fallback_error(lib, errorno); + } + } + + return errorno ? -1 : 0; +} diff --git a/deps/libuv/src/win/error.c b/deps/libuv/src/win/error.c new file mode 100644 index 00000000..c512f35a --- /dev/null +++ b/deps/libuv/src/win/error.c @@ -0,0 +1,170 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "uv.h" +#include "internal.h" + + +/* + * Display an error message and abort the event loop. + */ +void uv_fatal_error(const int errorno, const char* syscall) { + char* buf = NULL; + const char* errmsg; + + FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, NULL, errorno, + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); + + if (buf) { + errmsg = buf; + } else { + errmsg = "Unknown error"; + } + + /* FormatMessage messages include a newline character already, */ + /* so don't add another. */ + if (syscall) { + fprintf(stderr, "%s: (%d) %s", syscall, errorno, errmsg); + } else { + fprintf(stderr, "(%d) %s", errorno, errmsg); + } + + if (buf) { + LocalFree(buf); + } + + *((char*)NULL) = 0xff; /* Force debug break */ + abort(); +} + + +int uv_translate_sys_error(int sys_errno) { + if (sys_errno <= 0) { + return sys_errno; /* If < 0 then it's already a libuv error. 
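/* Usage sketch for the dynamic-loading wrappers above (not libuv source).
 * "mylib.dll" and "my_entry" are placeholder names for a library and an
 * exported function. */
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_lib_t lib;
  void (*entry)(void);

  if (uv_dlopen("mylib.dll", &lib) != 0) {
    fprintf(stderr, "dlopen: %s\n", uv_dlerror(&lib));
    return 1;
  }

  if (uv_dlsym(&lib, "my_entry", (void**) &entry) == 0)
    entry();
  else
    fprintf(stderr, "dlsym: %s\n", uv_dlerror(&lib));

  uv_dlclose(&lib);
  return 0;
}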
*/ + } + + switch (sys_errno) { + case ERROR_NOACCESS: return UV_EACCES; + case WSAEACCES: return UV_EACCES; + case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE; + case WSAEADDRINUSE: return UV_EADDRINUSE; + case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL; + case WSAEAFNOSUPPORT: return UV_EAFNOSUPPORT; + case WSAEWOULDBLOCK: return UV_EAGAIN; + case WSAEALREADY: return UV_EALREADY; + case ERROR_INVALID_FLAGS: return UV_EBADF; + case ERROR_INVALID_HANDLE: return UV_EBADF; + case ERROR_LOCK_VIOLATION: return UV_EBUSY; + case ERROR_PIPE_BUSY: return UV_EBUSY; + case ERROR_SHARING_VIOLATION: return UV_EBUSY; + case ERROR_OPERATION_ABORTED: return UV_ECANCELED; + case WSAEINTR: return UV_ECANCELED; + case ERROR_NO_UNICODE_TRANSLATION: return UV_ECHARSET; + case ERROR_CONNECTION_ABORTED: return UV_ECONNABORTED; + case WSAECONNABORTED: return UV_ECONNABORTED; + case ERROR_CONNECTION_REFUSED: return UV_ECONNREFUSED; + case WSAECONNREFUSED: return UV_ECONNREFUSED; + case ERROR_NETNAME_DELETED: return UV_ECONNRESET; + case WSAECONNRESET: return UV_ECONNRESET; + case ERROR_ALREADY_EXISTS: return UV_EEXIST; + case ERROR_FILE_EXISTS: return UV_EEXIST; + case ERROR_BUFFER_OVERFLOW: return UV_EFAULT; + case WSAEFAULT: return UV_EFAULT; + case ERROR_HOST_UNREACHABLE: return UV_EHOSTUNREACH; + case WSAEHOSTUNREACH: return UV_EHOSTUNREACH; + case ERROR_INSUFFICIENT_BUFFER: return UV_EINVAL; + case ERROR_INVALID_DATA: return UV_EINVAL; + case ERROR_INVALID_PARAMETER: return UV_EINVAL; + case ERROR_SYMLINK_NOT_SUPPORTED: return UV_EINVAL; + case WSAEINVAL: return UV_EINVAL; + case WSAEPFNOSUPPORT: return UV_EINVAL; + case WSAESOCKTNOSUPPORT: return UV_EINVAL; + case ERROR_BEGINNING_OF_MEDIA: return UV_EIO; + case ERROR_BUS_RESET: return UV_EIO; + case ERROR_CRC: return UV_EIO; + case ERROR_DEVICE_DOOR_OPEN: return UV_EIO; + case ERROR_DEVICE_REQUIRES_CLEANING: return UV_EIO; + case ERROR_DISK_CORRUPT: return UV_EIO; + case ERROR_EOM_OVERFLOW: return UV_EIO; + case ERROR_FILEMARK_DETECTED: return UV_EIO; + case ERROR_GEN_FAILURE: return UV_EIO; + case ERROR_INVALID_BLOCK_LENGTH: return UV_EIO; + case ERROR_IO_DEVICE: return UV_EIO; + case ERROR_NO_DATA_DETECTED: return UV_EIO; + case ERROR_NO_SIGNAL_SENT: return UV_EIO; + case ERROR_OPEN_FAILED: return UV_EIO; + case ERROR_SETMARK_DETECTED: return UV_EIO; + case ERROR_SIGNAL_REFUSED: return UV_EIO; + case WSAEISCONN: return UV_EISCONN; + case ERROR_CANT_RESOLVE_FILENAME: return UV_ELOOP; + case ERROR_TOO_MANY_OPEN_FILES: return UV_EMFILE; + case WSAEMFILE: return UV_EMFILE; + case WSAEMSGSIZE: return UV_EMSGSIZE; + case ERROR_FILENAME_EXCED_RANGE: return UV_ENAMETOOLONG; + case ERROR_NETWORK_UNREACHABLE: return UV_ENETUNREACH; + case WSAENETUNREACH: return UV_ENETUNREACH; + case WSAENOBUFS: return UV_ENOBUFS; + case ERROR_BAD_PATHNAME: return UV_ENOENT; + case ERROR_DIRECTORY: return UV_ENOENT; + case ERROR_FILE_NOT_FOUND: return UV_ENOENT; + case ERROR_INVALID_NAME: return UV_ENOENT; + case ERROR_INVALID_DRIVE: return UV_ENOENT; + case ERROR_INVALID_REPARSE_DATA: return UV_ENOENT; + case ERROR_MOD_NOT_FOUND: return UV_ENOENT; + case ERROR_PATH_NOT_FOUND: return UV_ENOENT; + case WSAHOST_NOT_FOUND: return UV_ENOENT; + case WSANO_DATA: return UV_ENOENT; + case ERROR_NOT_ENOUGH_MEMORY: return UV_ENOMEM; + case ERROR_OUTOFMEMORY: return UV_ENOMEM; + case ERROR_CANNOT_MAKE: return UV_ENOSPC; + case ERROR_DISK_FULL: return UV_ENOSPC; + case ERROR_EA_TABLE_FULL: return UV_ENOSPC; + case ERROR_END_OF_MEDIA: return UV_ENOSPC; + case ERROR_HANDLE_DISK_FULL: 
return UV_ENOSPC; + case ERROR_NOT_CONNECTED: return UV_ENOTCONN; + case WSAENOTCONN: return UV_ENOTCONN; + case ERROR_DIR_NOT_EMPTY: return UV_ENOTEMPTY; + case WSAENOTSOCK: return UV_ENOTSOCK; + case ERROR_NOT_SUPPORTED: return UV_ENOTSUP; + case ERROR_BROKEN_PIPE: return UV_EOF; + case ERROR_ACCESS_DENIED: return UV_EPERM; + case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM; + case ERROR_BAD_PIPE: return UV_EPIPE; + case ERROR_NO_DATA: return UV_EPIPE; + case ERROR_PIPE_NOT_CONNECTED: return UV_EPIPE; + case WSAESHUTDOWN: return UV_EPIPE; + case WSAEPROTONOSUPPORT: return UV_EPROTONOSUPPORT; + case ERROR_WRITE_PROTECT: return UV_EROFS; + case ERROR_SEM_TIMEOUT: return UV_ETIMEDOUT; + case WSAETIMEDOUT: return UV_ETIMEDOUT; + case ERROR_NOT_SAME_DEVICE: return UV_EXDEV; + case ERROR_INVALID_FUNCTION: return UV_EISDIR; + case ERROR_META_EXPANSION_TOO_LONG: return UV_E2BIG; + default: return UV_UNKNOWN; + } +} diff --git a/deps/libuv/src/win/fs-event.c b/deps/libuv/src/win/fs-event.c new file mode 100644 index 00000000..03e4adc0 --- /dev/null +++ b/deps/libuv/src/win/fs-event.c @@ -0,0 +1,545 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "req-inl.h" + + +const unsigned int uv_directory_watcher_buffer_size = 4096; + + +static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop, + uv_fs_event_t* handle) { + assert(handle->dir_handle != INVALID_HANDLE_VALUE); + assert(!handle->req_pending); + + memset(&(handle->req.u.io.overlapped), 0, + sizeof(handle->req.u.io.overlapped)); + if (!ReadDirectoryChangesW(handle->dir_handle, + handle->buffer, + uv_directory_watcher_buffer_size, + (handle->flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE, + FILE_NOTIFY_CHANGE_FILE_NAME | + FILE_NOTIFY_CHANGE_DIR_NAME | + FILE_NOTIFY_CHANGE_ATTRIBUTES | + FILE_NOTIFY_CHANGE_SIZE | + FILE_NOTIFY_CHANGE_LAST_WRITE | + FILE_NOTIFY_CHANGE_LAST_ACCESS | + FILE_NOTIFY_CHANGE_CREATION | + FILE_NOTIFY_CHANGE_SECURITY, + NULL, + &handle->req.u.io.overlapped, + NULL)) { + /* Make this req pending reporting an error. 
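/* Hedged sketch of how the mapping above surfaces to callers (not libuv
 * source): a Win32 code such as ERROR_FILE_NOT_FOUND comes back from the
 * public API as the negative UV_ENOENT, with a name and message available
 * through uv_err_name() and uv_strerror(). "no-such-file" is a placeholder
 * path. */
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_fs_t req;
  int r = uv_fs_stat(uv_default_loop(), &req, "no-such-file", NULL);  /* sync: cb == NULL */

  if (r < 0)
    printf("%s: %s\n", uv_err_name(r), uv_strerror(r));  /* e.g. ENOENT: no such file or directory */

  uv_fs_req_cleanup(&req);
  return 0;
}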
*/ + SET_REQ_ERROR(&handle->req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)&handle->req); + } + + handle->req_pending = 1; +} + +static void uv_relative_path(const WCHAR* filename, + const WCHAR* dir, + WCHAR** relpath) { + size_t relpathlen; + size_t filenamelen = wcslen(filename); + size_t dirlen = wcslen(dir); + if (dirlen > 0 && dir[dirlen - 1] == '\\') + dirlen--; + relpathlen = filenamelen - dirlen - 1; + *relpath = uv__malloc((relpathlen + 1) * sizeof(WCHAR)); + if (!*relpath) + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + wcsncpy(*relpath, filename + dirlen + 1, relpathlen); + (*relpath)[relpathlen] = L'\0'; +} + +static int uv_split_path(const WCHAR* filename, WCHAR** dir, + WCHAR** file) { + int len = wcslen(filename); + int i = len; + while (i > 0 && filename[--i] != '\\' && filename[i] != '/'); + + if (i == 0) { + if (dir) { + *dir = (WCHAR*)uv__malloc((MAX_PATH + 1) * sizeof(WCHAR)); + if (!*dir) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + if (!GetCurrentDirectoryW(MAX_PATH, *dir)) { + uv__free(*dir); + *dir = NULL; + return -1; + } + } + + *file = wcsdup(filename); + } else { + if (dir) { + *dir = (WCHAR*)uv__malloc((i + 2) * sizeof(WCHAR)); + if (!*dir) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + wcsncpy(*dir, filename, i + 1); + (*dir)[i + 1] = L'\0'; + } + + *file = (WCHAR*)uv__malloc((len - i) * sizeof(WCHAR)); + if (!*file) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + wcsncpy(*file, filename + i + 1, len - i - 1); + (*file)[len - i - 1] = L'\0'; + } + + return 0; +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*) handle, UV_FS_EVENT); + handle->dir_handle = INVALID_HANDLE_VALUE; + handle->buffer = NULL; + handle->req_pending = 0; + handle->filew = NULL; + handle->short_filew = NULL; + handle->dirw = NULL; + + uv_req_init(loop, (uv_req_t*)&handle->req); + handle->req.type = UV_FS_EVENT_REQ; + handle->req.data = handle; + + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { + int name_size, is_path_dir; + DWORD attr, last_error; + WCHAR* dir = NULL, *dir_to_watch, *pathw = NULL; + WCHAR short_path[MAX_PATH]; + + if (uv__is_active(handle)) + return UV_EINVAL; + + handle->cb = cb; + handle->path = uv__strdup(path); + if (!handle->path) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + uv__handle_start(handle); + + /* Convert name to UTF16. */ + + name_size = MultiByteToWideChar(CP_UTF8, 0, path, -1, NULL, 0) * + sizeof(WCHAR); + pathw = (WCHAR*)uv__malloc(name_size); + if (!pathw) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + if (!MultiByteToWideChar(CP_UTF8, + 0, + path, + -1, + pathw, + name_size / sizeof(WCHAR))) { + return uv_translate_sys_error(GetLastError()); + } + + /* Determine whether path is a file or a directory. */ + attr = GetFileAttributesW(pathw); + if (attr == INVALID_FILE_ATTRIBUTES) { + last_error = GetLastError(); + goto error; + } + + is_path_dir = (attr & FILE_ATTRIBUTE_DIRECTORY) ? 1 : 0; + + if (is_path_dir) { + /* path is a directory, so that's the directory that we will watch. */ + handle->dirw = pathw; + dir_to_watch = pathw; + } else { + /* + * path is a file. So we split path into dir & file parts, and + * watch the dir directory. + */ + + /* Convert to short path. 
*/ + if (!GetShortPathNameW(pathw, short_path, ARRAY_SIZE(short_path))) { + last_error = GetLastError(); + goto error; + } + + if (uv_split_path(pathw, &dir, &handle->filew) != 0) { + last_error = GetLastError(); + goto error; + } + + if (uv_split_path(short_path, NULL, &handle->short_filew) != 0) { + last_error = GetLastError(); + goto error; + } + + dir_to_watch = dir; + uv__free(pathw); + pathw = NULL; + } + + handle->dir_handle = CreateFileW(dir_to_watch, + FILE_LIST_DIRECTORY, + FILE_SHARE_READ | FILE_SHARE_DELETE | + FILE_SHARE_WRITE, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS | + FILE_FLAG_OVERLAPPED, + NULL); + + if (dir) { + uv__free(dir); + dir = NULL; + } + + if (handle->dir_handle == INVALID_HANDLE_VALUE) { + last_error = GetLastError(); + goto error; + } + + if (CreateIoCompletionPort(handle->dir_handle, + handle->loop->iocp, + (ULONG_PTR)handle, + 0) == NULL) { + last_error = GetLastError(); + goto error; + } + + if (!handle->buffer) { + handle->buffer = (char*)uv__malloc(uv_directory_watcher_buffer_size); + } + if (!handle->buffer) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + memset(&(handle->req.u.io.overlapped), 0, + sizeof(handle->req.u.io.overlapped)); + + if (!ReadDirectoryChangesW(handle->dir_handle, + handle->buffer, + uv_directory_watcher_buffer_size, + (flags & UV_FS_EVENT_RECURSIVE) ? TRUE : FALSE, + FILE_NOTIFY_CHANGE_FILE_NAME | + FILE_NOTIFY_CHANGE_DIR_NAME | + FILE_NOTIFY_CHANGE_ATTRIBUTES | + FILE_NOTIFY_CHANGE_SIZE | + FILE_NOTIFY_CHANGE_LAST_WRITE | + FILE_NOTIFY_CHANGE_LAST_ACCESS | + FILE_NOTIFY_CHANGE_CREATION | + FILE_NOTIFY_CHANGE_SECURITY, + NULL, + &handle->req.u.io.overlapped, + NULL)) { + last_error = GetLastError(); + goto error; + } + + handle->req_pending = 1; + return 0; + +error: + if (handle->path) { + uv__free(handle->path); + handle->path = NULL; + } + + if (handle->filew) { + uv__free(handle->filew); + handle->filew = NULL; + } + + if (handle->short_filew) { + uv__free(handle->short_filew); + handle->short_filew = NULL; + } + + uv__free(pathw); + + if (handle->dir_handle != INVALID_HANDLE_VALUE) { + CloseHandle(handle->dir_handle); + handle->dir_handle = INVALID_HANDLE_VALUE; + } + + if (handle->buffer) { + uv__free(handle->buffer); + handle->buffer = NULL; + } + + return uv_translate_sys_error(last_error); +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + if (!uv__is_active(handle)) + return 0; + + if (handle->dir_handle != INVALID_HANDLE_VALUE) { + CloseHandle(handle->dir_handle); + handle->dir_handle = INVALID_HANDLE_VALUE; + } + + uv__handle_stop(handle); + + if (handle->filew) { + uv__free(handle->filew); + handle->filew = NULL; + } + + if (handle->short_filew) { + uv__free(handle->short_filew); + handle->short_filew = NULL; + } + + if (handle->path) { + uv__free(handle->path); + handle->path = NULL; + } + + if (handle->dirw) { + uv__free(handle->dirw); + handle->dirw = NULL; + } + + return 0; +} + + +static int file_info_cmp(WCHAR* str, WCHAR* file_name, int file_name_len) { + int str_len; + + str_len = wcslen(str); + + /* + Since we only care about equality, return early if the strings + aren't the same length + */ + if (str_len != (file_name_len / sizeof(WCHAR))) + return -1; + + return _wcsnicmp(str, file_name, str_len); +} + + +void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req, + uv_fs_event_t* handle) { + FILE_NOTIFY_INFORMATION* file_info; + int err, sizew, size; + char* filename = NULL; + WCHAR* filenamew = NULL; + WCHAR* long_filenamew = NULL; + DWORD offset = 0; + + assert(req->type 
== UV_FS_EVENT_REQ); + assert(handle->req_pending); + handle->req_pending = 0; + + /* Don't report any callbacks if: + * - We're closing, just push the handle onto the endgame queue + * - We are not active, just ignore the callback + */ + if (!uv__is_active(handle)) { + if (handle->flags & UV__HANDLE_CLOSING) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } + return; + } + + file_info = (FILE_NOTIFY_INFORMATION*)(handle->buffer + offset); + + if (REQ_SUCCESS(req)) { + if (req->u.io.overlapped.InternalHigh > 0) { + do { + file_info = (FILE_NOTIFY_INFORMATION*)((char*)file_info + offset); + assert(!filename); + assert(!filenamew); + assert(!long_filenamew); + + /* + * Fire the event only if we were asked to watch a directory, + * or if the filename filter matches. + */ + if (handle->dirw || + file_info_cmp(handle->filew, + file_info->FileName, + file_info->FileNameLength) == 0 || + file_info_cmp(handle->short_filew, + file_info->FileName, + file_info->FileNameLength) == 0) { + + if (handle->dirw) { + /* + * We attempt to resolve the long form of the file name explicitly. + * We only do this for file names that might still exist on disk. + * If this fails, we use the name given by ReadDirectoryChangesW. + * This may be the long form or the 8.3 short name in some cases. + */ + if (file_info->Action != FILE_ACTION_REMOVED && + file_info->Action != FILE_ACTION_RENAMED_OLD_NAME) { + /* Construct a full path to the file. */ + size = wcslen(handle->dirw) + + file_info->FileNameLength / sizeof(WCHAR) + 2; + + filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR)); + if (!filenamew) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + _snwprintf(filenamew, size, L"%s\\%.*s", handle->dirw, + file_info->FileNameLength / (DWORD)sizeof(WCHAR), + file_info->FileName); + + filenamew[size - 1] = L'\0'; + + /* Convert to long name. */ + size = GetLongPathNameW(filenamew, NULL, 0); + + if (size) { + long_filenamew = (WCHAR*)uv__malloc(size * sizeof(WCHAR)); + if (!long_filenamew) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + size = GetLongPathNameW(filenamew, long_filenamew, size); + if (size) { + long_filenamew[size] = '\0'; + } else { + uv__free(long_filenamew); + long_filenamew = NULL; + } + } + + uv__free(filenamew); + + if (long_filenamew) { + /* Get the file name out of the long path. */ + uv_relative_path(long_filenamew, + handle->dirw, + &filenamew); + uv__free(long_filenamew); + long_filenamew = filenamew; + sizew = -1; + } else { + /* We couldn't get the long filename, use the one reported. */ + filenamew = file_info->FileName; + sizew = file_info->FileNameLength / sizeof(WCHAR); + } + } else { + /* + * Removed or renamed events cannot be resolved to the long form. + * We therefore use the name given by ReadDirectoryChangesW. + * This may be the long form or the 8.3 short name in some cases. + */ + filenamew = file_info->FileName; + sizew = file_info->FileNameLength / sizeof(WCHAR); + } + } else { + /* We already have the long name of the file, so just use it. */ + filenamew = handle->filew; + sizew = -1; + } + + /* Convert the filename to utf8. 
*/ + uv__convert_utf16_to_utf8(filenamew, sizew, &filename); + + switch (file_info->Action) { + case FILE_ACTION_ADDED: + case FILE_ACTION_REMOVED: + case FILE_ACTION_RENAMED_OLD_NAME: + case FILE_ACTION_RENAMED_NEW_NAME: + handle->cb(handle, filename, UV_RENAME, 0); + break; + + case FILE_ACTION_MODIFIED: + handle->cb(handle, filename, UV_CHANGE, 0); + break; + } + + uv__free(filename); + filename = NULL; + uv__free(long_filenamew); + long_filenamew = NULL; + filenamew = NULL; + } + + offset = file_info->NextEntryOffset; + } while (offset && !(handle->flags & UV__HANDLE_CLOSING)); + } else { + handle->cb(handle, NULL, UV_CHANGE, 0); + } + } else { + err = GET_REQ_ERROR(req); + handle->cb(handle, NULL, 0, uv_translate_sys_error(err)); + } + + if (!(handle->flags & UV__HANDLE_CLOSING)) { + uv_fs_event_queue_readdirchanges(loop, handle); + } else { + uv_want_endgame(loop, (uv_handle_t*)handle); + } +} + + +void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) { + uv_fs_event_stop(handle); + + uv__handle_closing(handle); + + if (!handle->req_pending) { + uv_want_endgame(loop, (uv_handle_t*)handle); + } + +} + + +void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) { + if ((handle->flags & UV__HANDLE_CLOSING) && !handle->req_pending) { + assert(!(handle->flags & UV_HANDLE_CLOSED)); + + if (handle->buffer) { + uv__free(handle->buffer); + handle->buffer = NULL; + } + + uv__handle_close(handle); + } +} diff --git a/deps/libuv/src/win/fs.c b/deps/libuv/src/win/fs.c new file mode 100644 index 00000000..dc0ac89a --- /dev/null +++ b/deps/libuv/src/win/fs.c @@ -0,0 +1,2496 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
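/* Usage sketch for the watcher implemented above (not libuv source): the
 * callback receives UV_RENAME for added/removed/renamed entries and
 * UV_CHANGE for modifications, matching the dispatch in
 * uv_process_fs_event_req(). "." is a placeholder path; error handling is
 * minimal. */
#include <stdio.h>
#include <uv.h>

static void on_fs_event(uv_fs_event_t* handle,
                        const char* filename,
                        int events,
                        int status) {
  if (status < 0) {
    fprintf(stderr, "watch error: %s\n", uv_strerror(status));
    return;
  }
  printf("%s: %s\n",
         (events & UV_RENAME) ? "rename" : "change",
         filename != NULL ? filename : "(unknown)");
}

int main(void) {
  uv_fs_event_t watcher;

  uv_fs_event_init(uv_default_loop(), &watcher);
  uv_fs_event_start(&watcher, on_fs_event, ".", 0);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}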
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "uv.h" +#include "internal.h" +#include "req-inl.h" +#include "handle-inl.h" + +#include + + +#define UV_FS_FREE_PATHS 0x0002 +#define UV_FS_FREE_PTR 0x0008 +#define UV_FS_CLEANEDUP 0x0010 + + +#define QUEUE_FS_TP_JOB(loop, req) \ + do { \ + uv__req_register(loop, req); \ + uv__work_submit((loop), &(req)->work_req, uv__fs_work, uv__fs_done); \ + } while (0) + +#define SET_REQ_RESULT(req, result_value) \ + do { \ + req->result = (result_value); \ + if (req->result == -1) { \ + req->sys_errno_ = _doserrno; \ + req->result = uv_translate_sys_error(req->sys_errno_); \ + } \ + } while (0) + +#define SET_REQ_WIN32_ERROR(req, sys_errno) \ + do { \ + req->sys_errno_ = (sys_errno); \ + req->result = uv_translate_sys_error(req->sys_errno_); \ + } while (0) + +#define SET_REQ_UV_ERROR(req, uv_errno, sys_errno) \ + do { \ + req->result = (uv_errno); \ + req->sys_errno_ = (sys_errno); \ + } while (0) + +#define VERIFY_FD(fd, req) \ + if (fd == -1) { \ + req->result = UV_EBADF; \ + req->sys_errno_ = ERROR_INVALID_HANDLE; \ + return; \ + } + +#define FILETIME_TO_UINT(filetime) \ + (*((uint64_t*) &(filetime)) - 116444736000000000ULL) + +#define FILETIME_TO_TIME_T(filetime) \ + (FILETIME_TO_UINT(filetime) / 10000000ULL) + +#define FILETIME_TO_TIME_NS(filetime, secs) \ + ((FILETIME_TO_UINT(filetime) - (secs * 10000000ULL)) * 100) + +#define FILETIME_TO_TIMESPEC(ts, filetime) \ + do { \ + (ts).tv_sec = (long) FILETIME_TO_TIME_T(filetime); \ + (ts).tv_nsec = (long) FILETIME_TO_TIME_NS(filetime, (ts).tv_sec); \ + } while(0) + +#define TIME_T_TO_FILETIME(time, filetime_ptr) \ + do { \ + uint64_t bigtime = ((uint64_t) ((time) * 10000000ULL)) + \ + 116444736000000000ULL; \ + (filetime_ptr)->dwLowDateTime = bigtime & 0xFFFFFFFF; \ + (filetime_ptr)->dwHighDateTime = bigtime >> 32; \ + } while(0) + +#define IS_SLASH(c) ((c) == L'\\' || (c) == L'/') +#define IS_LETTER(c) (((c) >= L'a' && (c) <= L'z') || \ + ((c) >= L'A' && (c) <= L'Z')) + +const WCHAR JUNCTION_PREFIX[] = L"\\??\\"; +const WCHAR JUNCTION_PREFIX_LEN = 4; + +const WCHAR LONG_PATH_PREFIX[] = L"\\\\?\\"; +const WCHAR LONG_PATH_PREFIX_LEN = 4; + +const WCHAR UNC_PATH_PREFIX[] = L"\\\\?\\UNC\\"; +const WCHAR UNC_PATH_PREFIX_LEN = 8; + + +void uv_fs_init() { + _fmode = _O_BINARY; +} + + +INLINE static int fs__capture_path(uv_fs_t* req, const char* path, + const char* new_path, const int copy_path) { + char* buf; + char* pos; + ssize_t buf_sz = 0, path_len = 0, pathw_len = 0, new_pathw_len = 0; + + /* new_path can only be set if path is also set. 
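/* Standalone sketch of the arithmetic behind the FILETIME macros above (not
 * libuv code): FILETIME counts 100-nanosecond intervals since 1601-01-01,
 * and 116444736000000000 such intervals separate 1601 from the Unix epoch.
 * The raw value below is just an illustrative number. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t filetime = 131262900000000000ULL;               /* example raw FILETIME */
  uint64_t since_epoch = filetime - 116444736000000000ULL; /* rebase 1601 -> 1970 */
  uint64_t secs = since_epoch / 10000000ULL;               /* 100 ns units -> seconds */
  uint64_t nsec = (since_epoch - secs * 10000000ULL) * 100;

  printf("%llu.%09llu seconds since 1970-01-01\n",
         (unsigned long long) secs,
         (unsigned long long) nsec);
  return 0;
}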
*/ + assert(new_path == NULL || path != NULL); + + if (path != NULL) { + pathw_len = MultiByteToWideChar(CP_UTF8, + 0, + path, + -1, + NULL, + 0); + if (pathw_len == 0) { + return GetLastError(); + } + + buf_sz += pathw_len * sizeof(WCHAR); + } + + if (path != NULL && copy_path) { + path_len = 1 + strlen(path); + buf_sz += path_len; + } + + if (new_path != NULL) { + new_pathw_len = MultiByteToWideChar(CP_UTF8, + 0, + new_path, + -1, + NULL, + 0); + if (new_pathw_len == 0) { + return GetLastError(); + } + + buf_sz += new_pathw_len * sizeof(WCHAR); + } + + + if (buf_sz == 0) { + req->file.pathw = NULL; + req->fs.info.new_pathw = NULL; + req->path = NULL; + return 0; + } + + buf = (char*) uv__malloc(buf_sz); + if (buf == NULL) { + return ERROR_OUTOFMEMORY; + } + + pos = buf; + + if (path != NULL) { + DWORD r = MultiByteToWideChar(CP_UTF8, + 0, + path, + -1, + (WCHAR*) pos, + pathw_len); + assert(r == (DWORD) pathw_len); + req->file.pathw = (WCHAR*) pos; + pos += r * sizeof(WCHAR); + } else { + req->file.pathw = NULL; + } + + if (new_path != NULL) { + DWORD r = MultiByteToWideChar(CP_UTF8, + 0, + new_path, + -1, + (WCHAR*) pos, + new_pathw_len); + assert(r == (DWORD) new_pathw_len); + req->fs.info.new_pathw = (WCHAR*) pos; + pos += r * sizeof(WCHAR); + } else { + req->fs.info.new_pathw = NULL; + } + + req->path = path; + if (path != NULL && copy_path) { + memcpy(pos, path, path_len); + assert(path_len == buf_sz - (pos - buf)); + req->path = pos; + } + + req->flags |= UV_FS_FREE_PATHS; + + return 0; +} + + + +INLINE static void uv_fs_req_init(uv_loop_t* loop, uv_fs_t* req, + uv_fs_type fs_type, const uv_fs_cb cb) { + uv_req_init(loop, (uv_req_t*) req); + + req->type = UV_FS; + req->loop = loop; + req->flags = 0; + req->fs_type = fs_type; + req->result = 0; + req->ptr = NULL; + req->path = NULL; + req->cb = cb; + memset(&req->fs, 0, sizeof(req->fs)); +} + + +static int fs__wide_to_utf8(WCHAR* w_source_ptr, + DWORD w_source_len, + char** target_ptr, + uint64_t* target_len_ptr) { + int r; + int target_len; + char* target; + target_len = WideCharToMultiByte(CP_UTF8, + 0, + w_source_ptr, + w_source_len, + NULL, + 0, + NULL, + NULL); + + if (target_len == 0) { + return -1; + } + + if (target_len_ptr != NULL) { + *target_len_ptr = target_len; + } + + if (target_ptr == NULL) { + return 0; + } + + target = uv__malloc(target_len + 1); + if (target == NULL) { + SetLastError(ERROR_OUTOFMEMORY); + return -1; + } + + r = WideCharToMultiByte(CP_UTF8, + 0, + w_source_ptr, + w_source_len, + target, + target_len, + NULL, + NULL); + assert(r == target_len); + target[target_len] = '\0'; + *target_ptr = target; + return 0; +} + + +INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr, + uint64_t* target_len_ptr) { + char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; + REPARSE_DATA_BUFFER* reparse_data = (REPARSE_DATA_BUFFER*) buffer; + WCHAR* w_target; + DWORD w_target_len; + DWORD bytes; + + if (!DeviceIoControl(handle, + FSCTL_GET_REPARSE_POINT, + NULL, + 0, + buffer, + sizeof buffer, + &bytes, + NULL)) { + return -1; + } + + if (reparse_data->ReparseTag == IO_REPARSE_TAG_SYMLINK) { + /* Real symlink */ + w_target = reparse_data->SymbolicLinkReparseBuffer.PathBuffer + + (reparse_data->SymbolicLinkReparseBuffer.SubstituteNameOffset / + sizeof(WCHAR)); + w_target_len = + reparse_data->SymbolicLinkReparseBuffer.SubstituteNameLength / + sizeof(WCHAR); + + /* Real symlinks can contain pretty much everything, but the only thing */ + /* we really care about is undoing the implicit conversion to an NT */ + 
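/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] fs__wide_to_utf8() above uses the usual two-pass Win32 conversion:
 * call WideCharToMultiByte() once with a NULL output buffer to learn the
 * required size, allocate, then call it again to convert. A minimal
 * standalone version (invented name wide_to_utf8_dup, plain malloc instead
 * of uv__malloc) might look like this: */
#include <windows.h>
#include <stdlib.h>

static char* wide_to_utf8_dup(const WCHAR* w, int w_len) {
  int len;
  char* out;

  /* First pass: ask for the size of the UTF-8 result, in bytes. */
  len = WideCharToMultiByte(CP_UTF8, 0, w, w_len, NULL, 0, NULL, NULL);
  if (len == 0)
    return NULL;

  out = malloc(len + 1);
  if (out == NULL)
    return NULL;

  /* Second pass: perform the actual conversion into the buffer. */
  if (WideCharToMultiByte(CP_UTF8, 0, w, w_len, out, len, NULL, NULL) == 0) {
    free(out);
    return NULL;
  }

  out[len] = '\0';  /* w_len excludes the terminator, so add one ourselves */
  return out;
}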
/* namespaced path that CreateSymbolicLink will perform on absolute */ + /* paths. If the path is win32-namespaced then the user must have */ + /* explicitly made it so, and we better just return the unmodified */ + /* reparse data. */ + if (w_target_len >= 4 && + w_target[0] == L'\\' && + w_target[1] == L'?' && + w_target[2] == L'?' && + w_target[3] == L'\\') { + /* Starts with \??\ */ + if (w_target_len >= 6 && + ((w_target[4] >= L'A' && w_target[4] <= L'Z') || + (w_target[4] >= L'a' && w_target[4] <= L'z')) && + w_target[5] == L':' && + (w_target_len == 6 || w_target[6] == L'\\')) { + /* \??\:\ */ + w_target += 4; + w_target_len -= 4; + + } else if (w_target_len >= 8 && + (w_target[4] == L'U' || w_target[4] == L'u') && + (w_target[5] == L'N' || w_target[5] == L'n') && + (w_target[6] == L'C' || w_target[6] == L'c') && + w_target[7] == L'\\') { + /* \??\UNC\\\ - make sure the final path looks like */ + /* \\\\ */ + w_target += 6; + w_target[0] = L'\\'; + w_target_len -= 6; + } + } + + } else if (reparse_data->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT) { + /* Junction. */ + w_target = reparse_data->MountPointReparseBuffer.PathBuffer + + (reparse_data->MountPointReparseBuffer.SubstituteNameOffset / + sizeof(WCHAR)); + w_target_len = reparse_data->MountPointReparseBuffer.SubstituteNameLength / + sizeof(WCHAR); + + /* Only treat junctions that look like \??\:\ as symlink. */ + /* Junctions can also be used as mount points, like \??\Volume{}, */ + /* but that's confusing for programs since they wouldn't be able to */ + /* actually understand such a path when returned by uv_readlink(). */ + /* UNC paths are never valid for junctions so we don't care about them. */ + if (!(w_target_len >= 6 && + w_target[0] == L'\\' && + w_target[1] == L'?' && + w_target[2] == L'?' && + w_target[3] == L'\\' && + ((w_target[4] >= L'A' && w_target[4] <= L'Z') || + (w_target[4] >= L'a' && w_target[4] <= L'z')) && + w_target[5] == L':' && + (w_target_len == 6 || w_target[6] == L'\\'))) { + SetLastError(ERROR_SYMLINK_NOT_SUPPORTED); + return -1; + } + + /* Remove leading \??\ */ + w_target += 4; + w_target_len -= 4; + + } else { + /* Reparse tag does not indicate a symlink. */ + SetLastError(ERROR_SYMLINK_NOT_SUPPORTED); + return -1; + } + + return fs__wide_to_utf8(w_target, w_target_len, target_ptr, target_len_ptr); +} + + +void fs__open(uv_fs_t* req) { + DWORD access; + DWORD share; + DWORD disposition; + DWORD attributes = 0; + HANDLE file; + int fd, current_umask; + int flags = req->fs.info.file_flags; + + /* Obtain the active umask. umask() never fails and returns the previous */ + /* umask. */ + current_umask = umask(0); + umask(current_umask); + + /* convert flags and mode to CreateFile parameters */ + switch (flags & (_O_RDONLY | _O_WRONLY | _O_RDWR)) { + case _O_RDONLY: + access = FILE_GENERIC_READ; + attributes |= FILE_FLAG_BACKUP_SEMANTICS; + break; + case _O_WRONLY: + access = FILE_GENERIC_WRITE; + break; + case _O_RDWR: + access = FILE_GENERIC_READ | FILE_GENERIC_WRITE; + break; + default: + goto einval; + } + + if (flags & _O_APPEND) { + access &= ~FILE_WRITE_DATA; + access |= FILE_APPEND_DATA; + attributes &= ~FILE_FLAG_BACKUP_SEMANTICS; + } + + /* + * Here is where we deviate significantly from what CRT's _open() + * does. We indiscriminately use all the sharing modes, to match + * UNIX semantics. In particular, this ensures that the file can + * be deleted even whilst it's open, fixing issue #1449. 
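/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] The readlink code above undoes the "\??\" NT-namespace prefix that
 * CreateSymbolicLink adds to absolute targets: "\??\C:\x" becomes "C:\x" and
 * "\??\UNC\host\share" becomes "\\host\share". A stripped-down version of
 * just the drive-letter case (invented name strip_nt_prefix, operating on a
 * pointer/length pair the way the reparse buffer is consumed): */
#include <wchar.h>

static void strip_nt_prefix(const wchar_t** p, size_t* len) {
  const wchar_t* s = *p;
  size_t n = *len;

  if (n >= 6 && wcsncmp(s, L"\\??\\", 4) == 0 &&
      ((s[4] >= L'A' && s[4] <= L'Z') || (s[4] >= L'a' && s[4] <= L'z')) &&
      s[5] == L':') {
    /* \??\C:\... -> C:\... */
    *p = s + 4;
    *len = n - 4;
  }
  /* The UNC case additionally rewrites "\??\UNC\" to "\\", which needs a
   * writable buffer, as done above with w_target[0] = L'\\'. */
}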
+ */ + share = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE; + + switch (flags & (_O_CREAT | _O_EXCL | _O_TRUNC)) { + case 0: + case _O_EXCL: + disposition = OPEN_EXISTING; + break; + case _O_CREAT: + disposition = OPEN_ALWAYS; + break; + case _O_CREAT | _O_EXCL: + case _O_CREAT | _O_TRUNC | _O_EXCL: + disposition = CREATE_NEW; + break; + case _O_TRUNC: + case _O_TRUNC | _O_EXCL: + disposition = TRUNCATE_EXISTING; + break; + case _O_CREAT | _O_TRUNC: + disposition = CREATE_ALWAYS; + break; + default: + goto einval; + } + + attributes |= FILE_ATTRIBUTE_NORMAL; + if (flags & _O_CREAT) { + if (!((req->fs.info.mode & ~current_umask) & _S_IWRITE)) { + attributes |= FILE_ATTRIBUTE_READONLY; + } + } + + if (flags & _O_TEMPORARY ) { + attributes |= FILE_FLAG_DELETE_ON_CLOSE | FILE_ATTRIBUTE_TEMPORARY; + access |= DELETE; + } + + if (flags & _O_SHORT_LIVED) { + attributes |= FILE_ATTRIBUTE_TEMPORARY; + } + + switch (flags & (_O_SEQUENTIAL | _O_RANDOM)) { + case 0: + break; + case _O_SEQUENTIAL: + attributes |= FILE_FLAG_SEQUENTIAL_SCAN; + break; + case _O_RANDOM: + attributes |= FILE_FLAG_RANDOM_ACCESS; + break; + default: + goto einval; + } + + /* Setting this flag makes it possible to open a directory. */ + attributes |= FILE_FLAG_BACKUP_SEMANTICS; + + file = CreateFileW(req->file.pathw, + access, + share, + NULL, + disposition, + attributes, + NULL); + if (file == INVALID_HANDLE_VALUE) { + DWORD error = GetLastError(); + if (error == ERROR_FILE_EXISTS && (flags & _O_CREAT) && + !(flags & _O_EXCL)) { + /* Special case: when ERROR_FILE_EXISTS happens and O_CREAT was */ + /* specified, it means the path referred to a directory. */ + SET_REQ_UV_ERROR(req, UV_EISDIR, error); + } else { + SET_REQ_WIN32_ERROR(req, GetLastError()); + } + return; + } + + fd = _open_osfhandle((intptr_t) file, flags); + if (fd < 0) { + /* The only known failure mode for _open_osfhandle() is EMFILE, in which + * case GetLastError() will return zero. However we'll try to handle other + * errors as well, should they ever occur. + */ + if (errno == EMFILE) + SET_REQ_UV_ERROR(req, UV_EMFILE, ERROR_TOO_MANY_OPEN_FILES); + else if (GetLastError() != ERROR_SUCCESS) + SET_REQ_WIN32_ERROR(req, GetLastError()); + else + SET_REQ_WIN32_ERROR(req, UV_UNKNOWN); + CloseHandle(file); + return; + } + + SET_REQ_RESULT(req, fd); + return; + + einval: + SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER); +} + +void fs__close(uv_fs_t* req) { + int fd = req->file.fd; + int result; + + VERIFY_FD(fd, req); + + if (fd > 2) + result = _close(fd); + else + result = 0; + + /* _close doesn't set _doserrno on failure, but it does always set errno + * to EBADF on failure. 
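/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] The switch above maps the POSIX-style O_CREAT/O_EXCL/O_TRUNC flag
 * combinations onto CreateFileW's single "creation disposition" argument.
 * Pulled out on its own (invented name disposition_from_flags, returning 0
 * as a sentinel for the combinations the original rejects with EINVAL): */
#include <windows.h>
#include <fcntl.h>

static DWORD disposition_from_flags(int flags) {
  switch (flags & (_O_CREAT | _O_EXCL | _O_TRUNC)) {
    case 0:
    case _O_EXCL:
      return OPEN_EXISTING;              /* must already exist */
    case _O_CREAT:
      return OPEN_ALWAYS;                /* create if missing */
    case _O_CREAT | _O_EXCL:
    case _O_CREAT | _O_TRUNC | _O_EXCL:
      return CREATE_NEW;                 /* fail if it already exists */
    case _O_TRUNC:
    case _O_TRUNC | _O_EXCL:
      return TRUNCATE_EXISTING;          /* must exist, empty it */
    case _O_CREAT | _O_TRUNC:
      return CREATE_ALWAYS;              /* create or overwrite */
    default:
      return 0;                          /* invalid combination */
  }
}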
+ */ + if (result == -1) { + assert(errno == EBADF); + SET_REQ_UV_ERROR(req, UV_EBADF, ERROR_INVALID_HANDLE); + } else { + req->result = 0; + } +} + + +void fs__read(uv_fs_t* req) { + int fd = req->file.fd; + int64_t offset = req->fs.info.offset; + HANDLE handle; + OVERLAPPED overlapped, *overlapped_ptr; + LARGE_INTEGER offset_; + DWORD bytes; + DWORD error; + int result; + unsigned int index; + + VERIFY_FD(fd, req); + + handle = uv__get_osfhandle(fd); + + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE); + return; + } + + if (offset != -1) { + memset(&overlapped, 0, sizeof overlapped); + overlapped_ptr = &overlapped; + } else { + overlapped_ptr = NULL; + } + + index = 0; + bytes = 0; + do { + DWORD incremental_bytes; + + if (offset != -1) { + offset_.QuadPart = offset + bytes; + overlapped.Offset = offset_.LowPart; + overlapped.OffsetHigh = offset_.HighPart; + } + + result = ReadFile(handle, + req->fs.info.bufs[index].base, + req->fs.info.bufs[index].len, + &incremental_bytes, + overlapped_ptr); + bytes += incremental_bytes; + ++index; + } while (result && index < req->fs.info.nbufs); + + if (result || bytes > 0) { + SET_REQ_RESULT(req, bytes); + } else { + error = GetLastError(); + if (error == ERROR_HANDLE_EOF) { + SET_REQ_RESULT(req, bytes); + } else { + SET_REQ_WIN32_ERROR(req, error); + } + } +} + + +void fs__write(uv_fs_t* req) { + int fd = req->file.fd; + int64_t offset = req->fs.info.offset; + HANDLE handle; + OVERLAPPED overlapped, *overlapped_ptr; + LARGE_INTEGER offset_; + DWORD bytes; + int result; + unsigned int index; + + VERIFY_FD(fd, req); + + handle = uv__get_osfhandle(fd); + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE); + return; + } + + if (offset != -1) { + memset(&overlapped, 0, sizeof overlapped); + overlapped_ptr = &overlapped; + } else { + overlapped_ptr = NULL; + } + + index = 0; + bytes = 0; + do { + DWORD incremental_bytes; + + if (offset != -1) { + offset_.QuadPart = offset + bytes; + overlapped.Offset = offset_.LowPart; + overlapped.OffsetHigh = offset_.HighPart; + } + + result = WriteFile(handle, + req->fs.info.bufs[index].base, + req->fs.info.bufs[index].len, + &incremental_bytes, + overlapped_ptr); + bytes += incremental_bytes; + ++index; + } while (result && index < req->fs.info.nbufs); + + if (result || bytes > 0) { + SET_REQ_RESULT(req, bytes); + } else { + SET_REQ_WIN32_ERROR(req, GetLastError()); + } +} + + +void fs__rmdir(uv_fs_t* req) { + int result = _wrmdir(req->file.pathw); + SET_REQ_RESULT(req, result); +} + + +void fs__unlink(uv_fs_t* req) { + const WCHAR* pathw = req->file.pathw; + HANDLE handle; + BY_HANDLE_FILE_INFORMATION info; + FILE_DISPOSITION_INFORMATION disposition; + IO_STATUS_BLOCK iosb; + NTSTATUS status; + + handle = CreateFileW(pathw, + FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | DELETE, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, + NULL); + + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + if (!GetFileInformationByHandle(handle, &info)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + CloseHandle(handle); + return; + } + + if (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + /* Do not allow deletion of directories, unless it is a symlink. When */ + /* the path refers to a non-symlink directory, report EPERM as mandated */ + /* by POSIX.1. */ + + /* Check if it is a reparse point. 
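/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] fs__read() above emulates pread() by handing ReadFile an OVERLAPPED
 * structure that carries the absolute file offset, even on a handle opened
 * for synchronous I/O, and maps ERROR_HANDLE_EOF to a zero-byte read. A
 * minimal helper doing the same for a single buffer (invented name read_at): */
#include <windows.h>
#include <string.h>

/* Returns the number of bytes read, 0 at end of file, or -1 on error. */
static int read_at(HANDLE h, void* buf, DWORD len, LONGLONG offset) {
  OVERLAPPED o;
  DWORD n;

  memset(&o, 0, sizeof o);
  o.Offset = (DWORD) (offset & 0xFFFFFFFF);
  o.OffsetHigh = (DWORD) (offset >> 32);

  if (ReadFile(h, buf, len, &n, &o))
    return (int) n;

  if (GetLastError() == ERROR_HANDLE_EOF)
    return 0;   /* reading past the end of the file */

  return -1;
}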
If it's not, it's a normal directory. */ + if (!(info.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT)) { + SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED); + CloseHandle(handle); + return; + } + + /* Read the reparse point and check if it is a valid symlink. */ + /* If not, don't unlink. */ + if (fs__readlink_handle(handle, NULL, NULL) < 0) { + DWORD error = GetLastError(); + if (error == ERROR_SYMLINK_NOT_SUPPORTED) + error = ERROR_ACCESS_DENIED; + SET_REQ_WIN32_ERROR(req, error); + CloseHandle(handle); + return; + } + } + + if (info.dwFileAttributes & FILE_ATTRIBUTE_READONLY) { + /* Remove read-only attribute */ + FILE_BASIC_INFORMATION basic = { 0 }; + + basic.FileAttributes = info.dwFileAttributes & ~(FILE_ATTRIBUTE_READONLY); + + status = pNtSetInformationFile(handle, + &iosb, + &basic, + sizeof basic, + FileBasicInformation); + if (!NT_SUCCESS(status)) { + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); + CloseHandle(handle); + return; + } + } + + /* Try to set the delete flag. */ + disposition.DeleteFile = TRUE; + status = pNtSetInformationFile(handle, + &iosb, + &disposition, + sizeof disposition, + FileDispositionInformation); + if (NT_SUCCESS(status)) { + SET_REQ_SUCCESS(req); + } else { + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); + } + + CloseHandle(handle); +} + + +void fs__mkdir(uv_fs_t* req) { + /* TODO: use req->mode. */ + int result = _wmkdir(req->file.pathw); + SET_REQ_RESULT(req, result); +} + + +/* OpenBSD original: lib/libc/stdio/mktemp.c */ +void fs__mkdtemp(uv_fs_t* req) { + static const WCHAR *tempchars = + L"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"; + static const size_t num_chars = 62; + static const size_t num_x = 6; + WCHAR *cp, *ep; + unsigned int tries, i; + size_t len; + HCRYPTPROV h_crypt_prov; + uint64_t v; + BOOL released; + + len = wcslen(req->file.pathw); + ep = req->file.pathw + len; + if (len < num_x || wcsncmp(ep - num_x, L"XXXXXX", num_x)) { + SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_INVALID_PARAMETER); + return; + } + + if (!CryptAcquireContext(&h_crypt_prov, NULL, NULL, PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + tries = TMP_MAX; + do { + if (!CryptGenRandom(h_crypt_prov, sizeof(v), (BYTE*) &v)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + break; + } + + cp = ep - num_x; + for (i = 0; i < num_x; i++) { + *cp++ = tempchars[v % num_chars]; + v /= num_chars; + } + + if (_wmkdir(req->file.pathw) == 0) { + len = strlen(req->path); + wcstombs((char*) req->path + len - num_x, ep - num_x, num_x); + SET_REQ_RESULT(req, 0); + break; + } else if (errno != EEXIST) { + SET_REQ_RESULT(req, -1); + break; + } + } while (--tries); + + released = CryptReleaseContext(h_crypt_prov, 0); + assert(released); + if (tries == 0) { + SET_REQ_RESULT(req, -1); + } +} + + +void fs__scandir(uv_fs_t* req) { + static const size_t dirents_initial_size = 32; + + HANDLE dir_handle = INVALID_HANDLE_VALUE; + + uv__dirent_t** dirents = NULL; + size_t dirents_size = 0; + size_t dirents_used = 0; + + IO_STATUS_BLOCK iosb; + NTSTATUS status; + + /* Buffer to hold directory entries returned by NtQueryDirectoryFile. + * It's important that this buffer can hold at least one entry, regardless + * of the length of the file names present in the enumerated directory. + * A file name is at most 256 WCHARs long. + * According to MSDN, the buffer must be aligned at an 8-byte boundary. 
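/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] fs__mkdtemp() above replaces the trailing "XXXXXX" with base-62
 * digits taken from a 64-bit random value (which it obtains from
 * CryptGenRandom). The digit expansion itself is plain modular arithmetic;
 * the helper below (invented name fill_template) shows it with the random
 * value passed in by the caller: */
#include <stdint.h>
#include <string.h>

static const char kTempChars[] =
    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";

/* Overwrites the last 6 characters of tpl, which must end in "XXXXXX". */
static void fill_template(char* tpl, uint64_t v) {
  char* p = tpl + strlen(tpl) - 6;
  int i;
  for (i = 0; i < 6; i++) {
    *p++ = kTempChars[v % 62];
    v /= 62;
  }
}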
+ */ +#if _MSC_VER + __declspec(align(8)) char buffer[8192]; +#else + __attribute__ ((aligned (8))) char buffer[8192]; +#endif + + STATIC_ASSERT(sizeof buffer >= + sizeof(FILE_DIRECTORY_INFORMATION) + 256 * sizeof(WCHAR)); + + /* Open the directory. */ + dir_handle = + CreateFileW(req->file.pathw, + FILE_LIST_DIRECTORY | SYNCHRONIZE, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, + NULL); + if (dir_handle == INVALID_HANDLE_VALUE) + goto win32_error; + + /* Read the first chunk. */ + status = pNtQueryDirectoryFile(dir_handle, + NULL, + NULL, + NULL, + &iosb, + &buffer, + sizeof buffer, + FileDirectoryInformation, + FALSE, + NULL, + TRUE); + + /* If the handle is not a directory, we'll get STATUS_INVALID_PARAMETER. + * This should be reported back as UV_ENOTDIR. + */ + if (status == STATUS_INVALID_PARAMETER) + goto not_a_directory_error; + + while (NT_SUCCESS(status)) { + char* position = buffer; + size_t next_entry_offset = 0; + + do { + FILE_DIRECTORY_INFORMATION* info; + uv__dirent_t* dirent; + + size_t wchar_len; + size_t utf8_len; + + /* Obtain a pointer to the current directory entry. */ + position += next_entry_offset; + info = (FILE_DIRECTORY_INFORMATION*) position; + + /* Fetch the offset to the next directory entry. */ + next_entry_offset = info->NextEntryOffset; + + /* Compute the length of the filename in WCHARs. */ + wchar_len = info->FileNameLength / sizeof info->FileName[0]; + + /* Skip over '.' and '..' entries. It has been reported that + * the SharePoint driver includes the terminating zero byte in + * the filename length. Strip those first. + */ + while (wchar_len > 0 && info->FileName[wchar_len - 1] == L'\0') + wchar_len -= 1; + + if (wchar_len == 0) + continue; + if (wchar_len == 1 && info->FileName[0] == L'.') + continue; + if (wchar_len == 2 && info->FileName[0] == L'.' && + info->FileName[1] == L'.') + continue; + + /* Compute the space required to store the filename as UTF-8. */ + utf8_len = WideCharToMultiByte( + CP_UTF8, 0, &info->FileName[0], wchar_len, NULL, 0, NULL, NULL); + if (utf8_len == 0) + goto win32_error; + + /* Resize the dirent array if needed. */ + if (dirents_used >= dirents_size) { + size_t new_dirents_size = + dirents_size == 0 ? dirents_initial_size : dirents_size << 1; + uv__dirent_t** new_dirents = + uv__realloc(dirents, new_dirents_size * sizeof *dirents); + + if (new_dirents == NULL) + goto out_of_memory_error; + + dirents_size = new_dirents_size; + dirents = new_dirents; + } + + /* Allocate space for the uv dirent structure. The dirent structure + * includes room for the first character of the filename, but `utf8_len` + * doesn't count the NULL terminator at this point. + */ + dirent = uv__malloc(sizeof *dirent + utf8_len); + if (dirent == NULL) + goto out_of_memory_error; + + dirents[dirents_used++] = dirent; + + /* Convert file name to UTF-8. */ + if (WideCharToMultiByte(CP_UTF8, + 0, + &info->FileName[0], + wchar_len, + &dirent->d_name[0], + utf8_len, + NULL, + NULL) == 0) + goto win32_error; + + /* Add a null terminator to the filename. */ + dirent->d_name[utf8_len] = '\0'; + + /* Fill out the type field. 
*/ + if (info->FileAttributes & FILE_ATTRIBUTE_DEVICE) + dirent->d_type = UV__DT_CHAR; + else if (info->FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) + dirent->d_type = UV__DT_LINK; + else if (info->FileAttributes & FILE_ATTRIBUTE_DIRECTORY) + dirent->d_type = UV__DT_DIR; + else + dirent->d_type = UV__DT_FILE; + } while (next_entry_offset != 0); + + /* Read the next chunk. */ + status = pNtQueryDirectoryFile(dir_handle, + NULL, + NULL, + NULL, + &iosb, + &buffer, + sizeof buffer, + FileDirectoryInformation, + FALSE, + NULL, + FALSE); + + /* After the first pNtQueryDirectoryFile call, the function may return + * STATUS_SUCCESS even if the buffer was too small to hold at least one + * directory entry. + */ + if (status == STATUS_SUCCESS && iosb.Information == 0) + status = STATUS_BUFFER_OVERFLOW; + } + + if (status != STATUS_NO_MORE_FILES) + goto nt_error; + + CloseHandle(dir_handle); + + /* Store the result in the request object. */ + req->ptr = dirents; + if (dirents != NULL) + req->flags |= UV_FS_FREE_PTR; + + SET_REQ_RESULT(req, dirents_used); + + /* `nbufs` will be used as index by uv_fs_scandir_next. */ + req->fs.info.nbufs = 0; + + return; + +nt_error: + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); + goto cleanup; + +win32_error: + SET_REQ_WIN32_ERROR(req, GetLastError()); + goto cleanup; + +not_a_directory_error: + SET_REQ_UV_ERROR(req, UV_ENOTDIR, ERROR_DIRECTORY); + goto cleanup; + +out_of_memory_error: + SET_REQ_UV_ERROR(req, UV_ENOMEM, ERROR_OUTOFMEMORY); + goto cleanup; + +cleanup: + if (dir_handle != INVALID_HANDLE_VALUE) + CloseHandle(dir_handle); + while (dirents_used > 0) + uv__free(dirents[--dirents_used]); + if (dirents != NULL) + uv__free(dirents); +} + + +INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf) { + FILE_ALL_INFORMATION file_info; + FILE_FS_VOLUME_INFORMATION volume_info; + NTSTATUS nt_status; + IO_STATUS_BLOCK io_status; + + nt_status = pNtQueryInformationFile(handle, + &io_status, + &file_info, + sizeof file_info, + FileAllInformation); + + /* Buffer overflow (a warning status code) is expected here. */ + if (NT_ERROR(nt_status)) { + SetLastError(pRtlNtStatusToDosError(nt_status)); + return -1; + } + + nt_status = pNtQueryVolumeInformationFile(handle, + &io_status, + &volume_info, + sizeof volume_info, + FileFsVolumeInformation); + + /* Buffer overflow (a warning status code) is expected here. */ + if (io_status.Status == STATUS_NOT_IMPLEMENTED) { + statbuf->st_dev = 0; + } else if (NT_ERROR(nt_status)) { + SetLastError(pRtlNtStatusToDosError(nt_status)); + return -1; + } else { + statbuf->st_dev = volume_info.VolumeSerialNumber; + } + + /* Todo: st_mode should probably always be 0666 for everyone. We might also + * want to report 0777 if the file is a .exe or a directory. + * + * Currently it's based on whether the 'readonly' attribute is set, which + * makes little sense because the semantics are so different: the 'read-only' + * flag is just a way for a user to protect against accidental deletion, and + * serves no security purpose. Windows uses ACLs for that. + * + * Also people now use uv_fs_chmod() to take away the writable bit for good + * reasons. Windows however just makes the file read-only, which makes it + * impossible to delete the file afterwards, since read-only files can't be + * deleted. + * + * IOW it's all just a clusterfuck and we should think of something that + * makes slightly more sense. + * + * And uv_fs_chmod should probably just fail on windows or be a total no-op. 
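/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] fs__scandir() above collects results in an array of pointers that
 * starts at 32 slots and doubles whenever it fills up, so appending N
 * entries costs only O(log N) reallocations. The growth step in isolation
 * (invented names grow_if_full/entries/capacity, plain realloc instead of
 * uv__realloc): */
#include <stdlib.h>

static int grow_if_full(void*** entries, size_t* capacity, size_t used) {
  void** bigger;
  size_t new_capacity;

  if (used < *capacity)
    return 0;                                   /* still room */

  new_capacity = *capacity == 0 ? 32 : *capacity * 2;
  bigger = realloc(*entries, new_capacity * sizeof **entries);
  if (bigger == NULL)
    return -1;                                  /* caller reports ENOMEM */

  *entries = bigger;
  *capacity = new_capacity;
  return 0;
}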
+ * There's nothing sensible it can do anyway. + */ + statbuf->st_mode = 0; + + if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) { + /* + * It is possible for a file to have FILE_ATTRIBUTE_REPARSE_POINT but not have + * any link data. In that case DeviceIoControl() in fs__readlink_handle() sets + * the last error to ERROR_NOT_A_REPARSE_POINT. Then the stat result mode + * calculated below will indicate a normal directory or file, as if + * FILE_ATTRIBUTE_REPARSE_POINT was not present. + */ + if (fs__readlink_handle(handle, NULL, &statbuf->st_size) == 0) { + statbuf->st_mode |= S_IFLNK; + } else if (GetLastError() != ERROR_NOT_A_REPARSE_POINT) { + return -1; + } + } + + if (statbuf->st_mode == 0) { + if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + statbuf->st_mode |= _S_IFDIR; + statbuf->st_size = 0; + } else { + statbuf->st_mode |= _S_IFREG; + statbuf->st_size = file_info.StandardInformation.EndOfFile.QuadPart; + } + } + + if (file_info.BasicInformation.FileAttributes & FILE_ATTRIBUTE_READONLY) + statbuf->st_mode |= _S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6); + else + statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) | + ((_S_IREAD | _S_IWRITE) >> 6); + + FILETIME_TO_TIMESPEC(statbuf->st_atim, file_info.BasicInformation.LastAccessTime); + FILETIME_TO_TIMESPEC(statbuf->st_ctim, file_info.BasicInformation.ChangeTime); + FILETIME_TO_TIMESPEC(statbuf->st_mtim, file_info.BasicInformation.LastWriteTime); + FILETIME_TO_TIMESPEC(statbuf->st_birthtim, file_info.BasicInformation.CreationTime); + + statbuf->st_ino = file_info.InternalInformation.IndexNumber.QuadPart; + + /* st_blocks contains the on-disk allocation size in 512-byte units. */ + statbuf->st_blocks = + file_info.StandardInformation.AllocationSize.QuadPart >> 9ULL; + + statbuf->st_nlink = file_info.StandardInformation.NumberOfLinks; + + /* The st_blksize is supposed to be the 'optimal' number of bytes for reading + * and writing to the disk. That is, for any definition of 'optimal' - it's + * supposed to at least avoid read-update-write behavior when writing to the + * disk. + * + * However nobody knows this and even fewer people actually use this value, + * and in order to fill it out we'd have to make another syscall to query the + * volume for FILE_FS_SECTOR_SIZE_INFORMATION. + * + * Therefore we'll just report a sensible value that's quite commonly okay + * on modern hardware. + */ + statbuf->st_blksize = 2048; + + /* Todo: set st_flags to something meaningful. Also provide a wrapper for + * chattr(2). + */ + statbuf->st_flags = 0; + + /* Windows has nothing sensible to say about these values, so they'll just + * remain empty. + */ + statbuf->st_gid = 0; + statbuf->st_uid = 0; + statbuf->st_rdev = 0; + statbuf->st_gen = 0; + + return 0; +} + + +INLINE static void fs__stat_prepare_path(WCHAR* pathw) { + size_t len = wcslen(pathw); + + /* TODO: ignore namespaced paths. 
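/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] fs__stat_handle() above synthesizes Unix permission bits from the
 * single FILE_ATTRIBUTE_READONLY flag: the owner bits (_S_IREAD/_S_IWRITE)
 * are shifted right by 3 and 6 to fabricate matching group and other bits.
 * The same trick on its own (invented name mode_from_readonly, MSVC CRT
 * constants from sys/stat.h): */
#include <sys/stat.h>

static unsigned short mode_from_readonly(int read_only) {
  unsigned short owner = read_only ? _S_IREAD : (_S_IREAD | _S_IWRITE);
  /* e.g. 0400 -> 0444, 0600 -> 0666 */
  return owner | (owner >> 3) | (owner >> 6);
}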
*/ + if (len > 1 && pathw[len - 2] != L':' && + (pathw[len - 1] == L'\\' || pathw[len - 1] == L'/')) { + pathw[len - 1] = '\0'; + } +} + + +INLINE static void fs__stat_impl(uv_fs_t* req, int do_lstat) { + HANDLE handle; + DWORD flags; + + flags = FILE_FLAG_BACKUP_SEMANTICS; + if (do_lstat) { + flags |= FILE_FLAG_OPEN_REPARSE_POINT; + } + + handle = CreateFileW(req->file.pathw, + FILE_READ_ATTRIBUTES, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + flags, + NULL); + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + if (fs__stat_handle(handle, &req->statbuf) != 0) { + DWORD error = GetLastError(); + if (do_lstat && error == ERROR_SYMLINK_NOT_SUPPORTED) { + /* We opened a reparse point but it was not a symlink. Try again. */ + fs__stat_impl(req, 0); + + } else { + /* Stat failed. */ + SET_REQ_WIN32_ERROR(req, GetLastError()); + } + + CloseHandle(handle); + return; + } + + req->ptr = &req->statbuf; + req->result = 0; + CloseHandle(handle); +} + + +static void fs__stat(uv_fs_t* req) { + fs__stat_prepare_path(req->file.pathw); + fs__stat_impl(req, 0); +} + + +static void fs__lstat(uv_fs_t* req) { + fs__stat_prepare_path(req->file.pathw); + fs__stat_impl(req, 1); +} + + +static void fs__fstat(uv_fs_t* req) { + int fd = req->file.fd; + HANDLE handle; + + VERIFY_FD(fd, req); + + handle = uv__get_osfhandle(fd); + + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE); + return; + } + + if (fs__stat_handle(handle, &req->statbuf) != 0) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + req->ptr = &req->statbuf; + req->result = 0; +} + + +static void fs__rename(uv_fs_t* req) { + if (!MoveFileExW(req->file.pathw, req->fs.info.new_pathw, MOVEFILE_REPLACE_EXISTING)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + SET_REQ_RESULT(req, 0); +} + + +INLINE static void fs__sync_impl(uv_fs_t* req) { + int fd = req->file.fd; + int result; + + VERIFY_FD(fd, req); + + result = FlushFileBuffers(uv__get_osfhandle(fd)) ? 0 : -1; + if (result == -1) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + } else { + SET_REQ_RESULT(req, result); + } +} + + +static void fs__fsync(uv_fs_t* req) { + fs__sync_impl(req); +} + + +static void fs__fdatasync(uv_fs_t* req) { + fs__sync_impl(req); +} + + +static void fs__ftruncate(uv_fs_t* req) { + int fd = req->file.fd; + HANDLE handle; + NTSTATUS status; + IO_STATUS_BLOCK io_status; + FILE_END_OF_FILE_INFORMATION eof_info; + + VERIFY_FD(fd, req); + + handle = uv__get_osfhandle(fd); + + eof_info.EndOfFile.QuadPart = req->fs.info.offset; + + status = pNtSetInformationFile(handle, + &io_status, + &eof_info, + sizeof eof_info, + FileEndOfFileInformation); + + if (NT_SUCCESS(status)) { + SET_REQ_RESULT(req, 0); + } else { + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(status)); + } +} + + +static void fs__sendfile(uv_fs_t* req) { + int fd_in = req->file.fd, fd_out = req->fs.info.fd_out; + size_t length = req->fs.info.bufsml[0].len; + int64_t offset = req->fs.info.offset; + const size_t max_buf_size = 65536; + size_t buf_size = length < max_buf_size ? length : max_buf_size; + int n, result = 0; + int64_t result_offset = 0; + char* buf = (char*) uv__malloc(buf_size); + if (!buf) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + if (offset != -1) { + result_offset = _lseeki64(fd_in, offset, SEEK_SET); + } + + if (result_offset == -1) { + result = -1; + } else { + while (length > 0) { + n = _read(fd_in, buf, length < buf_size ? 
length : buf_size); + if (n == 0) { + break; + } else if (n == -1) { + result = -1; + break; + } + + length -= n; + + n = _write(fd_out, buf, n); + if (n == -1) { + result = -1; + break; + } + + result += n; + } + } + + uv__free(buf); + + SET_REQ_RESULT(req, result); +} + + +static void fs__access(uv_fs_t* req) { + DWORD attr = GetFileAttributesW(req->file.pathw); + + if (attr == INVALID_FILE_ATTRIBUTES) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + /* + * Access is possible if + * - write access wasn't requested, + * - or the file isn't read-only, + * - or it's a directory. + * (Directories cannot be read-only on Windows.) + */ + if (!(req->flags & W_OK) || + !(attr & FILE_ATTRIBUTE_READONLY) || + (attr & FILE_ATTRIBUTE_DIRECTORY)) { + SET_REQ_RESULT(req, 0); + } else { + SET_REQ_WIN32_ERROR(req, UV_EPERM); + } + +} + + +static void fs__chmod(uv_fs_t* req) { + int result = _wchmod(req->file.pathw, req->fs.info.mode); + SET_REQ_RESULT(req, result); +} + + +static void fs__fchmod(uv_fs_t* req) { + int fd = req->file.fd; + HANDLE handle; + NTSTATUS nt_status; + IO_STATUS_BLOCK io_status; + FILE_BASIC_INFORMATION file_info; + + VERIFY_FD(fd, req); + + handle = uv__get_osfhandle(fd); + + nt_status = pNtQueryInformationFile(handle, + &io_status, + &file_info, + sizeof file_info, + FileBasicInformation); + + if (!NT_SUCCESS(nt_status)) { + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status)); + return; + } + + if (req->fs.info.mode & _S_IWRITE) { + file_info.FileAttributes &= ~FILE_ATTRIBUTE_READONLY; + } else { + file_info.FileAttributes |= FILE_ATTRIBUTE_READONLY; + } + + nt_status = pNtSetInformationFile(handle, + &io_status, + &file_info, + sizeof file_info, + FileBasicInformation); + + if (!NT_SUCCESS(nt_status)) { + SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status)); + return; + } + + SET_REQ_SUCCESS(req); +} + + +INLINE static int fs__utime_handle(HANDLE handle, double atime, double mtime) { + FILETIME filetime_a, filetime_m; + + TIME_T_TO_FILETIME(atime, &filetime_a); + TIME_T_TO_FILETIME(mtime, &filetime_m); + + if (!SetFileTime(handle, NULL, &filetime_a, &filetime_m)) { + return -1; + } + + return 0; +} + + +static void fs__utime(uv_fs_t* req) { + HANDLE handle; + + handle = CreateFileW(req->file.pathw, + FILE_WRITE_ATTRIBUTES, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS, + NULL); + + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + if (fs__utime_handle(handle, req->fs.time.atime, req->fs.time.mtime) != 0) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + CloseHandle(handle); + return; + } + + CloseHandle(handle); + + req->result = 0; +} + + +static void fs__futime(uv_fs_t* req) { + int fd = req->file.fd; + HANDLE handle; + VERIFY_FD(fd, req); + + handle = uv__get_osfhandle(fd); + + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, ERROR_INVALID_HANDLE); + return; + } + + if (fs__utime_handle(handle, req->fs.time.atime, req->fs.time.mtime) != 0) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + req->result = 0; +} + + +static void fs__link(uv_fs_t* req) { + DWORD r = CreateHardLinkW(req->fs.info.new_pathw, req->file.pathw, NULL); + if (r == 0) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + } else { + req->result = 0; + } +} + + +static void fs__create_junction(uv_fs_t* req, const WCHAR* path, + const WCHAR* new_path) { + HANDLE handle = INVALID_HANDLE_VALUE; + REPARSE_DATA_BUFFER *buffer = NULL; + int 
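/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] fs__sendfile() above has no kernel sendfile to call on Windows, so
 * it simply pumps data through a bounded 64 KB user-space buffer with the
 * CRT _read()/_write() calls. The core loop, reduced to its essentials
 * (invented name copy_fd, plain malloc, no offset handling): */
#include <io.h>
#include <stdlib.h>

/* Copies up to `length` bytes from fd_in to fd_out; returns bytes written
 * or -1 on error. */
static long copy_fd(int fd_in, int fd_out, size_t length) {
  const size_t buf_size = 65536;
  char* buf = malloc(buf_size);
  long copied = 0;
  int n;

  if (buf == NULL)
    return -1;

  while (length > 0) {
    n = _read(fd_in, buf, (unsigned) (length < buf_size ? length : buf_size));
    if (n == 0)
      break;                      /* end of input */
    if (n == -1) {
      copied = -1;                /* read error */
      break;
    }
    length -= n;

    n = _write(fd_out, buf, (unsigned) n);
    if (n == -1) {
      copied = -1;                /* write error */
      break;
    }
    copied += n;
  }

  free(buf);
  return copied;
}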
created = 0; + int target_len; + int is_absolute, is_long_path; + int needed_buf_size, used_buf_size, used_data_size, path_buf_len; + int start, len, i; + int add_slash; + DWORD bytes; + WCHAR* path_buf; + + target_len = wcslen(path); + is_long_path = wcsncmp(path, LONG_PATH_PREFIX, LONG_PATH_PREFIX_LEN) == 0; + + if (is_long_path) { + is_absolute = 1; + } else { + is_absolute = target_len >= 3 && IS_LETTER(path[0]) && + path[1] == L':' && IS_SLASH(path[2]); + } + + if (!is_absolute) { + /* Not supporting relative paths */ + SET_REQ_UV_ERROR(req, UV_EINVAL, ERROR_NOT_SUPPORTED); + return; + } + + /* Do a pessimistic calculation of the required buffer size */ + needed_buf_size = + FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer) + + JUNCTION_PREFIX_LEN * sizeof(WCHAR) + + 2 * (target_len + 2) * sizeof(WCHAR); + + /* Allocate the buffer */ + buffer = (REPARSE_DATA_BUFFER*)uv__malloc(needed_buf_size); + if (!buffer) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + /* Grab a pointer to the part of the buffer where filenames go */ + path_buf = (WCHAR*)&(buffer->MountPointReparseBuffer.PathBuffer); + path_buf_len = 0; + + /* Copy the substitute (internal) target path */ + start = path_buf_len; + + wcsncpy((WCHAR*)&path_buf[path_buf_len], JUNCTION_PREFIX, + JUNCTION_PREFIX_LEN); + path_buf_len += JUNCTION_PREFIX_LEN; + + add_slash = 0; + for (i = is_long_path ? LONG_PATH_PREFIX_LEN : 0; path[i] != L'\0'; i++) { + if (IS_SLASH(path[i])) { + add_slash = 1; + continue; + } + + if (add_slash) { + path_buf[path_buf_len++] = L'\\'; + add_slash = 0; + } + + path_buf[path_buf_len++] = path[i]; + } + path_buf[path_buf_len++] = L'\\'; + len = path_buf_len - start; + + /* Set the info about the substitute name */ + buffer->MountPointReparseBuffer.SubstituteNameOffset = start * sizeof(WCHAR); + buffer->MountPointReparseBuffer.SubstituteNameLength = len * sizeof(WCHAR); + + /* Insert null terminator */ + path_buf[path_buf_len++] = L'\0'; + + /* Copy the print name of the target path */ + start = path_buf_len; + add_slash = 0; + for (i = is_long_path ? 
LONG_PATH_PREFIX_LEN : 0; path[i] != L'\0'; i++) { + if (IS_SLASH(path[i])) { + add_slash = 1; + continue; + } + + if (add_slash) { + path_buf[path_buf_len++] = L'\\'; + add_slash = 0; + } + + path_buf[path_buf_len++] = path[i]; + } + len = path_buf_len - start; + if (len == 2) { + path_buf[path_buf_len++] = L'\\'; + len++; + } + + /* Set the info about the print name */ + buffer->MountPointReparseBuffer.PrintNameOffset = start * sizeof(WCHAR); + buffer->MountPointReparseBuffer.PrintNameLength = len * sizeof(WCHAR); + + /* Insert another null terminator */ + path_buf[path_buf_len++] = L'\0'; + + /* Calculate how much buffer space was actually used */ + used_buf_size = FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer) + + path_buf_len * sizeof(WCHAR); + used_data_size = used_buf_size - + FIELD_OFFSET(REPARSE_DATA_BUFFER, MountPointReparseBuffer); + + /* Put general info in the data buffer */ + buffer->ReparseTag = IO_REPARSE_TAG_MOUNT_POINT; + buffer->ReparseDataLength = used_data_size; + buffer->Reserved = 0; + + /* Create a new directory */ + if (!CreateDirectoryW(new_path, NULL)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + goto error; + } + created = 1; + + /* Open the directory */ + handle = CreateFileW(new_path, + GENERIC_WRITE, + 0, + NULL, + OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS | + FILE_FLAG_OPEN_REPARSE_POINT, + NULL); + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + goto error; + } + + /* Create the actual reparse point */ + if (!DeviceIoControl(handle, + FSCTL_SET_REPARSE_POINT, + buffer, + used_buf_size, + NULL, + 0, + &bytes, + NULL)) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + goto error; + } + + /* Clean up */ + CloseHandle(handle); + uv__free(buffer); + + SET_REQ_RESULT(req, 0); + return; + +error: + uv__free(buffer); + + if (handle != INVALID_HANDLE_VALUE) { + CloseHandle(handle); + } + + if (created) { + RemoveDirectoryW(new_path); + } +} + + +static void fs__symlink(uv_fs_t* req) { + WCHAR* pathw = req->file.pathw; + WCHAR* new_pathw = req->fs.info.new_pathw; + int flags = req->fs.info.file_flags; + int result; + + + if (flags & UV_FS_SYMLINK_JUNCTION) { + fs__create_junction(req, pathw, new_pathw); + } else if (pCreateSymbolicLinkW) { + result = pCreateSymbolicLinkW(new_pathw, + pathw, + flags & UV_FS_SYMLINK_DIR ? SYMBOLIC_LINK_FLAG_DIRECTORY : 0) ? 
0 : -1; + if (result == -1) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + } else { + SET_REQ_RESULT(req, result); + } + } else { + SET_REQ_UV_ERROR(req, UV_ENOSYS, ERROR_NOT_SUPPORTED); + } +} + + +static void fs__readlink(uv_fs_t* req) { + HANDLE handle; + + handle = CreateFileW(req->file.pathw, + 0, + 0, + NULL, + OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, + NULL); + + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + CloseHandle(handle); + return; + } + + req->flags |= UV_FS_FREE_PTR; + SET_REQ_RESULT(req, 0); + + CloseHandle(handle); +} + + +static size_t fs__realpath_handle(HANDLE handle, char** realpath_ptr) { + int r; + DWORD w_realpath_len; + WCHAR* w_realpath_ptr = NULL; + WCHAR* w_realpath_buf; + + w_realpath_len = pGetFinalPathNameByHandleW(handle, NULL, 0, VOLUME_NAME_DOS); + if (w_realpath_len == 0) { + return -1; + } + + w_realpath_buf = uv__malloc((w_realpath_len + 1) * sizeof(WCHAR)); + if (w_realpath_buf == NULL) { + SetLastError(ERROR_OUTOFMEMORY); + return -1; + } + w_realpath_ptr = w_realpath_buf; + + if (pGetFinalPathNameByHandleW(handle, + w_realpath_ptr, + w_realpath_len, + VOLUME_NAME_DOS) == 0) { + uv__free(w_realpath_buf); + SetLastError(ERROR_INVALID_HANDLE); + return -1; + } + + /* convert UNC path to long path */ + if (wcsncmp(w_realpath_ptr, + UNC_PATH_PREFIX, + UNC_PATH_PREFIX_LEN) == 0) { + w_realpath_ptr += 6; + *w_realpath_ptr = L'\\'; + w_realpath_len -= 6; + } else if (wcsncmp(w_realpath_ptr, + LONG_PATH_PREFIX, + LONG_PATH_PREFIX_LEN) == 0) { + w_realpath_ptr += 4; + w_realpath_len -= 4; + } else { + uv__free(w_realpath_buf); + SetLastError(ERROR_INVALID_HANDLE); + return -1; + } + + r = fs__wide_to_utf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL); + uv__free(w_realpath_buf); + return r; +} + +static void fs__realpath(uv_fs_t* req) { + HANDLE handle; + + if (!pGetFinalPathNameByHandleW) { + SET_REQ_UV_ERROR(req, UV_ENOSYS, ERROR_NOT_SUPPORTED); + return; + } + + handle = CreateFileW(req->file.pathw, + 0, + 0, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS, + NULL); + if (handle == INVALID_HANDLE_VALUE) { + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + if (fs__realpath_handle(handle, (char**) &req->ptr) == -1) { + CloseHandle(handle); + SET_REQ_WIN32_ERROR(req, GetLastError()); + return; + } + + CloseHandle(handle); + req->flags |= UV_FS_FREE_PTR; + SET_REQ_RESULT(req, 0); +} + + +static void fs__chown(uv_fs_t* req) { + req->result = 0; +} + + +static void fs__fchown(uv_fs_t* req) { + req->result = 0; +} + + +static void uv__fs_work(struct uv__work* w) { + uv_fs_t* req; + + req = container_of(w, uv_fs_t, work_req); + assert(req->type == UV_FS); + +#define XX(uc, lc) case UV_FS_##uc: fs__##lc(req); break; + switch (req->fs_type) { + XX(OPEN, open) + XX(CLOSE, close) + XX(READ, read) + XX(WRITE, write) + XX(SENDFILE, sendfile) + XX(STAT, stat) + XX(LSTAT, lstat) + XX(FSTAT, fstat) + XX(FTRUNCATE, ftruncate) + XX(UTIME, utime) + XX(FUTIME, futime) + XX(ACCESS, access) + XX(CHMOD, chmod) + XX(FCHMOD, fchmod) + XX(FSYNC, fsync) + XX(FDATASYNC, fdatasync) + XX(UNLINK, unlink) + XX(RMDIR, rmdir) + XX(MKDIR, mkdir) + XX(MKDTEMP, mkdtemp) + XX(RENAME, rename) + XX(SCANDIR, scandir) + XX(LINK, link) + XX(SYMLINK, symlink) + XX(READLINK, readlink) + XX(REALPATH, realpath) + XX(CHOWN, chown) + XX(FCHOWN, fchown); + 
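/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] The XX(uc, lc) list above is the classic "X macro" pattern: a
 * single list of operation names expands both to the UV_FS_* case labels and
 * to the fs__* function calls, so adding an operation cannot leave the
 * dispatcher out of sync. A toy version of the same pattern (all names
 * invented): */
#include <stdio.h>

#define OP_MAP(XX) \
  XX(OPEN, open)   \
  XX(CLOSE, close)

enum toy_op {
#define XX(uc, lc) TOY_##uc,
  OP_MAP(XX)
#undef XX
};

static void toy__open(void)  { puts("open");  }
static void toy__close(void) { puts("close"); }

static void toy_dispatch(enum toy_op op) {
  switch (op) {
#define XX(uc, lc) case TOY_##uc: toy__##lc(); break;
    OP_MAP(XX)
#undef XX
  }
}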
default: + assert(!"bad uv_fs_type"); + } +} + + +static void uv__fs_done(struct uv__work* w, int status) { + uv_fs_t* req; + + req = container_of(w, uv_fs_t, work_req); + uv__req_unregister(req->loop, req); + + if (status == UV_ECANCELED) { + assert(req->result == 0); + req->result = UV_ECANCELED; + } + + req->cb(req); +} + + +void uv_fs_req_cleanup(uv_fs_t* req) { + if (req->flags & UV_FS_CLEANEDUP) + return; + + if (req->flags & UV_FS_FREE_PATHS) + uv__free(req->file.pathw); + + if (req->flags & UV_FS_FREE_PTR) { + if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL) + uv__fs_scandir_cleanup(req); + else + uv__free(req->ptr); + } + + if (req->fs.info.bufs != req->fs.info.bufsml) + uv__free(req->fs.info.bufs); + + req->path = NULL; + req->file.pathw = NULL; + req->fs.info.new_pathw = NULL; + req->fs.info.bufs = NULL; + req->ptr = NULL; + + req->flags |= UV_FS_CLEANEDUP; +} + + +int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, + int mode, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_OPEN, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + req->fs.info.file_flags = flags; + req->fs.info.mode = mode; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__open(req); + return req->result; + } +} + + +int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_CLOSE, cb); + req->file.fd = fd; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__close(req); + return req->result; + } +} + + +int uv_fs_read(uv_loop_t* loop, + uv_fs_t* req, + uv_file fd, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t offset, + uv_fs_cb cb) { + if (bufs == NULL || nbufs == 0) + return UV_EINVAL; + + uv_fs_req_init(loop, req, UV_FS_READ, cb); + + req->file.fd = fd; + + req->fs.info.nbufs = nbufs; + req->fs.info.bufs = req->fs.info.bufsml; + if (nbufs > ARRAY_SIZE(req->fs.info.bufsml)) + req->fs.info.bufs = uv__malloc(nbufs * sizeof(*bufs)); + + if (req->fs.info.bufs == NULL) + return UV_ENOMEM; + + memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs)); + + req->fs.info.offset = offset; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__read(req); + return req->result; + } +} + + +int uv_fs_write(uv_loop_t* loop, + uv_fs_t* req, + uv_file fd, + const uv_buf_t bufs[], + unsigned int nbufs, + int64_t offset, + uv_fs_cb cb) { + if (bufs == NULL || nbufs == 0) + return UV_EINVAL; + + uv_fs_req_init(loop, req, UV_FS_WRITE, cb); + + req->file.fd = fd; + + req->fs.info.nbufs = nbufs; + req->fs.info.bufs = req->fs.info.bufsml; + if (nbufs > ARRAY_SIZE(req->fs.info.bufsml)) + req->fs.info.bufs = uv__malloc(nbufs * sizeof(*bufs)); + + if (req->fs.info.bufs == NULL) + return UV_ENOMEM; + + memcpy(req->fs.info.bufs, bufs, nbufs * sizeof(*bufs)); + + req->fs.info.offset = offset; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__write(req); + return req->result; + } +} + + +int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_UNLINK, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__unlink(req); + return req->result; + } +} + + +int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, 
UV_FS_MKDIR, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + req->fs.info.mode = mode; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__mkdir(req); + return req->result; + } +} + + +int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_MKDTEMP, cb); + + err = fs__capture_path(req, tpl, NULL, TRUE); + if (err) + return uv_translate_sys_error(err); + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__mkdtemp(req); + return req->result; + } +} + + +int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_RMDIR, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__rmdir(req); + return req->result; + } +} + + +int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_SCANDIR, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + req->fs.info.file_flags = flags; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__scandir(req); + return req->result; + } +} + + +int uv_fs_link(uv_loop_t* loop, uv_fs_t* req, const char* path, + const char* new_path, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_LINK, cb); + + err = fs__capture_path(req, path, new_path, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__link(req); + return req->result; + } +} + + +int uv_fs_symlink(uv_loop_t* loop, uv_fs_t* req, const char* path, + const char* new_path, int flags, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_SYMLINK, cb); + + err = fs__capture_path(req, path, new_path, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + req->fs.info.file_flags = flags; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__symlink(req); + return req->result; + } +} + + +int uv_fs_readlink(uv_loop_t* loop, uv_fs_t* req, const char* path, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_READLINK, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__readlink(req); + return req->result; + } +} + + +int uv_fs_realpath(uv_loop_t* loop, uv_fs_t* req, const char* path, + uv_fs_cb cb) { + int err; + + if (!req || !path) { + return UV_EINVAL; + } + + uv_fs_req_init(loop, req, UV_FS_REALPATH, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__realpath(req); + return req->result; + } +} + + +int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid, + uv_gid_t gid, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_CHOWN, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__chown(req); + return req->result; + } +} + + +int uv_fs_fchown(uv_loop_t* loop, uv_fs_t* req, uv_file 
fd, uv_uid_t uid, + uv_gid_t gid, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_FCHOWN, cb); + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__fchown(req); + return req->result; + } +} + + +int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_STAT, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__stat(req); + return req->result; + } +} + + +int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_LSTAT, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__lstat(req); + return req->result; + } +} + + +int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_FSTAT, cb); + req->file.fd = fd; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__fstat(req); + return req->result; + } +} + + +int uv_fs_rename(uv_loop_t* loop, uv_fs_t* req, const char* path, + const char* new_path, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_RENAME, cb); + + err = fs__capture_path(req, path, new_path, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__rename(req); + return req->result; + } +} + + +int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_FSYNC, cb); + req->file.fd = fd; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__fsync(req); + return req->result; + } +} + + +int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file fd, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_FDATASYNC, cb); + req->file.fd = fd; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__fdatasync(req); + return req->result; + } +} + + +int uv_fs_ftruncate(uv_loop_t* loop, uv_fs_t* req, uv_file fd, + int64_t offset, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_FTRUNCATE, cb); + + req->file.fd = fd; + req->fs.info.offset = offset; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__ftruncate(req); + return req->result; + } +} + + + +int uv_fs_sendfile(uv_loop_t* loop, uv_fs_t* req, uv_file fd_out, + uv_file fd_in, int64_t in_offset, size_t length, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_SENDFILE, cb); + + req->file.fd = fd_in; + req->fs.info.fd_out = fd_out; + req->fs.info.offset = in_offset; + req->fs.info.bufsml[0].len = length; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__sendfile(req); + return req->result; + } +} + + +int uv_fs_access(uv_loop_t* loop, + uv_fs_t* req, + const char* path, + int flags, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_ACCESS, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) + return uv_translate_sys_error(err); + + req->flags = flags; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } + + fs__access(req); + return req->result; +} + + +int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, + uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_CHMOD, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return 
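/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] Every uv_fs_* wrapper in this file follows the same shape: capture
 * the arguments into the request, then either queue the request on the
 * thread pool (when a callback was supplied) or run the worker inline and
 * return its result (the synchronous cb == NULL case). A schematic toy
 * version of that pattern (all names invented, no real thread pool): */
#include <stddef.h>

typedef void (*toy_cb)(int result);

struct toy_req { toy_cb cb; int result; };

static void toy_work(struct toy_req* req) { req->result = 42; /* pretend */ }

/* Stand-in for thread-pool submission; a real loop would run toy_work on a
 * worker thread and invoke req->cb from the event loop afterwards. */
static void toy_queue(struct toy_req* req) { (void) req; }

static int toy_fs_something(struct toy_req* req, toy_cb cb) {
  req->cb = cb;
  req->result = 0;
  if (cb != NULL) {
    toy_queue(req);         /* asynchronous: result delivered via cb later */
    return 0;
  }
  toy_work(req);            /* synchronous: run inline, blocking */
  return req->result;
}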
uv_translate_sys_error(err); + } + + req->fs.info.mode = mode; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__chmod(req); + return req->result; + } +} + + +int uv_fs_fchmod(uv_loop_t* loop, uv_fs_t* req, uv_file fd, int mode, + uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_FCHMOD, cb); + + req->file.fd = fd; + req->fs.info.mode = mode; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__fchmod(req); + return req->result; + } +} + + +int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime, + double mtime, uv_fs_cb cb) { + int err; + + uv_fs_req_init(loop, req, UV_FS_UTIME, cb); + + err = fs__capture_path(req, path, NULL, cb != NULL); + if (err) { + return uv_translate_sys_error(err); + } + + req->fs.time.atime = atime; + req->fs.time.mtime = mtime; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__utime(req); + return req->result; + } +} + + +int uv_fs_futime(uv_loop_t* loop, uv_fs_t* req, uv_file fd, double atime, + double mtime, uv_fs_cb cb) { + uv_fs_req_init(loop, req, UV_FS_FUTIME, cb); + + req->file.fd = fd; + req->fs.time.atime = atime; + req->fs.time.mtime = mtime; + + if (cb) { + QUEUE_FS_TP_JOB(loop, req); + return 0; + } else { + fs__futime(req); + return req->result; + } +} diff --git a/deps/libuv/src/win/getaddrinfo.c b/deps/libuv/src/win/getaddrinfo.c new file mode 100644 index 00000000..744f8e02 --- /dev/null +++ b/deps/libuv/src/win/getaddrinfo.c @@ -0,0 +1,385 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include + +#include "uv.h" +#include "internal.h" +#include "req-inl.h" + +/* EAI_* constants. 
*/ +#include + + +int uv__getaddrinfo_translate_error(int sys_err) { + switch (sys_err) { + case 0: return 0; + case WSATRY_AGAIN: return UV_EAI_AGAIN; + case WSAEINVAL: return UV_EAI_BADFLAGS; + case WSANO_RECOVERY: return UV_EAI_FAIL; + case WSAEAFNOSUPPORT: return UV_EAI_FAMILY; + case WSA_NOT_ENOUGH_MEMORY: return UV_EAI_MEMORY; + case WSAHOST_NOT_FOUND: return UV_EAI_NONAME; + case WSATYPE_NOT_FOUND: return UV_EAI_SERVICE; + case WSAESOCKTNOSUPPORT: return UV_EAI_SOCKTYPE; + default: return uv_translate_sys_error(sys_err); + } +} + + +/* + * MinGW is missing this + */ +#if !defined(_MSC_VER) && !defined(__MINGW64_VERSION_MAJOR) + typedef struct addrinfoW { + int ai_flags; + int ai_family; + int ai_socktype; + int ai_protocol; + size_t ai_addrlen; + WCHAR* ai_canonname; + struct sockaddr* ai_addr; + struct addrinfoW* ai_next; + } ADDRINFOW, *PADDRINFOW; + + DECLSPEC_IMPORT int WSAAPI GetAddrInfoW(const WCHAR* node, + const WCHAR* service, + const ADDRINFOW* hints, + PADDRINFOW* result); + + DECLSPEC_IMPORT void WSAAPI FreeAddrInfoW(PADDRINFOW pAddrInfo); +#endif + + +/* adjust size value to be multiple of 4. Use to keep pointer aligned */ +/* Do we need different versions of this for different architectures? */ +#define ALIGNED_SIZE(X) ((((X) + 3) >> 2) << 2) + + +static void uv__getaddrinfo_work(struct uv__work* w) { + uv_getaddrinfo_t* req; + struct addrinfoW* hints; + int err; + + req = container_of(w, uv_getaddrinfo_t, work_req); + hints = req->addrinfow; + req->addrinfow = NULL; + err = GetAddrInfoW(req->node, req->service, hints, &req->addrinfow); + req->retcode = uv__getaddrinfo_translate_error(err); +} + + +/* + * Called from uv_run when complete. Call user specified callback + * then free returned addrinfo + * Returned addrinfo strings are converted from UTF-16 to UTF-8. + * + * To minimize allocation we calculate total size required, + * and copy all structs and referenced strings into the one block. + * Each size calculation is adjusted to avoid unaligned pointers. 
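/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] ALIGNED_SIZE(X) above rounds X up to the next multiple of 4 by
 * adding 3 and clearing the low two bits ((X + 3) >> 2 << 2), so that the
 * structures packed back to back in a single allocation stay aligned. A
 * quick standalone check of that identity (macro name ALIGN4 invented): */
#include <assert.h>

#define ALIGN4(x) ((((x) + 3) >> 2) << 2)

int main(void) {
  assert(ALIGN4(0) == 0);
  assert(ALIGN4(1) == 4);
  assert(ALIGN4(4) == 4);
  assert(ALIGN4(5) == 8);
  assert(ALIGN4(13) == 16);
  return 0;
}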
+ */ +static void uv__getaddrinfo_done(struct uv__work* w, int status) { + uv_getaddrinfo_t* req; + int addrinfo_len = 0; + int name_len = 0; + size_t addrinfo_struct_len = ALIGNED_SIZE(sizeof(struct addrinfo)); + struct addrinfoW* addrinfow_ptr; + struct addrinfo* addrinfo_ptr; + char* alloc_ptr = NULL; + char* cur_ptr = NULL; + + req = container_of(w, uv_getaddrinfo_t, work_req); + + /* release input parameter memory */ + uv__free(req->alloc); + req->alloc = NULL; + + if (status == UV_ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + goto complete; + } + + if (req->retcode == 0) { + /* convert addrinfoW to addrinfo */ + /* first calculate required length */ + addrinfow_ptr = req->addrinfow; + while (addrinfow_ptr != NULL) { + addrinfo_len += addrinfo_struct_len + + ALIGNED_SIZE(addrinfow_ptr->ai_addrlen); + if (addrinfow_ptr->ai_canonname != NULL) { + name_len = WideCharToMultiByte(CP_UTF8, + 0, + addrinfow_ptr->ai_canonname, + -1, + NULL, + 0, + NULL, + NULL); + if (name_len == 0) { + req->retcode = uv_translate_sys_error(GetLastError()); + goto complete; + } + addrinfo_len += ALIGNED_SIZE(name_len); + } + addrinfow_ptr = addrinfow_ptr->ai_next; + } + + /* allocate memory for addrinfo results */ + alloc_ptr = (char*)uv__malloc(addrinfo_len); + + /* do conversions */ + if (alloc_ptr != NULL) { + cur_ptr = alloc_ptr; + addrinfow_ptr = req->addrinfow; + + while (addrinfow_ptr != NULL) { + /* copy addrinfo struct data */ + assert(cur_ptr + addrinfo_struct_len <= alloc_ptr + addrinfo_len); + addrinfo_ptr = (struct addrinfo*)cur_ptr; + addrinfo_ptr->ai_family = addrinfow_ptr->ai_family; + addrinfo_ptr->ai_socktype = addrinfow_ptr->ai_socktype; + addrinfo_ptr->ai_protocol = addrinfow_ptr->ai_protocol; + addrinfo_ptr->ai_flags = addrinfow_ptr->ai_flags; + addrinfo_ptr->ai_addrlen = addrinfow_ptr->ai_addrlen; + addrinfo_ptr->ai_canonname = NULL; + addrinfo_ptr->ai_addr = NULL; + addrinfo_ptr->ai_next = NULL; + + cur_ptr += addrinfo_struct_len; + + /* copy sockaddr */ + if (addrinfo_ptr->ai_addrlen > 0) { + assert(cur_ptr + addrinfo_ptr->ai_addrlen <= + alloc_ptr + addrinfo_len); + memcpy(cur_ptr, addrinfow_ptr->ai_addr, addrinfo_ptr->ai_addrlen); + addrinfo_ptr->ai_addr = (struct sockaddr*)cur_ptr; + cur_ptr += ALIGNED_SIZE(addrinfo_ptr->ai_addrlen); + } + + /* convert canonical name to UTF-8 */ + if (addrinfow_ptr->ai_canonname != NULL) { + name_len = WideCharToMultiByte(CP_UTF8, + 0, + addrinfow_ptr->ai_canonname, + -1, + NULL, + 0, + NULL, + NULL); + assert(name_len > 0); + assert(cur_ptr + name_len <= alloc_ptr + addrinfo_len); + name_len = WideCharToMultiByte(CP_UTF8, + 0, + addrinfow_ptr->ai_canonname, + -1, + cur_ptr, + name_len, + NULL, + NULL); + assert(name_len > 0); + addrinfo_ptr->ai_canonname = cur_ptr; + cur_ptr += ALIGNED_SIZE(name_len); + } + assert(cur_ptr <= alloc_ptr + addrinfo_len); + + /* set next ptr */ + addrinfow_ptr = addrinfow_ptr->ai_next; + if (addrinfow_ptr != NULL) { + addrinfo_ptr->ai_next = (struct addrinfo*)cur_ptr; + } + } + req->addrinfo = (struct addrinfo*)alloc_ptr; + } else { + req->retcode = UV_EAI_MEMORY; + } + } + + /* return memory to system */ + if (req->addrinfow != NULL) { + FreeAddrInfoW(req->addrinfow); + req->addrinfow = NULL; + } + +complete: + uv__req_unregister(req->loop, req); + + /* finally do callback with converted result */ + if (req->getaddrinfo_cb) + req->getaddrinfo_cb(req, req->retcode, req->addrinfo); +} + + +void uv_freeaddrinfo(struct addrinfo* ai) { + char* alloc_ptr = (char*)ai; + + /* release copied 
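/* [Editor's aside -- illustrative sketch, not part of the vendored libuv
 * diff.] uv__getaddrinfo_done() above sizes the whole converted addrinfo
 * chain first, then copies every node, sockaddr and canonical name into one
 * allocation, which is why uv_freeaddrinfo() below can release the entire
 * chain with a single uv__free(). A miniature version of that packing idea,
 * with two fixed records instead of an addrinfo chain (invented names
 * throughout, alignment padding omitted since only char data follows): */
#include <stdlib.h>
#include <string.h>

struct packed_node { struct packed_node* next; char* name; };

static struct packed_node* pack_two(const char* a, const char* b) {
  size_t la = strlen(a) + 1, lb = strlen(b) + 1;
  /* One allocation holds both nodes and both strings. */
  char* block = malloc(2 * sizeof(struct packed_node) + la + lb);
  struct packed_node* n0;
  struct packed_node* n1;

  if (block == NULL)
    return NULL;

  n0 = (struct packed_node*) block;
  n1 = n0 + 1;
  n0->next = n1;
  n0->name = block + 2 * sizeof(struct packed_node);
  memcpy(n0->name, a, la);
  n1->next = NULL;
  n1->name = n0->name + la;
  memcpy(n1->name, b, lb);
  return n0;   /* free(n0) releases the whole list */
}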
result memory */ + uv__free(alloc_ptr); +} + + +/* + * Entry point for getaddrinfo + * we convert the UTF-8 strings to UNICODE + * and save the UNICODE string pointers in the req + * We also copy hints so that caller does not need to keep memory until the + * callback. + * return 0 if a callback will be made + * return error code if validation fails + * + * To minimize allocation we calculate total size required, + * and copy all structs and referenced strings into the one block. + * Each size calculation is adjusted to avoid unaligned pointers. + */ +int uv_getaddrinfo(uv_loop_t* loop, + uv_getaddrinfo_t* req, + uv_getaddrinfo_cb getaddrinfo_cb, + const char* node, + const char* service, + const struct addrinfo* hints) { + int nodesize = 0; + int servicesize = 0; + int hintssize = 0; + char* alloc_ptr = NULL; + int err; + + if (req == NULL || (node == NULL && service == NULL)) { + err = WSAEINVAL; + goto error; + } + + uv_req_init(loop, (uv_req_t*)req); + + req->getaddrinfo_cb = getaddrinfo_cb; + req->addrinfo = NULL; + req->type = UV_GETADDRINFO; + req->loop = loop; + req->retcode = 0; + + /* calculate required memory size for all input values */ + if (node != NULL) { + nodesize = ALIGNED_SIZE(MultiByteToWideChar(CP_UTF8, 0, node, -1, NULL, 0) * + sizeof(WCHAR)); + if (nodesize == 0) { + err = GetLastError(); + goto error; + } + } + + if (service != NULL) { + servicesize = ALIGNED_SIZE(MultiByteToWideChar(CP_UTF8, + 0, + service, + -1, + NULL, + 0) * + sizeof(WCHAR)); + if (servicesize == 0) { + err = GetLastError(); + goto error; + } + } + if (hints != NULL) { + hintssize = ALIGNED_SIZE(sizeof(struct addrinfoW)); + } + + /* allocate memory for inputs, and partition it as needed */ + alloc_ptr = (char*)uv__malloc(nodesize + servicesize + hintssize); + if (!alloc_ptr) { + err = WSAENOBUFS; + goto error; + } + + /* save alloc_ptr now so we can free if error */ + req->alloc = (void*)alloc_ptr; + + /* convert node string to UTF16 into allocated memory and save pointer in */ + /* the request. */ + if (node != NULL) { + req->node = (WCHAR*)alloc_ptr; + if (MultiByteToWideChar(CP_UTF8, + 0, + node, + -1, + (WCHAR*) alloc_ptr, + nodesize / sizeof(WCHAR)) == 0) { + err = GetLastError(); + goto error; + } + alloc_ptr += nodesize; + } else { + req->node = NULL; + } + + /* convert service string to UTF16 into allocated memory and save pointer */ + /* in the req. 
*/ + if (service != NULL) { + req->service = (WCHAR*)alloc_ptr; + if (MultiByteToWideChar(CP_UTF8, + 0, + service, + -1, + (WCHAR*) alloc_ptr, + servicesize / sizeof(WCHAR)) == 0) { + err = GetLastError(); + goto error; + } + alloc_ptr += servicesize; + } else { + req->service = NULL; + } + + /* copy hints to allocated memory and save pointer in req */ + if (hints != NULL) { + req->addrinfow = (struct addrinfoW*)alloc_ptr; + req->addrinfow->ai_family = hints->ai_family; + req->addrinfow->ai_socktype = hints->ai_socktype; + req->addrinfow->ai_protocol = hints->ai_protocol; + req->addrinfow->ai_flags = hints->ai_flags; + req->addrinfow->ai_addrlen = 0; + req->addrinfow->ai_canonname = NULL; + req->addrinfow->ai_addr = NULL; + req->addrinfow->ai_next = NULL; + } else { + req->addrinfow = NULL; + } + + uv__req_register(loop, req); + + if (getaddrinfo_cb) { + uv__work_submit(loop, + &req->work_req, + uv__getaddrinfo_work, + uv__getaddrinfo_done); + return 0; + } else { + uv__getaddrinfo_work(&req->work_req); + uv__getaddrinfo_done(&req->work_req, 0); + return req->retcode; + } + +error: + if (req != NULL) { + uv__free(req->alloc); + req->alloc = NULL; + } + return uv_translate_sys_error(err); +} diff --git a/deps/libuv/src/win/getnameinfo.c b/deps/libuv/src/win/getnameinfo.c new file mode 100644 index 00000000..66b64b88 --- /dev/null +++ b/deps/libuv/src/win/getnameinfo.c @@ -0,0 +1,150 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining a copy +* of this software and associated documentation files (the "Software"), to +* deal in the Software without restriction, including without limitation the +* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +* sell copies of the Software, and to permit persons to whom the Software is +* furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +* IN THE SOFTWARE. 
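Editorial note, not part of the vendored libuv sources: a minimal caller of the request/callback flow implemented by uv_getaddrinfo above might look like the sketch below. The host name, port, and callback names are made up for illustration.

#include <stdio.h>
#include <string.h>
#include "uv.h"

static void on_resolved(uv_getaddrinfo_t* req, int status, struct addrinfo* res) {
  char addr[17] = { '\0' };

  if (status < 0) {
    fprintf(stderr, "getaddrinfo: %s\n", uv_strerror(status));
    return;
  }

  uv_ip4_name((struct sockaddr_in*) res->ai_addr, addr, 16);
  printf("resolved to %s\n", addr);
  uv_freeaddrinfo(res);  /* releases the single block built in uv__getaddrinfo_done */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_getaddrinfo_t req;
  struct addrinfo hints;

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_INET;
  hints.ai_socktype = SOCK_STREAM;

  /* Returns 0 when the callback will be invoked; an error code if validation fails. */
  if (uv_getaddrinfo(loop, &req, on_resolved, "example.org", "80", &hints) != 0)
    return 1;
  return uv_run(loop, UV_RUN_DEFAULT);
}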
+*/ + +#include +#include + +#include "uv.h" +#include "internal.h" +#include "req-inl.h" + +#ifndef GetNameInfo +int WSAAPI GetNameInfoW( + const SOCKADDR *pSockaddr, + socklen_t SockaddrLength, + PWCHAR pNodeBuffer, + DWORD NodeBufferSize, + PWCHAR pServiceBuffer, + DWORD ServiceBufferSize, + INT Flags +); +#endif + +static void uv__getnameinfo_work(struct uv__work* w) { + uv_getnameinfo_t* req; + WCHAR host[NI_MAXHOST]; + WCHAR service[NI_MAXSERV]; + int ret = 0; + + req = container_of(w, uv_getnameinfo_t, work_req); + if (GetNameInfoW((struct sockaddr*)&req->storage, + sizeof(req->storage), + host, + ARRAY_SIZE(host), + service, + ARRAY_SIZE(service), + req->flags)) { + ret = WSAGetLastError(); + } + req->retcode = uv__getaddrinfo_translate_error(ret); + + /* convert results to UTF-8 */ + WideCharToMultiByte(CP_UTF8, + 0, + host, + -1, + req->host, + sizeof(req->host), + NULL, + NULL); + + WideCharToMultiByte(CP_UTF8, + 0, + service, + -1, + req->service, + sizeof(req->service), + NULL, + NULL); +} + + +/* +* Called from uv_run when complete. +*/ +static void uv__getnameinfo_done(struct uv__work* w, int status) { + uv_getnameinfo_t* req; + char* host; + char* service; + + req = container_of(w, uv_getnameinfo_t, work_req); + uv__req_unregister(req->loop, req); + host = service = NULL; + + if (status == UV_ECANCELED) { + assert(req->retcode == 0); + req->retcode = UV_EAI_CANCELED; + } else if (req->retcode == 0) { + host = req->host; + service = req->service; + } + + if (req->getnameinfo_cb) + req->getnameinfo_cb(req, req->retcode, host, service); +} + + +/* +* Entry point for getnameinfo +* return 0 if a callback will be made +* return error code if validation fails +*/ +int uv_getnameinfo(uv_loop_t* loop, + uv_getnameinfo_t* req, + uv_getnameinfo_cb getnameinfo_cb, + const struct sockaddr* addr, + int flags) { + if (req == NULL || addr == NULL) + return UV_EINVAL; + + if (addr->sa_family == AF_INET) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in)); + } else if (addr->sa_family == AF_INET6) { + memcpy(&req->storage, + addr, + sizeof(struct sockaddr_in6)); + } else { + return UV_EINVAL; + } + + uv_req_init(loop, (uv_req_t*)req); + uv__req_register(loop, req); + + req->getnameinfo_cb = getnameinfo_cb; + req->flags = flags; + req->type = UV_GETNAMEINFO; + req->loop = loop; + req->retcode = 0; + + if (getnameinfo_cb) { + uv__work_submit(loop, + &req->work_req, + uv__getnameinfo_work, + uv__getnameinfo_done); + return 0; + } else { + uv__getnameinfo_work(&req->work_req); + uv__getnameinfo_done(&req->work_req, 0); + return req->retcode; + } +} diff --git a/deps/libuv/src/win/handle-inl.h b/deps/libuv/src/win/handle-inl.h new file mode 100644 index 00000000..8d0334cc --- /dev/null +++ b/deps/libuv/src/win/handle-inl.h @@ -0,0 +1,179 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
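Editorial note, not part of the vendored libuv sources: the reverse lookup above follows the same request/callback pattern as getaddrinfo. A minimal caller, with an arbitrary address:

#include <stdio.h>
#include "uv.h"

static void on_name(uv_getnameinfo_t* req, int status,
                    const char* hostname, const char* service) {
  if (status == 0)
    printf("%s:%s\n", hostname, service);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_getnameinfo_t req;
  struct sockaddr_in addr;

  uv_ip4_addr("127.0.0.1", 80, &addr);

  /* 0 means the callback will fire later; the lookup itself runs on the
   * thread pool via uv__work_submit, exactly as in uv_getnameinfo above. */
  if (uv_getnameinfo(loop, &req, on_name, (const struct sockaddr*) &addr, 0) != 0)
    return 1;
  return uv_run(loop, UV_RUN_DEFAULT);
}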
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_WIN_HANDLE_INL_H_ +#define UV_WIN_HANDLE_INL_H_ + +#include +#include + +#include "uv.h" +#include "internal.h" + + +#define DECREASE_ACTIVE_COUNT(loop, handle) \ + do { \ + if (--(handle)->activecnt == 0 && \ + !((handle)->flags & UV__HANDLE_CLOSING)) { \ + uv__handle_stop((handle)); \ + } \ + assert((handle)->activecnt >= 0); \ + } while (0) + + +#define INCREASE_ACTIVE_COUNT(loop, handle) \ + do { \ + if ((handle)->activecnt++ == 0) { \ + uv__handle_start((handle)); \ + } \ + assert((handle)->activecnt > 0); \ + } while (0) + + +#define DECREASE_PENDING_REQ_COUNT(handle) \ + do { \ + assert(handle->reqs_pending > 0); \ + handle->reqs_pending--; \ + \ + if (handle->flags & UV__HANDLE_CLOSING && \ + handle->reqs_pending == 0) { \ + uv_want_endgame(loop, (uv_handle_t*)handle); \ + } \ + } while (0) + + +#define uv__handle_closing(handle) \ + do { \ + assert(!((handle)->flags & UV__HANDLE_CLOSING)); \ + \ + if (!(((handle)->flags & UV__HANDLE_ACTIVE) && \ + ((handle)->flags & UV__HANDLE_REF))) \ + uv__active_handle_add((uv_handle_t*) (handle)); \ + \ + (handle)->flags |= UV__HANDLE_CLOSING; \ + (handle)->flags &= ~UV__HANDLE_ACTIVE; \ + } while (0) + + +#define uv__handle_close(handle) \ + do { \ + QUEUE_REMOVE(&(handle)->handle_queue); \ + uv__active_handle_rm((uv_handle_t*) (handle)); \ + \ + (handle)->flags |= UV_HANDLE_CLOSED; \ + \ + if ((handle)->close_cb) \ + (handle)->close_cb((uv_handle_t*) (handle)); \ + } while (0) + + +INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) { + if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) { + handle->flags |= UV_HANDLE_ENDGAME_QUEUED; + + handle->endgame_next = loop->endgame_handles; + loop->endgame_handles = handle; + } +} + + +INLINE static void uv_process_endgames(uv_loop_t* loop) { + uv_handle_t* handle; + + while (loop->endgame_handles) { + handle = loop->endgame_handles; + loop->endgame_handles = handle->endgame_next; + + handle->flags &= ~UV_HANDLE_ENDGAME_QUEUED; + + switch (handle->type) { + case UV_TCP: + uv_tcp_endgame(loop, (uv_tcp_t*) handle); + break; + + case UV_NAMED_PIPE: + uv_pipe_endgame(loop, (uv_pipe_t*) handle); + break; + + case UV_TTY: + uv_tty_endgame(loop, (uv_tty_t*) handle); + break; + + case UV_UDP: + uv_udp_endgame(loop, (uv_udp_t*) handle); + break; + + case UV_POLL: + uv_poll_endgame(loop, (uv_poll_t*) handle); + break; + + case UV_TIMER: + uv_timer_endgame(loop, (uv_timer_t*) handle); + break; + + case UV_PREPARE: + case UV_CHECK: + case UV_IDLE: + uv_loop_watcher_endgame(loop, handle); + break; + + case UV_ASYNC: + uv_async_endgame(loop, (uv_async_t*) handle); + break; + + case UV_SIGNAL: + uv_signal_endgame(loop, (uv_signal_t*) handle); + break; + + case UV_PROCESS: + uv_process_endgame(loop, (uv_process_t*) handle); + break; + + case UV_FS_EVENT: + uv_fs_event_endgame(loop, (uv_fs_event_t*) handle); + break; + + case UV_FS_POLL: + uv__fs_poll_endgame(loop, (uv_fs_poll_t*) handle); + break; + + default: + assert(0); + break; + } + } +} + +INLINE static HANDLE uv__get_osfhandle(int fd) +{ 
+ /* _get_osfhandle() raises an assert in debug builds if the FD is invalid. */ + /* But it also correctly checks the FD and returns INVALID_HANDLE_VALUE */ + /* for invalid FDs in release builds (or if you let the assert continue). */ + /* So this wrapper function disables asserts when calling _get_osfhandle. */ + + HANDLE handle; + UV_BEGIN_DISABLE_CRT_ASSERT(); + handle = (HANDLE) _get_osfhandle(fd); + UV_END_DISABLE_CRT_ASSERT(); + return handle; +} + +#endif /* UV_WIN_HANDLE_INL_H_ */ diff --git a/deps/libuv/src/win/handle.c b/deps/libuv/src/win/handle.c new file mode 100644 index 00000000..72b49d97 --- /dev/null +++ b/deps/libuv/src/win/handle.c @@ -0,0 +1,154 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" + + +uv_handle_type uv_guess_handle(uv_file file) { + HANDLE handle; + DWORD mode; + + if (file < 0) { + return UV_UNKNOWN_HANDLE; + } + + handle = uv__get_osfhandle(file); + + switch (GetFileType(handle)) { + case FILE_TYPE_CHAR: + if (GetConsoleMode(handle, &mode)) { + return UV_TTY; + } else { + return UV_FILE; + } + + case FILE_TYPE_PIPE: + return UV_NAMED_PIPE; + + case FILE_TYPE_DISK: + return UV_FILE; + + default: + return UV_UNKNOWN_HANDLE; + } +} + + +int uv_is_active(const uv_handle_t* handle) { + return (handle->flags & UV__HANDLE_ACTIVE) && + !(handle->flags & UV__HANDLE_CLOSING); +} + + +void uv_close(uv_handle_t* handle, uv_close_cb cb) { + uv_loop_t* loop = handle->loop; + + if (handle->flags & UV__HANDLE_CLOSING) { + assert(0); + return; + } + + handle->close_cb = cb; + + /* Handle-specific close actions */ + switch (handle->type) { + case UV_TCP: + uv_tcp_close(loop, (uv_tcp_t*)handle); + return; + + case UV_NAMED_PIPE: + uv_pipe_close(loop, (uv_pipe_t*) handle); + return; + + case UV_TTY: + uv_tty_close((uv_tty_t*) handle); + return; + + case UV_UDP: + uv_udp_close(loop, (uv_udp_t*) handle); + return; + + case UV_POLL: + uv_poll_close(loop, (uv_poll_t*) handle); + return; + + case UV_TIMER: + uv_timer_stop((uv_timer_t*)handle); + uv__handle_closing(handle); + uv_want_endgame(loop, handle); + return; + + case UV_PREPARE: + uv_prepare_stop((uv_prepare_t*)handle); + uv__handle_closing(handle); + uv_want_endgame(loop, handle); + return; + + case UV_CHECK: + uv_check_stop((uv_check_t*)handle); + uv__handle_closing(handle); + uv_want_endgame(loop, handle); + return; + + case UV_IDLE: + uv_idle_stop((uv_idle_t*)handle); + uv__handle_closing(handle); + uv_want_endgame(loop, handle); + return; + + case UV_ASYNC: + uv_async_close(loop, (uv_async_t*) handle); + return; + + case UV_SIGNAL: + uv_signal_close(loop, (uv_signal_t*) handle); + return; + + case UV_PROCESS: + uv_process_close(loop, (uv_process_t*) handle); + return; + + case UV_FS_EVENT: + uv_fs_event_close(loop, (uv_fs_event_t*) handle); + return; + + case UV_FS_POLL: + uv__fs_poll_close((uv_fs_poll_t*) handle); + uv__handle_closing(handle); + uv_want_endgame(loop, handle); + return; + + default: + /* Not supported */ + abort(); + } +} + + +int uv_is_closing(const uv_handle_t* handle) { + return !!(handle->flags & (UV__HANDLE_CLOSING | UV_HANDLE_CLOSED)); +} diff --git a/deps/libuv/src/win/internal.h b/deps/libuv/src/win/internal.h new file mode 100644 index 00000000..b8cfde90 --- /dev/null +++ b/deps/libuv/src/win/internal.h @@ -0,0 +1,394 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
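Editorial note, not part of the vendored libuv sources: the close path above is deliberately asynchronous. uv_close only marks the handle as closing and queues the type-specific endgame; the close callback runs from uv_run once pending requests have drained, so resources must be released in the callback, not at the call site. A minimal sketch:

#include <stdlib.h>
#include "uv.h"

static void on_closed(uv_handle_t* handle) {
  /* Only now is it safe to release the handle's memory. */
  free(handle);
}

static void stop_and_close(uv_timer_t* timer) {
  /* Returns immediately; on_closed fires on a later loop iteration,
   * after uv_process_endgames (see handle-inl.h above) has run. */
  uv_close((uv_handle_t*) timer, on_closed);
}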
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_WIN_INTERNAL_H_ +#define UV_WIN_INTERNAL_H_ + +#include "uv.h" +#include "../uv-common.h" + +#include "tree.h" +#include "winapi.h" +#include "winsock.h" + +#ifdef _MSC_VER +# define INLINE __inline +# define UV_THREAD_LOCAL __declspec( thread ) +#else +# define INLINE inline +# define UV_THREAD_LOCAL __thread +#endif + + +#ifdef _DEBUG + +extern UV_THREAD_LOCAL int uv__crt_assert_enabled; + +#define UV_BEGIN_DISABLE_CRT_ASSERT() \ + { \ + int uv__saved_crt_assert_enabled = uv__crt_assert_enabled; \ + uv__crt_assert_enabled = FALSE; + + +#define UV_END_DISABLE_CRT_ASSERT() \ + uv__crt_assert_enabled = uv__saved_crt_assert_enabled; \ + } + +#else +#define UV_BEGIN_DISABLE_CRT_ASSERT() +#define UV_END_DISABLE_CRT_ASSERT() +#endif + +/* + * Handles + * (also see handle-inl.h) + */ + +/* Used by all handles. */ +#define UV_HANDLE_CLOSED 0x00000002 +#define UV_HANDLE_ENDGAME_QUEUED 0x00000008 + +/* uv-common.h: #define UV__HANDLE_CLOSING 0x00000001 */ +/* uv-common.h: #define UV__HANDLE_ACTIVE 0x00000040 */ +/* uv-common.h: #define UV__HANDLE_REF 0x00000020 */ +/* uv-common.h: #define UV_HANDLE_INTERNAL 0x00000080 */ + +/* Used by streams and UDP handles. */ +#define UV_HANDLE_READING 0x00000100 +#define UV_HANDLE_BOUND 0x00000200 +#define UV_HANDLE_LISTENING 0x00000800 +#define UV_HANDLE_CONNECTION 0x00001000 +#define UV_HANDLE_READABLE 0x00008000 +#define UV_HANDLE_WRITABLE 0x00010000 +#define UV_HANDLE_READ_PENDING 0x00020000 +#define UV_HANDLE_SYNC_BYPASS_IOCP 0x00040000 +#define UV_HANDLE_ZERO_READ 0x00080000 +#define UV_HANDLE_EMULATE_IOCP 0x00100000 +#define UV_HANDLE_BLOCKING_WRITES 0x00200000 +#define UV_HANDLE_CANCELLATION_PENDING 0x00400000 + +/* Used by uv_tcp_t and uv_udp_t handles */ +#define UV_HANDLE_IPV6 0x01000000 + +/* Only used by uv_tcp_t handles. */ +#define UV_HANDLE_TCP_NODELAY 0x02000000 +#define UV_HANDLE_TCP_KEEPALIVE 0x04000000 +#define UV_HANDLE_TCP_SINGLE_ACCEPT 0x08000000 +#define UV_HANDLE_TCP_ACCEPT_STATE_CHANGING 0x10000000 +#define UV_HANDLE_TCP_SOCKET_CLOSED 0x20000000 +#define UV_HANDLE_SHARED_TCP_SOCKET 0x40000000 + +/* Only used by uv_pipe_t handles. */ +#define UV_HANDLE_NON_OVERLAPPED_PIPE 0x01000000 +#define UV_HANDLE_PIPESERVER 0x02000000 +#define UV_HANDLE_PIPE_READ_CANCELABLE 0x04000000 + +/* Only used by uv_tty_t handles. */ +#define UV_HANDLE_TTY_READABLE 0x01000000 +#define UV_HANDLE_TTY_RAW 0x02000000 +#define UV_HANDLE_TTY_SAVED_POSITION 0x04000000 +#define UV_HANDLE_TTY_SAVED_ATTRIBUTES 0x08000000 + +/* Only used by uv_poll_t handles. 
*/ +#define UV_HANDLE_POLL_SLOW 0x02000000 + + +/* + * Requests: see req-inl.h + */ + + +/* + * Streams: see stream-inl.h + */ + + +/* + * TCP + */ + +typedef struct { + WSAPROTOCOL_INFOW socket_info; + int delayed_error; +} uv__ipc_socket_info_ex; + +int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb); +int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client); +int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb, + uv_read_cb read_cb); +int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle, + const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb); +int uv__tcp_try_write(uv_tcp_t* handle, const uv_buf_t bufs[], + unsigned int nbufs); + +void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req); +void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle, + uv_write_t* req); +void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle, + uv_req_t* req); +void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle, + uv_connect_t* req); + +void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp); +void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle); + +int uv_tcp_import(uv_tcp_t* tcp, uv__ipc_socket_info_ex* socket_info_ex, + int tcp_connection); + +int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid, + LPWSAPROTOCOL_INFOW protocol_info); + + +/* + * UDP + */ +void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req); +void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle, + uv_udp_send_t* req); + +void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle); +void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle); + + +/* + * Pipes + */ +int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access, + char* name, size_t nameSize); + +int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb); +int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client); +int uv_pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb, + uv_read_cb read_cb); +int uv_pipe_write(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle, + const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb); +int uv_pipe_write2(uv_loop_t* loop, uv_write_t* req, uv_pipe_t* handle, + const uv_buf_t bufs[], unsigned int nbufs, uv_stream_t* send_handle, + uv_write_cb cb); +void uv__pipe_pause_read(uv_pipe_t* handle); +void uv__pipe_unpause_read(uv_pipe_t* handle); +void uv__pipe_stop_read(uv_pipe_t* handle); + +void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_req_t* req); +void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_write_t* req); +void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_req_t* raw_req); +void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_connect_t* req); +void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_shutdown_t* req); + +void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle); +void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle); +void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle); + + +/* + * TTY + */ +void uv_console_init(); + +int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb, + uv_read_cb read_cb); +int uv_tty_read_stop(uv_tty_t* handle); +int uv_tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle, + const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb); +int uv__tty_try_write(uv_tty_t* handle, const uv_buf_t bufs[], + unsigned int nbufs); +void uv_tty_close(uv_tty_t* handle); + +void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* 
handle, + uv_req_t* req); +void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle, + uv_write_t* req); +/* TODO: remove me */ +void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle, + uv_req_t* raw_req); +/* TODO: remove me */ +void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle, + uv_connect_t* req); + +void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle); + + +/* + * Poll watchers + */ +void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, + uv_req_t* req); + +int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle); +void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle); + + +/* + * Timers + */ +void uv_timer_endgame(uv_loop_t* loop, uv_timer_t* handle); + +DWORD uv__next_timeout(const uv_loop_t* loop); +void uv_process_timers(uv_loop_t* loop); + + +/* + * Loop watchers + */ +void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle); + +void uv_prepare_invoke(uv_loop_t* loop); +void uv_check_invoke(uv_loop_t* loop); +void uv_idle_invoke(uv_loop_t* loop); + +void uv__once_init(); + + +/* + * Async watcher + */ +void uv_async_close(uv_loop_t* loop, uv_async_t* handle); +void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle); + +void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle, + uv_req_t* req); + + +/* + * Signal watcher + */ +void uv_signals_init(); +int uv__signal_dispatch(int signum); + +void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle); +void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle); + +void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle, + uv_req_t* req); + + +/* + * Spawn + */ +void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle); +void uv_process_close(uv_loop_t* loop, uv_process_t* handle); +void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle); + + +/* + * Error + */ +int uv_translate_sys_error(int sys_errno); + + +/* + * FS + */ +void uv_fs_init(); + + +/* + * FS Event + */ +void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req, + uv_fs_event_t* handle); +void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle); +void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle); + + +/* + * Stat poller. + */ +void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle); + + +/* + * Utilities. + */ +void uv__util_init(); + +uint64_t uv__hrtime(double scale); +int uv_parent_pid(); +int uv_current_pid(); +__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall); +int uv__getpwuid_r(uv_passwd_t* pwd); +int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8); + + +/* + * Process stdio handles. 
+ */ +int uv__stdio_create(uv_loop_t* loop, + const uv_process_options_t* options, + BYTE** buffer_ptr); +void uv__stdio_destroy(BYTE* buffer); +void uv__stdio_noinherit(BYTE* buffer); +int uv__stdio_verify(BYTE* buffer, WORD size); +WORD uv__stdio_size(BYTE* buffer); +HANDLE uv__stdio_handle(BYTE* buffer, int fd); + + +/* + * Winapi and ntapi utility functions + */ +void uv_winapi_init(); + + +/* + * Winsock utility functions + */ +void uv_winsock_init(); + +int uv_ntstatus_to_winsock_error(NTSTATUS status); + +BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target); +BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target); + +int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers, + DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine); +int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers, + DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr, + int* addr_len, WSAOVERLAPPED *overlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine); + +int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in, + AFD_POLL_INFO* info_out, OVERLAPPED* overlapped); + +/* Whether there are any non-IFS LSPs stacked on TCP */ +extern int uv_tcp_non_ifs_lsp_ipv4; +extern int uv_tcp_non_ifs_lsp_ipv6; + +/* Ip address used to bind to any port at any interface */ +extern struct sockaddr_in uv_addr_ip4_any_; +extern struct sockaddr_in6 uv_addr_ip6_any_; + +/* + * Wake all loops with fake message + */ +void uv__wake_all_loops(); + +/* + * Init system wake-up detection + */ +void uv__init_detect_system_wakeup(); + +#endif /* UV_WIN_INTERNAL_H_ */ diff --git a/deps/libuv/src/win/loop-watcher.c b/deps/libuv/src/win/loop-watcher.c new file mode 100644 index 00000000..20e4509f --- /dev/null +++ b/deps/libuv/src/win/loop-watcher.c @@ -0,0 +1,122 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" + + +void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) { + if (handle->flags & UV__HANDLE_CLOSING) { + assert(!(handle->flags & UV_HANDLE_CLOSED)); + handle->flags |= UV_HANDLE_CLOSED; + uv__handle_close(handle); + } +} + + +#define UV_LOOP_WATCHER_DEFINE(name, NAME) \ + int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \ + uv__handle_init(loop, (uv_handle_t*) handle, UV_##NAME); \ + \ + return 0; \ + } \ + \ + \ + int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \ + uv_loop_t* loop = handle->loop; \ + uv_##name##_t* old_head; \ + \ + assert(handle->type == UV_##NAME); \ + \ + if (uv__is_active(handle)) \ + return 0; \ + \ + if (cb == NULL) \ + return UV_EINVAL; \ + \ + old_head = loop->name##_handles; \ + \ + handle->name##_next = old_head; \ + handle->name##_prev = NULL; \ + \ + if (old_head) { \ + old_head->name##_prev = handle; \ + } \ + \ + loop->name##_handles = handle; \ + \ + handle->name##_cb = cb; \ + uv__handle_start(handle); \ + \ + return 0; \ + } \ + \ + \ + int uv_##name##_stop(uv_##name##_t* handle) { \ + uv_loop_t* loop = handle->loop; \ + \ + assert(handle->type == UV_##NAME); \ + \ + if (!uv__is_active(handle)) \ + return 0; \ + \ + /* Update loop head if needed */ \ + if (loop->name##_handles == handle) { \ + loop->name##_handles = handle->name##_next; \ + } \ + \ + /* Update the iterator-next pointer of needed */ \ + if (loop->next_##name##_handle == handle) { \ + loop->next_##name##_handle = handle->name##_next; \ + } \ + \ + if (handle->name##_prev) { \ + handle->name##_prev->name##_next = handle->name##_next; \ + } \ + if (handle->name##_next) { \ + handle->name##_next->name##_prev = handle->name##_prev; \ + } \ + \ + uv__handle_stop(handle); \ + \ + return 0; \ + } \ + \ + \ + void uv_##name##_invoke(uv_loop_t* loop) { \ + uv_##name##_t* handle; \ + \ + (loop)->next_##name##_handle = (loop)->name##_handles; \ + \ + while ((loop)->next_##name##_handle != NULL) { \ + handle = (loop)->next_##name##_handle; \ + (loop)->next_##name##_handle = handle->name##_next; \ + \ + handle->name##_cb(handle); \ + } \ + } + +UV_LOOP_WATCHER_DEFINE(prepare, PREPARE) +UV_LOOP_WATCHER_DEFINE(check, CHECK) +UV_LOOP_WATCHER_DEFINE(idle, IDLE) diff --git a/deps/libuv/src/win/pipe.c b/deps/libuv/src/win/pipe.c new file mode 100644 index 00000000..2442be73 --- /dev/null +++ b/deps/libuv/src/win/pipe.c @@ -0,0 +1,2130 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
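Editorial note, not part of the vendored libuv sources: UV_LOOP_WATCHER_DEFINE above stamps out identical init/start/stop/invoke functions for prepare, check, and idle handles, so all three watcher types share one implementation. Using one of the generated pairs looks like this:

#include <stdio.h>
#include "uv.h"

static void on_idle(uv_idle_t* handle) {
  static int ticks;

  printf("idle tick %d\n", ++ticks);
  if (ticks == 3)
    uv_idle_stop(handle);  /* generated by UV_LOOP_WATCHER_DEFINE(idle, IDLE) */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_idle_t idler;

  uv_idle_init(loop, &idler);
  uv_idle_start(&idler, on_idle);  /* uv_idle_invoke runs the callback each loop turn */
  return uv_run(loop, UV_RUN_DEFAULT);
}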
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "stream-inl.h" +#include "req-inl.h" + +typedef struct uv__ipc_queue_item_s uv__ipc_queue_item_t; + +struct uv__ipc_queue_item_s { + /* + * NOTE: It is important for socket_info_ex to be the first field, + * because we will we assigning it to the pending_ipc_info.socket_info + */ + uv__ipc_socket_info_ex socket_info_ex; + QUEUE member; + int tcp_connection; +}; + +/* A zero-size buffer for use by uv_pipe_read */ +static char uv_zero_[] = ""; + +/* Null uv_buf_t */ +static const uv_buf_t uv_null_buf_ = { 0, NULL }; + +/* The timeout that the pipe will wait for the remote end to write data */ +/* when the local ends wants to shut it down. */ +static const int64_t eof_timeout = 50; /* ms */ + +static const int default_pending_pipe_instances = 4; + +/* Pipe prefix */ +static char pipe_prefix[] = "\\\\?\\pipe"; +static const int pipe_prefix_len = sizeof(pipe_prefix) - 1; + +/* IPC protocol flags. */ +#define UV_IPC_RAW_DATA 0x0001 +#define UV_IPC_TCP_SERVER 0x0002 +#define UV_IPC_TCP_CONNECTION 0x0004 + +/* IPC frame header. */ +typedef struct { + int flags; + uint64_t raw_data_length; +} uv_ipc_frame_header_t; + +/* IPC frame, which contains an imported TCP socket stream. */ +typedef struct { + uv_ipc_frame_header_t header; + uv__ipc_socket_info_ex socket_info_ex; +} uv_ipc_frame_uv_stream; + +static void eof_timer_init(uv_pipe_t* pipe); +static void eof_timer_start(uv_pipe_t* pipe); +static void eof_timer_stop(uv_pipe_t* pipe); +static void eof_timer_cb(uv_timer_t* timer); +static void eof_timer_destroy(uv_pipe_t* pipe); +static void eof_timer_close_cb(uv_handle_t* handle); + + +static void uv_unique_pipe_name(char* ptr, char* name, size_t size) { + snprintf(name, size, "\\\\?\\pipe\\uv\\%p-%lu", ptr, GetCurrentProcessId()); +} + + +int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) { + uv_stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE); + + handle->reqs_pending = 0; + handle->handle = INVALID_HANDLE_VALUE; + handle->name = NULL; + handle->pipe.conn.ipc_pid = 0; + handle->pipe.conn.remaining_ipc_rawdata_bytes = 0; + QUEUE_INIT(&handle->pipe.conn.pending_ipc_info.queue); + handle->pipe.conn.pending_ipc_info.queue_len = 0; + handle->ipc = ipc; + handle->pipe.conn.non_overlapped_writes_tail = NULL; + handle->pipe.conn.readfile_thread = NULL; + + uv_req_init(loop, (uv_req_t*) &handle->pipe.conn.ipc_header_write_req); + + return 0; +} + + +static void uv_pipe_connection_init(uv_pipe_t* handle) { + uv_connection_init((uv_stream_t*) handle); + handle->read_req.data = handle; + handle->pipe.conn.eof_timer = NULL; + assert(!(handle->flags & UV_HANDLE_PIPESERVER)); + if (pCancelSynchronousIo && + handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) { + uv_mutex_init(&handle->pipe.conn.readfile_mutex); + handle->flags |= UV_HANDLE_PIPE_READ_CANCELABLE; + } +} + + +static HANDLE open_named_pipe(const WCHAR* name, DWORD* duplex_flags) { + HANDLE pipeHandle; + + /* + * Assume that we have a duplex pipe first, so attempt to + * connect with GENERIC_READ | GENERIC_WRITE. 
+   */
+  pipeHandle = CreateFileW(name,
+                           GENERIC_READ | GENERIC_WRITE,
+                           0,
+                           NULL,
+                           OPEN_EXISTING,
+                           FILE_FLAG_OVERLAPPED,
+                           NULL);
+  if (pipeHandle != INVALID_HANDLE_VALUE) {
+    *duplex_flags = UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
+    return pipeHandle;
+  }
+
+  /*
+   * If the pipe is not duplex CreateFileW fails with
+   * ERROR_ACCESS_DENIED. In that case try to connect
+   * as a read-only or write-only.
+   */
+  if (GetLastError() == ERROR_ACCESS_DENIED) {
+    pipeHandle = CreateFileW(name,
+                             GENERIC_READ | FILE_WRITE_ATTRIBUTES,
+                             0,
+                             NULL,
+                             OPEN_EXISTING,
+                             FILE_FLAG_OVERLAPPED,
+                             NULL);
+
+    if (pipeHandle != INVALID_HANDLE_VALUE) {
+      *duplex_flags = UV_HANDLE_READABLE;
+      return pipeHandle;
+    }
+  }
+
+  if (GetLastError() == ERROR_ACCESS_DENIED) {
+    pipeHandle = CreateFileW(name,
+                             GENERIC_WRITE | FILE_READ_ATTRIBUTES,
+                             0,
+                             NULL,
+                             OPEN_EXISTING,
+                             FILE_FLAG_OVERLAPPED,
+                             NULL);
+
+    if (pipeHandle != INVALID_HANDLE_VALUE) {
+      *duplex_flags = UV_HANDLE_WRITABLE;
+      return pipeHandle;
+    }
+  }
+
+  return INVALID_HANDLE_VALUE;
+}
+
+
+static void close_pipe(uv_pipe_t* pipe) {
+  assert(pipe->u.fd == -1 || pipe->u.fd > 2);
+  if (pipe->u.fd == -1)
+    CloseHandle(pipe->handle);
+  else
+    close(pipe->u.fd);
+
+  pipe->u.fd = -1;
+  pipe->handle = INVALID_HANDLE_VALUE;
+}
+
+
+int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
+    char* name, size_t nameSize) {
+  HANDLE pipeHandle;
+  int err;
+  char* ptr = (char*)handle;
+
+  for (;;) {
+    uv_unique_pipe_name(ptr, name, nameSize);
+
+    pipeHandle = CreateNamedPipeA(name,
+      access | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
+      PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, 1, 65536, 65536, 0,
+      NULL);
+
+    if (pipeHandle != INVALID_HANDLE_VALUE) {
+      /* No name collisions. We're done. */
+      break;
+    }
+
+    err = GetLastError();
+    if (err != ERROR_PIPE_BUSY && err != ERROR_ACCESS_DENIED) {
+      goto error;
+    }
+
+    /* Pipe name collision. Increment the pointer and try again. */
+    ptr++;
+  }
+
+  if (CreateIoCompletionPort(pipeHandle,
+                             loop->iocp,
+                             (ULONG_PTR)handle,
+                             0) == NULL) {
+    err = GetLastError();
+    goto error;
+  }
+
+  uv_pipe_connection_init(handle);
+  handle->handle = pipeHandle;
+
+  return 0;
+
+ error:
+  if (pipeHandle != INVALID_HANDLE_VALUE) {
+    CloseHandle(pipeHandle);
+  }
+
+  return err;
+}
+
+
+static int uv_set_pipe_handle(uv_loop_t* loop,
+                              uv_pipe_t* handle,
+                              HANDLE pipeHandle,
+                              int fd,
+                              DWORD duplex_flags) {
+  NTSTATUS nt_status;
+  IO_STATUS_BLOCK io_status;
+  FILE_MODE_INFORMATION mode_info;
+  DWORD mode = PIPE_READMODE_BYTE | PIPE_WAIT;
+  DWORD current_mode = 0;
+  DWORD err = 0;
+
+  if (!(handle->flags & UV_HANDLE_PIPESERVER) &&
+      handle->handle != INVALID_HANDLE_VALUE)
+    return UV_EBUSY;
+
+  if (!SetNamedPipeHandleState(pipeHandle, &mode, NULL, NULL)) {
+    err = GetLastError();
+    if (err == ERROR_ACCESS_DENIED) {
+      /*
+       * SetNamedPipeHandleState can fail if the handle doesn't have either
+       * GENERIC_WRITE or FILE_WRITE_ATTRIBUTES.
+       * But if the handle already has the desired wait and blocking modes
+       * we can continue.
+       */
+      if (!GetNamedPipeHandleState(pipeHandle, &current_mode, NULL, NULL,
+                                   NULL, NULL, 0)) {
+        return -1;
+      } else if (current_mode & PIPE_NOWAIT) {
+        SetLastError(ERROR_ACCESS_DENIED);
+        return -1;
+      }
+    } else {
+      /* If this returns ERROR_INVALID_PARAMETER we probably opened
+       * something that is not a pipe. */
+      if (err == ERROR_INVALID_PARAMETER) {
+        SetLastError(WSAENOTSOCK);
+      }
+      return -1;
+    }
+  }
+
+  /* Check if the pipe was created with FILE_FLAG_OVERLAPPED.
*/ + nt_status = pNtQueryInformationFile(pipeHandle, + &io_status, + &mode_info, + sizeof(mode_info), + FileModeInformation); + if (nt_status != STATUS_SUCCESS) { + return -1; + } + + if (mode_info.Mode & FILE_SYNCHRONOUS_IO_ALERT || + mode_info.Mode & FILE_SYNCHRONOUS_IO_NONALERT) { + /* Non-overlapped pipe. */ + handle->flags |= UV_HANDLE_NON_OVERLAPPED_PIPE; + } else { + /* Overlapped pipe. Try to associate with IOCP. */ + if (CreateIoCompletionPort(pipeHandle, + loop->iocp, + (ULONG_PTR)handle, + 0) == NULL) { + handle->flags |= UV_HANDLE_EMULATE_IOCP; + } + } + + handle->handle = pipeHandle; + handle->u.fd = fd; + handle->flags |= duplex_flags; + + return 0; +} + + +static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) { + uv_loop_t* loop; + uv_pipe_t* handle; + uv_shutdown_t* req; + + req = (uv_shutdown_t*) parameter; + assert(req); + handle = (uv_pipe_t*) req->handle; + assert(handle); + loop = handle->loop; + assert(loop); + + FlushFileBuffers(handle->handle); + + /* Post completed */ + POST_COMPLETION_FOR_REQ(loop, req); + + return 0; +} + + +void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) { + int err; + DWORD result; + uv_shutdown_t* req; + NTSTATUS nt_status; + IO_STATUS_BLOCK io_status; + FILE_PIPE_LOCAL_INFORMATION pipe_info; + uv__ipc_queue_item_t* item; + + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + handle->flags &= ~UV_HANDLE_PIPE_READ_CANCELABLE; + uv_mutex_destroy(&handle->pipe.conn.readfile_mutex); + } + + if ((handle->flags & UV_HANDLE_CONNECTION) && + handle->stream.conn.shutdown_req != NULL && + handle->stream.conn.write_reqs_pending == 0) { + req = handle->stream.conn.shutdown_req; + + /* Clear the shutdown_req field so we don't go here again. */ + handle->stream.conn.shutdown_req = NULL; + + if (handle->flags & UV__HANDLE_CLOSING) { + UNREGISTER_HANDLE_REQ(loop, handle, req); + + /* Already closing. Cancel the shutdown. */ + if (req->cb) { + req->cb(req, UV_ECANCELED); + } + + DECREASE_PENDING_REQ_COUNT(handle); + return; + } + + /* Try to avoid flushing the pipe buffer in the thread pool. */ + nt_status = pNtQueryInformationFile(handle->handle, + &io_status, + &pipe_info, + sizeof pipe_info, + FilePipeLocalInformation); + + if (nt_status != STATUS_SUCCESS) { + /* Failure */ + UNREGISTER_HANDLE_REQ(loop, handle, req); + + handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */ + if (req->cb) { + err = pRtlNtStatusToDosError(nt_status); + req->cb(req, uv_translate_sys_error(err)); + } + + DECREASE_PENDING_REQ_COUNT(handle); + return; + } + + if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) { + /* Short-circuit, no need to call FlushFileBuffers. */ + uv_insert_pending_req(loop, (uv_req_t*) req); + return; + } + + /* Run FlushFileBuffers in the thread pool. */ + result = QueueUserWorkItem(pipe_shutdown_thread_proc, + req, + WT_EXECUTELONGFUNCTION); + if (result) { + return; + + } else { + /* Failure. 
*/ + UNREGISTER_HANDLE_REQ(loop, handle, req); + + handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */ + if (req->cb) { + err = GetLastError(); + req->cb(req, uv_translate_sys_error(err)); + } + + DECREASE_PENDING_REQ_COUNT(handle); + return; + } + } + + if (handle->flags & UV__HANDLE_CLOSING && + handle->reqs_pending == 0) { + assert(!(handle->flags & UV_HANDLE_CLOSED)); + + if (handle->flags & UV_HANDLE_CONNECTION) { + /* Free pending sockets */ + while (!QUEUE_EMPTY(&handle->pipe.conn.pending_ipc_info.queue)) { + QUEUE* q; + SOCKET socket; + + q = QUEUE_HEAD(&handle->pipe.conn.pending_ipc_info.queue); + QUEUE_REMOVE(q); + item = QUEUE_DATA(q, uv__ipc_queue_item_t, member); + + /* Materialize socket and close it */ + socket = WSASocketW(FROM_PROTOCOL_INFO, + FROM_PROTOCOL_INFO, + FROM_PROTOCOL_INFO, + &item->socket_info_ex.socket_info, + 0, + WSA_FLAG_OVERLAPPED); + uv__free(item); + + if (socket != INVALID_SOCKET) + closesocket(socket); + } + handle->pipe.conn.pending_ipc_info.queue_len = 0; + + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(handle->read_req.wait_handle); + handle->read_req.wait_handle = INVALID_HANDLE_VALUE; + } + if (handle->read_req.event_handle) { + CloseHandle(handle->read_req.event_handle); + handle->read_req.event_handle = NULL; + } + } + } + + if (handle->flags & UV_HANDLE_PIPESERVER) { + assert(handle->pipe.serv.accept_reqs); + uv__free(handle->pipe.serv.accept_reqs); + handle->pipe.serv.accept_reqs = NULL; + } + + uv__handle_close(handle); + } +} + + +void uv_pipe_pending_instances(uv_pipe_t* handle, int count) { + if (handle->flags & UV_HANDLE_BOUND) + return; + handle->pipe.serv.pending_instances = count; + handle->flags |= UV_HANDLE_PIPESERVER; +} + + +/* Creates a pipe server. */ +int uv_pipe_bind(uv_pipe_t* handle, const char* name) { + uv_loop_t* loop = handle->loop; + int i, err, nameSize; + uv_pipe_accept_t* req; + + if (handle->flags & UV_HANDLE_BOUND) { + return UV_EINVAL; + } + + if (!name) { + return UV_EINVAL; + } + + if (!(handle->flags & UV_HANDLE_PIPESERVER)) { + handle->pipe.serv.pending_instances = default_pending_pipe_instances; + } + + handle->pipe.serv.accept_reqs = (uv_pipe_accept_t*) + uv__malloc(sizeof(uv_pipe_accept_t) * handle->pipe.serv.pending_instances); + if (!handle->pipe.serv.accept_reqs) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + for (i = 0; i < handle->pipe.serv.pending_instances; i++) { + req = &handle->pipe.serv.accept_reqs[i]; + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_ACCEPT; + req->data = handle; + req->pipeHandle = INVALID_HANDLE_VALUE; + req->next_pending = NULL; + } + + /* Convert name to UTF16. */ + nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR); + handle->name = (WCHAR*)uv__malloc(nameSize); + if (!handle->name) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + if (!MultiByteToWideChar(CP_UTF8, + 0, + name, + -1, + handle->name, + nameSize / sizeof(WCHAR))) { + err = GetLastError(); + goto error; + } + + /* + * Attempt to create the first pipe with FILE_FLAG_FIRST_PIPE_INSTANCE. + * If this fails then there's already a pipe server for the given pipe name. 
+ */ + handle->pipe.serv.accept_reqs[0].pipeHandle = CreateNamedPipeW(handle->name, + PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | + FILE_FLAG_FIRST_PIPE_INSTANCE, + PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, + PIPE_UNLIMITED_INSTANCES, 65536, 65536, 0, NULL); + + if (handle->pipe.serv.accept_reqs[0].pipeHandle == INVALID_HANDLE_VALUE) { + err = GetLastError(); + if (err == ERROR_ACCESS_DENIED) { + err = WSAEADDRINUSE; /* Translates to UV_EADDRINUSE. */ + } else if (err == ERROR_PATH_NOT_FOUND || err == ERROR_INVALID_NAME) { + err = WSAEACCES; /* Translates to UV_EACCES. */ + } + goto error; + } + + if (uv_set_pipe_handle(loop, + handle, + handle->pipe.serv.accept_reqs[0].pipeHandle, + -1, + 0)) { + err = GetLastError(); + goto error; + } + + handle->pipe.serv.pending_accepts = NULL; + handle->flags |= UV_HANDLE_PIPESERVER; + handle->flags |= UV_HANDLE_BOUND; + + return 0; + +error: + if (handle->name) { + uv__free(handle->name); + handle->name = NULL; + } + + if (handle->pipe.serv.accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE) { + CloseHandle(handle->pipe.serv.accept_reqs[0].pipeHandle); + handle->pipe.serv.accept_reqs[0].pipeHandle = INVALID_HANDLE_VALUE; + } + + return uv_translate_sys_error(err); +} + + +static DWORD WINAPI pipe_connect_thread_proc(void* parameter) { + uv_loop_t* loop; + uv_pipe_t* handle; + uv_connect_t* req; + HANDLE pipeHandle = INVALID_HANDLE_VALUE; + DWORD duplex_flags; + + req = (uv_connect_t*) parameter; + assert(req); + handle = (uv_pipe_t*) req->handle; + assert(handle); + loop = handle->loop; + assert(loop); + + /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. */ + /* We wait for the pipe to become available with WaitNamedPipe. */ + while (WaitNamedPipeW(handle->name, 30000)) { + /* The pipe is now available, try to connect. */ + pipeHandle = open_named_pipe(handle->name, &duplex_flags); + if (pipeHandle != INVALID_HANDLE_VALUE) { + break; + } + + SwitchToThread(); + } + + if (pipeHandle != INVALID_HANDLE_VALUE && + !uv_set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags)) { + SET_REQ_SUCCESS(req); + } else { + SET_REQ_ERROR(req, GetLastError()); + } + + /* Post completed */ + POST_COMPLETION_FOR_REQ(loop, req); + + return 0; +} + + +void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, + const char* name, uv_connect_cb cb) { + uv_loop_t* loop = handle->loop; + int err, nameSize; + HANDLE pipeHandle = INVALID_HANDLE_VALUE; + DWORD duplex_flags; + + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_CONNECT; + req->handle = (uv_stream_t*) handle; + req->cb = cb; + + /* Convert name to UTF16. */ + nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR); + handle->name = (WCHAR*)uv__malloc(nameSize); + if (!handle->name) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + if (!MultiByteToWideChar(CP_UTF8, + 0, + name, + -1, + handle->name, + nameSize / sizeof(WCHAR))) { + err = GetLastError(); + goto error; + } + + pipeHandle = open_named_pipe(handle->name, &duplex_flags); + if (pipeHandle == INVALID_HANDLE_VALUE) { + if (GetLastError() == ERROR_PIPE_BUSY) { + /* Wait for the server to make a pipe instance available. 
*/ + if (!QueueUserWorkItem(&pipe_connect_thread_proc, + req, + WT_EXECUTELONGFUNCTION)) { + err = GetLastError(); + goto error; + } + + REGISTER_HANDLE_REQ(loop, handle, req); + handle->reqs_pending++; + + return; + } + + err = GetLastError(); + goto error; + } + + assert(pipeHandle != INVALID_HANDLE_VALUE); + + if (uv_set_pipe_handle(loop, + (uv_pipe_t*) req->handle, + pipeHandle, + -1, + duplex_flags)) { + err = GetLastError(); + goto error; + } + + SET_REQ_SUCCESS(req); + uv_insert_pending_req(loop, (uv_req_t*) req); + handle->reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + return; + +error: + if (handle->name) { + uv__free(handle->name); + handle->name = NULL; + } + + if (pipeHandle != INVALID_HANDLE_VALUE) { + CloseHandle(pipeHandle); + } + + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, err); + uv_insert_pending_req(loop, (uv_req_t*) req); + handle->reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + return; +} + + +void uv__pipe_pause_read(uv_pipe_t* handle) { + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + /* Pause the ReadFile task briefly, to work + around the Windows kernel bug that causes + any access to a NamedPipe to deadlock if + any process has called ReadFile */ + HANDLE h; + uv_mutex_lock(&handle->pipe.conn.readfile_mutex); + h = handle->pipe.conn.readfile_thread; + while (h) { + /* spinlock: we expect this to finish quickly, + or we are probably about to deadlock anyways + (in the kernel), so it doesn't matter */ + pCancelSynchronousIo(h); + SwitchToThread(); /* yield thread control briefly */ + h = handle->pipe.conn.readfile_thread; + } + } +} + + +void uv__pipe_unpause_read(uv_pipe_t* handle) { + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + uv_mutex_unlock(&handle->pipe.conn.readfile_mutex); + } +} + + +void uv__pipe_stop_read(uv_pipe_t* handle) { + handle->flags &= ~UV_HANDLE_READING; + uv__pipe_pause_read((uv_pipe_t*)handle); + uv__pipe_unpause_read((uv_pipe_t*)handle); +} + + +/* Cleans up uv_pipe_t (server or connection) and all resources associated */ +/* with it. 
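Editorial note, not part of the vendored libuv sources: uv_pipe_bind and uv_pipe_connect above are the two ends of a named pipe. The sketch below shows a minimal server and client pair with a made-up pipe name; the public uv_listen/uv_accept calls dispatch to uv_pipe_listen/uv_pipe_accept further down in this file.

#include <stdlib.h>
#include "uv.h"

#define DEMO_PIPE_NAME "\\\\.\\pipe\\wren-demo"  /* hypothetical name */

static void on_closed(uv_handle_t* handle) {
  free(handle);
}

static void on_new_connection(uv_stream_t* server, int status) {
  uv_pipe_t* client;

  if (status < 0)
    return;

  client = malloc(sizeof(*client));
  uv_pipe_init(server->loop, client, 0);
  if (uv_accept(server, (uv_stream_t*) client) != 0)
    uv_close((uv_handle_t*) client, on_closed);
  /* otherwise: uv_read_start() on the client, echo data back, etc. */
}

static int run_server(uv_loop_t* loop, uv_pipe_t* server) {
  uv_pipe_init(loop, server, 0);
  if (uv_pipe_bind(server, DEMO_PIPE_NAME) != 0)
    return 1;
  /* Dispatches to uv_pipe_listen for named pipe handles. */
  return uv_listen((uv_stream_t*) server, 128, on_new_connection);
}

static void on_connect(uv_connect_t* req, int status) {
  /* status < 0 reports errors such as UV_ENOENT when no server is bound. */
}

static void run_client(uv_loop_t* loop, uv_pipe_t* client, uv_connect_t* req) {
  uv_pipe_init(loop, client, 0 /* not an IPC channel */);
  /* Errors are delivered through on_connect, not a return value. */
  uv_pipe_connect(req, client, DEMO_PIPE_NAME, on_connect);
}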
*/ +void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) { + int i; + HANDLE pipeHandle; + + uv__pipe_stop_read(handle); + + if (handle->name) { + uv__free(handle->name); + handle->name = NULL; + } + + if (handle->flags & UV_HANDLE_PIPESERVER) { + for (i = 0; i < handle->pipe.serv.pending_instances; i++) { + pipeHandle = handle->pipe.serv.accept_reqs[i].pipeHandle; + if (pipeHandle != INVALID_HANDLE_VALUE) { + CloseHandle(pipeHandle); + handle->pipe.serv.accept_reqs[i].pipeHandle = INVALID_HANDLE_VALUE; + } + } + handle->handle = INVALID_HANDLE_VALUE; + } + + if (handle->flags & UV_HANDLE_CONNECTION) { + handle->flags &= ~UV_HANDLE_WRITABLE; + eof_timer_destroy(handle); + } + + if ((handle->flags & UV_HANDLE_CONNECTION) + && handle->handle != INVALID_HANDLE_VALUE) + close_pipe(handle); +} + + +void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) { + if (handle->flags & UV_HANDLE_READING) { + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + } + + if (handle->flags & UV_HANDLE_LISTENING) { + handle->flags &= ~UV_HANDLE_LISTENING; + DECREASE_ACTIVE_COUNT(loop, handle); + } + + uv_pipe_cleanup(loop, handle); + + if (handle->reqs_pending == 0) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } + + handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE); + uv__handle_closing(handle); +} + + +static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle, + uv_pipe_accept_t* req, BOOL firstInstance) { + assert(handle->flags & UV_HANDLE_LISTENING); + + if (!firstInstance) { + assert(req->pipeHandle == INVALID_HANDLE_VALUE); + + req->pipeHandle = CreateNamedPipeW(handle->name, + PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED, + PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, + PIPE_UNLIMITED_INSTANCES, 65536, 65536, 0, NULL); + + if (req->pipeHandle == INVALID_HANDLE_VALUE) { + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*) req); + handle->reqs_pending++; + return; + } + + if (uv_set_pipe_handle(loop, handle, req->pipeHandle, -1, 0)) { + CloseHandle(req->pipeHandle); + req->pipeHandle = INVALID_HANDLE_VALUE; + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*) req); + handle->reqs_pending++; + return; + } + } + + assert(req->pipeHandle != INVALID_HANDLE_VALUE); + + /* Prepare the overlapped structure. */ + memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped)); + + if (!ConnectNamedPipe(req->pipeHandle, &req->u.io.overlapped) && + GetLastError() != ERROR_IO_PENDING) { + if (GetLastError() == ERROR_PIPE_CONNECTED) { + SET_REQ_SUCCESS(req); + } else { + CloseHandle(req->pipeHandle); + req->pipeHandle = INVALID_HANDLE_VALUE; + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, GetLastError()); + } + uv_insert_pending_req(loop, (uv_req_t*) req); + handle->reqs_pending++; + return; + } + + handle->reqs_pending++; +} + + +int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) { + uv_loop_t* loop = server->loop; + uv_pipe_t* pipe_client; + uv_pipe_accept_t* req; + QUEUE* q; + uv__ipc_queue_item_t* item; + int err; + + if (server->ipc) { + if (QUEUE_EMPTY(&server->pipe.conn.pending_ipc_info.queue)) { + /* No valid pending sockets. 
*/ + return WSAEWOULDBLOCK; + } + + q = QUEUE_HEAD(&server->pipe.conn.pending_ipc_info.queue); + QUEUE_REMOVE(q); + server->pipe.conn.pending_ipc_info.queue_len--; + item = QUEUE_DATA(q, uv__ipc_queue_item_t, member); + + err = uv_tcp_import((uv_tcp_t*)client, + &item->socket_info_ex, + item->tcp_connection); + if (err != 0) + return err; + + uv__free(item); + + } else { + pipe_client = (uv_pipe_t*)client; + + /* Find a connection instance that has been connected, but not yet */ + /* accepted. */ + req = server->pipe.serv.pending_accepts; + + if (!req) { + /* No valid connections found, so we error out. */ + return WSAEWOULDBLOCK; + } + + /* Initialize the client handle and copy the pipeHandle to the client */ + uv_pipe_connection_init(pipe_client); + pipe_client->handle = req->pipeHandle; + pipe_client->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; + + /* Prepare the req to pick up a new connection */ + server->pipe.serv.pending_accepts = req->next_pending; + req->next_pending = NULL; + req->pipeHandle = INVALID_HANDLE_VALUE; + + if (!(server->flags & UV__HANDLE_CLOSING)) { + uv_pipe_queue_accept(loop, server, req, FALSE); + } + } + + return 0; +} + + +/* Starts listening for connections for the given pipe. */ +int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) { + uv_loop_t* loop = handle->loop; + int i; + + if (handle->flags & UV_HANDLE_LISTENING) { + handle->stream.serv.connection_cb = cb; + } + + if (!(handle->flags & UV_HANDLE_BOUND)) { + return WSAEINVAL; + } + + if (handle->flags & UV_HANDLE_READING) { + return WSAEISCONN; + } + + if (!(handle->flags & UV_HANDLE_PIPESERVER)) { + return ERROR_NOT_SUPPORTED; + } + + handle->flags |= UV_HANDLE_LISTENING; + INCREASE_ACTIVE_COUNT(loop, handle); + handle->stream.serv.connection_cb = cb; + + /* First pipe handle should have already been created in uv_pipe_bind */ + assert(handle->pipe.serv.accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE); + + for (i = 0; i < handle->pipe.serv.pending_instances; i++) { + uv_pipe_queue_accept(loop, handle, &handle->pipe.serv.accept_reqs[i], i == 0); + } + + return 0; +} + + +static DWORD WINAPI uv_pipe_zero_readfile_thread_proc(void* parameter) { + int result; + DWORD bytes; + uv_read_t* req = (uv_read_t*) parameter; + uv_pipe_t* handle = (uv_pipe_t*) req->data; + uv_loop_t* loop = handle->loop; + HANDLE hThread = NULL; + DWORD err; + uv_mutex_t *m = &handle->pipe.conn.readfile_mutex; + + assert(req != NULL); + assert(req->type == UV_READ); + assert(handle->type == UV_NAMED_PIPE); + + if (handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + uv_mutex_lock(m); /* mutex controls *setting* of readfile_thread */ + if (DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), + GetCurrentProcess(), &hThread, + 0, TRUE, DUPLICATE_SAME_ACCESS)) { + handle->pipe.conn.readfile_thread = hThread; + } else { + hThread = NULL; + } + uv_mutex_unlock(m); + } +restart_readfile: + result = ReadFile(handle->handle, + &uv_zero_, + 0, + &bytes, + NULL); + if (!result) { + err = GetLastError(); + if (err == ERROR_OPERATION_ABORTED && + handle->flags & UV_HANDLE_PIPE_READ_CANCELABLE) { + if (handle->flags & UV_HANDLE_READING) { + /* just a brief break to do something else */ + handle->pipe.conn.readfile_thread = NULL; + /* resume after it is finished */ + uv_mutex_lock(m); + handle->pipe.conn.readfile_thread = hThread; + uv_mutex_unlock(m); + goto restart_readfile; + } else { + result = 1; /* successfully stopped reading */ + } + } + } + if (hThread) { + assert(hThread == 
handle->pipe.conn.readfile_thread); + /* mutex does not control clearing readfile_thread */ + handle->pipe.conn.readfile_thread = NULL; + uv_mutex_lock(m); + /* only when we hold the mutex lock is it safe to + open or close the handle */ + CloseHandle(hThread); + uv_mutex_unlock(m); + } + + if (!result) { + SET_REQ_ERROR(req, err); + } + + POST_COMPLETION_FOR_REQ(loop, req); + return 0; +} + + +static DWORD WINAPI uv_pipe_writefile_thread_proc(void* parameter) { + int result; + DWORD bytes; + uv_write_t* req = (uv_write_t*) parameter; + uv_pipe_t* handle = (uv_pipe_t*) req->handle; + uv_loop_t* loop = handle->loop; + + assert(req != NULL); + assert(req->type == UV_WRITE); + assert(handle->type == UV_NAMED_PIPE); + assert(req->write_buffer.base); + + result = WriteFile(handle->handle, + req->write_buffer.base, + req->write_buffer.len, + &bytes, + NULL); + + if (!result) { + SET_REQ_ERROR(req, GetLastError()); + } + + POST_COMPLETION_FOR_REQ(loop, req); + return 0; +} + + +static void CALLBACK post_completion_read_wait(void* context, BOOLEAN timed_out) { + uv_read_t* req; + uv_tcp_t* handle; + + req = (uv_read_t*) context; + assert(req != NULL); + handle = (uv_tcp_t*)req->data; + assert(handle != NULL); + assert(!timed_out); + + if (!PostQueuedCompletionStatus(handle->loop->iocp, + req->u.io.overlapped.InternalHigh, + 0, + &req->u.io.overlapped)) { + uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); + } +} + + +static void CALLBACK post_completion_write_wait(void* context, BOOLEAN timed_out) { + uv_write_t* req; + uv_tcp_t* handle; + + req = (uv_write_t*) context; + assert(req != NULL); + handle = (uv_tcp_t*)req->handle; + assert(handle != NULL); + assert(!timed_out); + + if (!PostQueuedCompletionStatus(handle->loop->iocp, + req->u.io.overlapped.InternalHigh, + 0, + &req->u.io.overlapped)) { + uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); + } +} + + +static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) { + uv_read_t* req; + int result; + + assert(handle->flags & UV_HANDLE_READING); + assert(!(handle->flags & UV_HANDLE_READ_PENDING)); + + assert(handle->handle != INVALID_HANDLE_VALUE); + + req = &handle->read_req; + + if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) { + if (!QueueUserWorkItem(&uv_pipe_zero_readfile_thread_proc, + req, + WT_EXECUTELONGFUNCTION)) { + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, GetLastError()); + goto error; + } + } else { + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + req->u.io.overlapped.hEvent = (HANDLE) ((uintptr_t) req->event_handle | 1); + } + + /* Do 0-read */ + result = ReadFile(handle->handle, + &uv_zero_, + 0, + NULL, + &req->u.io.overlapped); + + if (!result && GetLastError() != ERROR_IO_PENDING) { + /* Make this req pending reporting an error. 
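The failure is recorded on the req and surfaces in the read callback once the pending req is processed.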
*/ + SET_REQ_ERROR(req, GetLastError()); + goto error; + } + + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + if (!req->event_handle) { + req->event_handle = CreateEvent(NULL, 0, 0, NULL); + if (!req->event_handle) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + } + if (req->wait_handle == INVALID_HANDLE_VALUE) { + if (!RegisterWaitForSingleObject(&req->wait_handle, + req->u.io.overlapped.hEvent, post_completion_read_wait, (void*) req, + INFINITE, WT_EXECUTEINWAITTHREAD)) { + SET_REQ_ERROR(req, GetLastError()); + goto error; + } + } + } + } + + /* Start the eof timer if there is one */ + eof_timer_start(handle); + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; + return; + +error: + uv_insert_pending_req(loop, (uv_req_t*)req); + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; +} + + +int uv_pipe_read_start(uv_pipe_t* handle, + uv_alloc_cb alloc_cb, + uv_read_cb read_cb) { + uv_loop_t* loop = handle->loop; + + handle->flags |= UV_HANDLE_READING; + INCREASE_ACTIVE_COUNT(loop, handle); + handle->read_cb = read_cb; + handle->alloc_cb = alloc_cb; + + /* If reading was stopped and then started again, there could still be a */ + /* read request pending. */ + if (!(handle->flags & UV_HANDLE_READ_PENDING)) + uv_pipe_queue_read(loop, handle); + + return 0; +} + + +static void uv_insert_non_overlapped_write_req(uv_pipe_t* handle, + uv_write_t* req) { + req->next_req = NULL; + if (handle->pipe.conn.non_overlapped_writes_tail) { + req->next_req = + handle->pipe.conn.non_overlapped_writes_tail->next_req; + handle->pipe.conn.non_overlapped_writes_tail->next_req = (uv_req_t*)req; + handle->pipe.conn.non_overlapped_writes_tail = req; + } else { + req->next_req = (uv_req_t*)req; + handle->pipe.conn.non_overlapped_writes_tail = req; + } +} + + +static uv_write_t* uv_remove_non_overlapped_write_req(uv_pipe_t* handle) { + uv_write_t* req; + + if (handle->pipe.conn.non_overlapped_writes_tail) { + req = (uv_write_t*)handle->pipe.conn.non_overlapped_writes_tail->next_req; + + if (req == handle->pipe.conn.non_overlapped_writes_tail) { + handle->pipe.conn.non_overlapped_writes_tail = NULL; + } else { + handle->pipe.conn.non_overlapped_writes_tail->next_req = + req->next_req; + } + + return req; + } else { + /* queue empty */ + return NULL; + } +} + + +static void uv_queue_non_overlapped_write(uv_pipe_t* handle) { + uv_write_t* req = uv_remove_non_overlapped_write_req(handle); + if (req) { + if (!QueueUserWorkItem(&uv_pipe_writefile_thread_proc, + req, + WT_EXECUTELONGFUNCTION)) { + uv_fatal_error(GetLastError(), "QueueUserWorkItem"); + } + } +} + + +static int uv_pipe_write_impl(uv_loop_t* loop, + uv_write_t* req, + uv_pipe_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_stream_t* send_handle, + uv_write_cb cb) { + int err; + int result; + uv_tcp_t* tcp_send_handle; + uv_write_t* ipc_header_req = NULL; + uv_ipc_frame_uv_stream ipc_frame; + + if (nbufs != 1 && (nbufs != 0 || !send_handle)) { + return ERROR_NOT_SUPPORTED; + } + + /* Only TCP handles are supported for sharing. 
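The socket must also be bound or connected; anything else is rejected with ERROR_NOT_SUPPORTED.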
*/ + if (send_handle && ((send_handle->type != UV_TCP) || + (!(send_handle->flags & UV_HANDLE_BOUND) && + !(send_handle->flags & UV_HANDLE_CONNECTION)))) { + return ERROR_NOT_SUPPORTED; + } + + assert(handle->handle != INVALID_HANDLE_VALUE); + + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_WRITE; + req->handle = (uv_stream_t*) handle; + req->cb = cb; + req->ipc_header = 0; + req->event_handle = NULL; + req->wait_handle = INVALID_HANDLE_VALUE; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + + if (handle->ipc) { + assert(!(handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)); + ipc_frame.header.flags = 0; + + /* Use the IPC framing protocol. */ + if (send_handle) { + tcp_send_handle = (uv_tcp_t*)send_handle; + + if (handle->pipe.conn.ipc_pid == 0) { + handle->pipe.conn.ipc_pid = uv_current_pid(); + } + + err = uv_tcp_duplicate_socket(tcp_send_handle, handle->pipe.conn.ipc_pid, + &ipc_frame.socket_info_ex.socket_info); + if (err) { + return err; + } + + ipc_frame.socket_info_ex.delayed_error = tcp_send_handle->delayed_error; + + ipc_frame.header.flags |= UV_IPC_TCP_SERVER; + + if (tcp_send_handle->flags & UV_HANDLE_CONNECTION) { + ipc_frame.header.flags |= UV_IPC_TCP_CONNECTION; + } + } + + if (nbufs == 1) { + ipc_frame.header.flags |= UV_IPC_RAW_DATA; + ipc_frame.header.raw_data_length = bufs[0].len; + } + + /* + * Use the provided req if we're only doing a single write. + * If we're doing multiple writes, use ipc_header_write_req to do + * the first write, and then use the provided req for the second write. + */ + if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) { + ipc_header_req = req; + } else { + /* + * Try to use the preallocated write req if it's available. + * Otherwise allocate a new one. + */ + if (handle->pipe.conn.ipc_header_write_req.type != UV_WRITE) { + ipc_header_req = (uv_write_t*)&handle->pipe.conn.ipc_header_write_req; + } else { + ipc_header_req = (uv_write_t*)uv__malloc(sizeof(uv_write_t)); + if (!ipc_header_req) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + } + + uv_req_init(loop, (uv_req_t*) ipc_header_req); + ipc_header_req->type = UV_WRITE; + ipc_header_req->handle = (uv_stream_t*) handle; + ipc_header_req->cb = NULL; + ipc_header_req->ipc_header = 1; + } + + /* Write the header or the whole frame. */ + memset(&ipc_header_req->u.io.overlapped, 0, + sizeof(ipc_header_req->u.io.overlapped)); + + /* Using overlapped IO, but wait for completion before returning. + This write is blocking because ipc_frame is on stack. */ + ipc_header_req->u.io.overlapped.hEvent = CreateEvent(NULL, 1, 0, NULL); + if (!ipc_header_req->u.io.overlapped.hEvent) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + + result = WriteFile(handle->handle, + &ipc_frame, + ipc_frame.header.flags & UV_IPC_TCP_SERVER ? + sizeof(ipc_frame) : sizeof(ipc_frame.header), + NULL, + &ipc_header_req->u.io.overlapped); + if (!result && GetLastError() != ERROR_IO_PENDING) { + err = GetLastError(); + CloseHandle(ipc_header_req->u.io.overlapped.hEvent); + return err; + } + + if (!result) { + /* Request not completed immediately. 
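The overlapped write of the IPC frame header is still in flight.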
Wait for it.*/ + if (WaitForSingleObject(ipc_header_req->u.io.overlapped.hEvent, INFINITE) != + WAIT_OBJECT_0) { + err = GetLastError(); + CloseHandle(ipc_header_req->u.io.overlapped.hEvent); + return err; + } + } + ipc_header_req->u.io.queued_bytes = 0; + CloseHandle(ipc_header_req->u.io.overlapped.hEvent); + ipc_header_req->u.io.overlapped.hEvent = NULL; + + REGISTER_HANDLE_REQ(loop, handle, ipc_header_req); + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + + /* If we don't have any raw data to write - we're done. */ + if (!(ipc_frame.header.flags & UV_IPC_RAW_DATA)) { + return 0; + } + } + + if ((handle->flags & + (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) == + (UV_HANDLE_BLOCKING_WRITES | UV_HANDLE_NON_OVERLAPPED_PIPE)) { + DWORD bytes; + result = WriteFile(handle->handle, + bufs[0].base, + bufs[0].len, + &bytes, + NULL); + + if (!result) { + err = GetLastError(); + return err; + } else { + /* Request completed immediately. */ + req->u.io.queued_bytes = 0; + } + + REGISTER_HANDLE_REQ(loop, handle, req); + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + POST_COMPLETION_FOR_REQ(loop, req); + return 0; + } else if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) { + req->write_buffer = bufs[0]; + uv_insert_non_overlapped_write_req(handle, req); + if (handle->stream.conn.write_reqs_pending == 0) { + uv_queue_non_overlapped_write(handle); + } + + /* Request queued by the kernel. */ + req->u.io.queued_bytes = bufs[0].len; + handle->write_queue_size += req->u.io.queued_bytes; + } else if (handle->flags & UV_HANDLE_BLOCKING_WRITES) { + /* Using overlapped IO, but wait for completion before returning */ + req->u.io.overlapped.hEvent = CreateEvent(NULL, 1, 0, NULL); + if (!req->u.io.overlapped.hEvent) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + + result = WriteFile(handle->handle, + bufs[0].base, + bufs[0].len, + NULL, + &req->u.io.overlapped); + + if (!result && GetLastError() != ERROR_IO_PENDING) { + err = GetLastError(); + CloseHandle(req->u.io.overlapped.hEvent); + return err; + } + + if (result) { + /* Request completed immediately. */ + req->u.io.queued_bytes = 0; + } else { + /* Request queued by the kernel. */ + req->u.io.queued_bytes = bufs[0].len; + handle->write_queue_size += req->u.io.queued_bytes; + if (WaitForSingleObject(req->u.io.overlapped.hEvent, INFINITE) != + WAIT_OBJECT_0) { + err = GetLastError(); + CloseHandle(req->u.io.overlapped.hEvent); + return uv_translate_sys_error(err); + } + } + CloseHandle(req->u.io.overlapped.hEvent); + + REGISTER_HANDLE_REQ(loop, handle, req); + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + return 0; + } else { + result = WriteFile(handle->handle, + bufs[0].base, + bufs[0].len, + NULL, + &req->u.io.overlapped); + + if (!result && GetLastError() != ERROR_IO_PENDING) { + return GetLastError(); + } + + if (result) { + /* Request completed immediately. */ + req->u.io.queued_bytes = 0; + } else { + /* Request queued by the kernel. 
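Account for the queued bytes so the handle's write queue size stays accurate until the write completes.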
*/ + req->u.io.queued_bytes = bufs[0].len; + handle->write_queue_size += req->u.io.queued_bytes; + } + + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + req->event_handle = CreateEvent(NULL, 0, 0, NULL); + if (!req->event_handle) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + if (!RegisterWaitForSingleObject(&req->wait_handle, + req->u.io.overlapped.hEvent, post_completion_write_wait, (void*) req, + INFINITE, WT_EXECUTEINWAITTHREAD)) { + return GetLastError(); + } + } + } + + REGISTER_HANDLE_REQ(loop, handle, req); + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + + return 0; +} + + +int uv_pipe_write(uv_loop_t* loop, + uv_write_t* req, + uv_pipe_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_write_cb cb) { + return uv_pipe_write_impl(loop, req, handle, bufs, nbufs, NULL, cb); +} + + +int uv_pipe_write2(uv_loop_t* loop, + uv_write_t* req, + uv_pipe_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_stream_t* send_handle, + uv_write_cb cb) { + if (!handle->ipc) { + return WSAEINVAL; + } + + return uv_pipe_write_impl(loop, req, handle, bufs, nbufs, send_handle, cb); +} + + +static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle, + uv_buf_t buf) { + /* If there is an eof timer running, we don't need it any more, */ + /* so discard it. */ + eof_timer_destroy(handle); + + handle->flags &= ~UV_HANDLE_READABLE; + uv_read_stop((uv_stream_t*) handle); + + handle->read_cb((uv_stream_t*) handle, UV_EOF, &buf); +} + + +static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error, + uv_buf_t buf) { + /* If there is an eof timer running, we don't need it any more, */ + /* so discard it. */ + eof_timer_destroy(handle); + + uv_read_stop((uv_stream_t*) handle); + + handle->read_cb((uv_stream_t*)handle, uv_translate_sys_error(error), &buf); +} + + +static void uv_pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle, + int error, uv_buf_t buf) { + if (error == ERROR_BROKEN_PIPE) { + uv_pipe_read_eof(loop, handle, buf); + } else { + uv_pipe_read_error(loop, handle, error, buf); + } +} + + +void uv__pipe_insert_pending_socket(uv_pipe_t* handle, + uv__ipc_socket_info_ex* info, + int tcp_connection) { + uv__ipc_queue_item_t* item; + + item = (uv__ipc_queue_item_t*) uv__malloc(sizeof(*item)); + if (item == NULL) + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + + memcpy(&item->socket_info_ex, info, sizeof(item->socket_info_ex)); + item->tcp_connection = tcp_connection; + QUEUE_INSERT_TAIL(&handle->pipe.conn.pending_ipc_info.queue, &item->member); + handle->pipe.conn.pending_ipc_info.queue_len++; +} + + +void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_req_t* req) { + DWORD bytes, avail; + uv_buf_t buf; + uv_ipc_frame_uv_stream ipc_frame; + + assert(handle->type == UV_NAMED_PIPE); + + handle->flags &= ~UV_HANDLE_READ_PENDING; + eof_timer_stop(handle); + + if (!REQ_SUCCESS(req)) { + /* An error occurred doing the 0-read. */ + if (handle->flags & UV_HANDLE_READING) { + uv_pipe_read_error_or_eof(loop, + handle, + GET_REQ_ERROR(req), + uv_null_buf_); + } + } else { + /* Do non-blocking reads until the buffer is empty */ + while (handle->flags & UV_HANDLE_READING) { + if (!PeekNamedPipe(handle->handle, + NULL, + 0, + NULL, + &avail, + NULL)) { + uv_pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_); + break; + } + + if (avail == 0) { + /* There is nothing to read after all. */ + break; + } + + if (handle->ipc) { + /* Use the IPC framing protocol to read the incoming data. 
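Each frame starts with a header that says whether it carries a shared TCP socket (UV_IPC_TCP_SERVER), raw data (UV_IPC_RAW_DATA), or both.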
*/ + if (handle->pipe.conn.remaining_ipc_rawdata_bytes == 0) { + /* We're reading a new frame. First, read the header. */ + assert(avail >= sizeof(ipc_frame.header)); + + if (!ReadFile(handle->handle, + &ipc_frame.header, + sizeof(ipc_frame.header), + &bytes, + NULL)) { + uv_pipe_read_error_or_eof(loop, handle, GetLastError(), + uv_null_buf_); + break; + } + + assert(bytes == sizeof(ipc_frame.header)); + assert(ipc_frame.header.flags <= (UV_IPC_TCP_SERVER | UV_IPC_RAW_DATA | + UV_IPC_TCP_CONNECTION)); + + if (ipc_frame.header.flags & UV_IPC_TCP_SERVER) { + assert(avail - sizeof(ipc_frame.header) >= + sizeof(ipc_frame.socket_info_ex)); + + /* Read the TCP socket info. */ + if (!ReadFile(handle->handle, + &ipc_frame.socket_info_ex, + sizeof(ipc_frame) - sizeof(ipc_frame.header), + &bytes, + NULL)) { + uv_pipe_read_error_or_eof(loop, handle, GetLastError(), + uv_null_buf_); + break; + } + + assert(bytes == sizeof(ipc_frame) - sizeof(ipc_frame.header)); + + /* Store the pending socket info. */ + uv__pipe_insert_pending_socket( + handle, + &ipc_frame.socket_info_ex, + ipc_frame.header.flags & UV_IPC_TCP_CONNECTION); + } + + if (ipc_frame.header.flags & UV_IPC_RAW_DATA) { + handle->pipe.conn.remaining_ipc_rawdata_bytes = + ipc_frame.header.raw_data_length; + continue; + } + } else { + avail = min(avail, (DWORD)handle->pipe.conn.remaining_ipc_rawdata_bytes); + } + } + + buf = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, avail, &buf); + if (buf.base == NULL || buf.len == 0) { + handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf); + break; + } + assert(buf.base != NULL); + + if (ReadFile(handle->handle, + buf.base, + min(buf.len, avail), + &bytes, + NULL)) { + /* Successful read */ + if (handle->ipc) { + assert(handle->pipe.conn.remaining_ipc_rawdata_bytes >= bytes); + handle->pipe.conn.remaining_ipc_rawdata_bytes = + handle->pipe.conn.remaining_ipc_rawdata_bytes - bytes; + } + handle->read_cb((uv_stream_t*)handle, bytes, &buf); + + /* Read again only if bytes == buf.len */ + if (bytes <= buf.len) { + break; + } + } else { + uv_pipe_read_error_or_eof(loop, handle, GetLastError(), buf); + break; + } + } + + /* Post another 0-read if still reading and not closing. 
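This keeps a zero-byte read pending so the loop is notified as soon as more data arrives.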
*/ + if ((handle->flags & UV_HANDLE_READING) && + !(handle->flags & UV_HANDLE_READ_PENDING)) { + uv_pipe_queue_read(loop, handle); + } + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_write_t* req) { + int err; + + assert(handle->type == UV_NAMED_PIPE); + + assert(handle->write_queue_size >= req->u.io.queued_bytes); + handle->write_queue_size -= req->u.io.queued_bytes; + + UNREGISTER_HANDLE_REQ(loop, handle, req); + + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + if (req->wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(req->wait_handle); + req->wait_handle = INVALID_HANDLE_VALUE; + } + if (req->event_handle) { + CloseHandle(req->event_handle); + req->event_handle = NULL; + } + } + + if (req->ipc_header) { + if (req == &handle->pipe.conn.ipc_header_write_req) { + req->type = UV_UNKNOWN_REQ; + } else { + uv__free(req); + } + } else { + if (req->cb) { + err = GET_REQ_ERROR(req); + req->cb(req, uv_translate_sys_error(err)); + } + } + + handle->stream.conn.write_reqs_pending--; + + if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE && + handle->pipe.conn.non_overlapped_writes_tail) { + assert(handle->stream.conn.write_reqs_pending > 0); + uv_queue_non_overlapped_write(handle); + } + + if (handle->stream.conn.shutdown_req != NULL && + handle->stream.conn.write_reqs_pending == 0) { + uv_want_endgame(loop, (uv_handle_t*)handle); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_req_t* raw_req) { + uv_pipe_accept_t* req = (uv_pipe_accept_t*) raw_req; + + assert(handle->type == UV_NAMED_PIPE); + + if (handle->flags & UV__HANDLE_CLOSING) { + /* The req->pipeHandle should be freed already in uv_pipe_cleanup(). */ + assert(req->pipeHandle == INVALID_HANDLE_VALUE); + DECREASE_PENDING_REQ_COUNT(handle); + return; + } + + if (REQ_SUCCESS(req)) { + assert(req->pipeHandle != INVALID_HANDLE_VALUE); + req->next_pending = handle->pipe.serv.pending_accepts; + handle->pipe.serv.pending_accepts = req; + + if (handle->stream.serv.connection_cb) { + handle->stream.serv.connection_cb((uv_stream_t*)handle, 0); + } + } else { + if (req->pipeHandle != INVALID_HANDLE_VALUE) { + CloseHandle(req->pipeHandle); + req->pipeHandle = INVALID_HANDLE_VALUE; + } + if (!(handle->flags & UV__HANDLE_CLOSING)) { + uv_pipe_queue_accept(loop, handle, req, FALSE); + } + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_connect_t* req) { + int err; + + assert(handle->type == UV_NAMED_PIPE); + + UNREGISTER_HANDLE_REQ(loop, handle, req); + + if (req->cb) { + err = 0; + if (REQ_SUCCESS(req)) { + uv_pipe_connection_init(handle); + } else { + err = GET_REQ_ERROR(req); + } + req->cb(req, uv_translate_sys_error(err)); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle, + uv_shutdown_t* req) { + assert(handle->type == UV_NAMED_PIPE); + + UNREGISTER_HANDLE_REQ(loop, handle, req); + + if (handle->flags & UV_HANDLE_READABLE) { + /* Initialize and optionally start the eof timer. Only do this if the */ + /* pipe is readable and we haven't seen EOF come in ourselves. */ + eof_timer_init(handle); + + /* If reading start the timer right now. */ + /* Otherwise uv_pipe_queue_read will start it. */ + if (handle->flags & UV_HANDLE_READ_PENDING) { + eof_timer_start(handle); + } + + } else { + /* This pipe is not readable. 
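There is no incoming EOF to wait for, so no eof timer is needed.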
We can just close it to let the other end */ + /* know that we're done writing. */ + close_pipe(handle); + } + + if (req->cb) { + req->cb(req, 0); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +static void eof_timer_init(uv_pipe_t* pipe) { + int r; + + assert(pipe->pipe.conn.eof_timer == NULL); + assert(pipe->flags & UV_HANDLE_CONNECTION); + + pipe->pipe.conn.eof_timer = (uv_timer_t*) uv__malloc(sizeof *pipe->pipe.conn.eof_timer); + + r = uv_timer_init(pipe->loop, pipe->pipe.conn.eof_timer); + assert(r == 0); /* timers can't fail */ + pipe->pipe.conn.eof_timer->data = pipe; + uv_unref((uv_handle_t*) pipe->pipe.conn.eof_timer); +} + + +static void eof_timer_start(uv_pipe_t* pipe) { + assert(pipe->flags & UV_HANDLE_CONNECTION); + + if (pipe->pipe.conn.eof_timer != NULL) { + uv_timer_start(pipe->pipe.conn.eof_timer, eof_timer_cb, eof_timeout, 0); + } +} + + +static void eof_timer_stop(uv_pipe_t* pipe) { + assert(pipe->flags & UV_HANDLE_CONNECTION); + + if (pipe->pipe.conn.eof_timer != NULL) { + uv_timer_stop(pipe->pipe.conn.eof_timer); + } +} + + +static void eof_timer_cb(uv_timer_t* timer) { + uv_pipe_t* pipe = (uv_pipe_t*) timer->data; + uv_loop_t* loop = timer->loop; + + assert(pipe->type == UV_NAMED_PIPE); + + /* This should always be true, since we start the timer only */ + /* in uv_pipe_queue_read after successfully calling ReadFile, */ + /* or in uv_process_pipe_shutdown_req if a read is pending, */ + /* and we always immediately stop the timer in */ + /* uv_process_pipe_read_req. */ + assert(pipe->flags & UV_HANDLE_READ_PENDING); + + /* If there are many packets coming off the iocp then the timer callback */ + /* may be called before the read request is coming off the queue. */ + /* Therefore we check here if the read request has completed but will */ + /* be processed later. */ + if ((pipe->flags & UV_HANDLE_READ_PENDING) && + HasOverlappedIoCompleted(&pipe->read_req.u.io.overlapped)) { + return; + } + + /* Force both ends off the pipe. */ + close_pipe(pipe); + + /* Stop reading, so the pending read that is going to fail will */ + /* not be reported to the user. */ + uv_read_stop((uv_stream_t*) pipe); + + /* Report the eof and update flags. This will get reported even if the */ + /* user stopped reading in the meantime. TODO: is that okay? */ + uv_pipe_read_eof(loop, pipe, uv_null_buf_); +} + + +static void eof_timer_destroy(uv_pipe_t* pipe) { + assert(pipe->flags & UV_HANDLE_CONNECTION); + + if (pipe->pipe.conn.eof_timer) { + uv_close((uv_handle_t*) pipe->pipe.conn.eof_timer, eof_timer_close_cb); + pipe->pipe.conn.eof_timer = NULL; + } +} + + +static void eof_timer_close_cb(uv_handle_t* handle) { + assert(handle->type == UV_TIMER); + uv__free(handle); +} + + +int uv_pipe_open(uv_pipe_t* pipe, uv_file file) { + HANDLE os_handle = uv__get_osfhandle(file); + NTSTATUS nt_status; + IO_STATUS_BLOCK io_status; + FILE_ACCESS_INFORMATION access; + DWORD duplex_flags = 0; + + if (os_handle == INVALID_HANDLE_VALUE) + return UV_EBADF; + + /* In order to avoid closing a stdio file descriptor 0-2, duplicate the + * underlying OS handle and forget about the original fd. + * We could also opt to use the original OS handle and just never close it, + * but then there would be no reliable way to cancel pending read operations + * upon close. 
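* The duplicate is the handle that gets registered with the loop below; the original descriptor itself is not closed here.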
+ */ + if (file <= 2) { + if (!DuplicateHandle(INVALID_HANDLE_VALUE, + os_handle, + INVALID_HANDLE_VALUE, + &os_handle, + 0, + FALSE, + DUPLICATE_SAME_ACCESS)) + return uv_translate_sys_error(GetLastError()); + file = -1; + } + + /* Determine what kind of permissions we have on this handle. + * Cygwin opens the pipe in message mode, but we can support it, + * just query the access flags and set the stream flags accordingly. + */ + nt_status = pNtQueryInformationFile(os_handle, + &io_status, + &access, + sizeof(access), + FileAccessInformation); + if (nt_status != STATUS_SUCCESS) + return UV_EINVAL; + + if (pipe->ipc) { + if (!(access.AccessFlags & FILE_WRITE_DATA) || + !(access.AccessFlags & FILE_READ_DATA)) { + return UV_EINVAL; + } + } + + if (access.AccessFlags & FILE_WRITE_DATA) + duplex_flags |= UV_HANDLE_WRITABLE; + if (access.AccessFlags & FILE_READ_DATA) + duplex_flags |= UV_HANDLE_READABLE; + + if (os_handle == INVALID_HANDLE_VALUE || + uv_set_pipe_handle(pipe->loop, + pipe, + os_handle, + file, + duplex_flags) == -1) { + return UV_EINVAL; + } + + uv_pipe_connection_init(pipe); + + if (pipe->ipc) { + assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)); + pipe->pipe.conn.ipc_pid = uv_parent_pid(); + assert(pipe->pipe.conn.ipc_pid != -1); + } + return 0; +} + + +static int uv__pipe_getname(const uv_pipe_t* handle, char* buffer, size_t* size) { + NTSTATUS nt_status; + IO_STATUS_BLOCK io_status; + FILE_NAME_INFORMATION tmp_name_info; + FILE_NAME_INFORMATION* name_info; + WCHAR* name_buf; + unsigned int addrlen; + unsigned int name_size; + unsigned int name_len; + int err; + + name_info = NULL; + + if (handle->handle == INVALID_HANDLE_VALUE) { + *size = 0; + return UV_EINVAL; + } + + uv__pipe_pause_read((uv_pipe_t*)handle); /* cast away const warning */ + + nt_status = pNtQueryInformationFile(handle->handle, + &io_status, + &tmp_name_info, + sizeof tmp_name_info, + FileNameInformation); + if (nt_status == STATUS_BUFFER_OVERFLOW) { + name_size = sizeof(*name_info) + tmp_name_info.FileNameLength; + name_info = uv__malloc(name_size); + if (!name_info) { + *size = 0; + err = UV_ENOMEM; + goto cleanup; + } + + nt_status = pNtQueryInformationFile(handle->handle, + &io_status, + name_info, + name_size, + FileNameInformation); + } + + if (nt_status != STATUS_SUCCESS) { + *size = 0; + err = uv_translate_sys_error(pRtlNtStatusToDosError(nt_status)); + goto error; + } + + if (!name_info) { + /* the struct on stack was used */ + name_buf = tmp_name_info.FileName; + name_len = tmp_name_info.FileNameLength; + } else { + name_buf = name_info->FileName; + name_len = name_info->FileNameLength; + } + + if (name_len == 0) { + *size = 0; + err = 0; + goto error; + } + + name_len /= sizeof(WCHAR); + + /* check how much space we need */ + addrlen = WideCharToMultiByte(CP_UTF8, + 0, + name_buf, + name_len, + NULL, + 0, + NULL, + NULL); + if (!addrlen) { + *size = 0; + err = uv_translate_sys_error(GetLastError()); + goto error; + } else if (pipe_prefix_len + addrlen >= *size) { + /* "\\\\.\\pipe" + name */ + *size = pipe_prefix_len + addrlen + 1; + err = UV_ENOBUFS; + goto error; + } + + memcpy(buffer, pipe_prefix, pipe_prefix_len); + addrlen = WideCharToMultiByte(CP_UTF8, + 0, + name_buf, + name_len, + buffer+pipe_prefix_len, + *size-pipe_prefix_len, + NULL, + NULL); + if (!addrlen) { + *size = 0; + err = uv_translate_sys_error(GetLastError()); + goto error; + } + + addrlen += pipe_prefix_len; + *size = addrlen; + buffer[addrlen] = '\0'; + + err = 0; + goto cleanup; + +error: + uv__free(name_info); + 
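/* Fall through to unpause the read and return the error. */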
+cleanup: + uv__pipe_unpause_read((uv_pipe_t*)handle); /* cast away const warning */ + return err; +} + + +int uv_pipe_pending_count(uv_pipe_t* handle) { + if (!handle->ipc) + return 0; + return handle->pipe.conn.pending_ipc_info.queue_len; +} + + +int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) { + if (handle->flags & UV_HANDLE_BOUND) + return uv__pipe_getname(handle, buffer, size); + + if (handle->flags & UV_HANDLE_CONNECTION || + handle->handle != INVALID_HANDLE_VALUE) { + *size = 0; + return 0; + } + + return UV_EBADF; +} + + +int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) { + /* emulate unix behaviour */ + if (handle->flags & UV_HANDLE_BOUND) + return UV_ENOTCONN; + + if (handle->handle != INVALID_HANDLE_VALUE) + return uv__pipe_getname(handle, buffer, size); + + return UV_EBADF; +} + + +uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) { + if (!handle->ipc) + return UV_UNKNOWN_HANDLE; + if (handle->pipe.conn.pending_ipc_info.queue_len == 0) + return UV_UNKNOWN_HANDLE; + else + return UV_TCP; +} diff --git a/deps/libuv/src/win/poll.c b/deps/libuv/src/win/poll.c new file mode 100644 index 00000000..d479e521 --- /dev/null +++ b/deps/libuv/src/win/poll.c @@ -0,0 +1,646 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "req-inl.h" + + +static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = { + {0xe70f1aa0, 0xab8b, 0x11cf, + {0x8c, 0xa3, 0x00, 0x80, 0x5f, 0x48, 0xa1, 0x92}}, + {0xf9eab0c0, 0x26d4, 0x11d0, + {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}}, + {0x9fc48064, 0x7298, 0x43e4, + {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}} +}; + +typedef struct uv_single_fd_set_s { + unsigned int fd_count; + SOCKET fd_array[1]; +} uv_single_fd_set_t; + + +static OVERLAPPED overlapped_dummy_; +static uv_once_t overlapped_dummy_init_guard_ = UV_ONCE_INIT; + +static AFD_POLL_INFO afd_poll_info_dummy_; + + +static void uv__init_overlapped_dummy(void) { + HANDLE event; + + event = CreateEvent(NULL, TRUE, TRUE, NULL); + if (event == NULL) + uv_fatal_error(GetLastError(), "CreateEvent"); + + memset(&overlapped_dummy_, 0, sizeof overlapped_dummy_); + overlapped_dummy_.hEvent = (HANDLE) ((uintptr_t) event | 1); +} + + +static OVERLAPPED* uv__get_overlapped_dummy() { + uv_once(&overlapped_dummy_init_guard_, uv__init_overlapped_dummy); + return &overlapped_dummy_; +} + + +static AFD_POLL_INFO* uv__get_afd_poll_info_dummy() { + return &afd_poll_info_dummy_; +} + + +static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) { + uv_req_t* req; + AFD_POLL_INFO* afd_poll_info; + DWORD result; + + /* Find a yet unsubmitted req to submit. */ + if (handle->submitted_events_1 == 0) { + req = &handle->poll_req_1; + afd_poll_info = &handle->afd_poll_info_1; + handle->submitted_events_1 = handle->events; + handle->mask_events_1 = 0; + handle->mask_events_2 = handle->events; + } else if (handle->submitted_events_2 == 0) { + req = &handle->poll_req_2; + afd_poll_info = &handle->afd_poll_info_2; + handle->submitted_events_2 = handle->events; + handle->mask_events_1 = handle->events; + handle->mask_events_2 = 0; + } else { + /* Just wait until there's an unsubmitted req. */ + /* This will happen almost immediately as one of the 2 outstanding */ + /* requests is about to return. When this happens, */ + /* uv__fast_poll_process_poll_req will be called, and the pending */ + /* events, if needed, will be processed in a subsequent request. */ + return; + } + + /* Setting Exclusive to TRUE makes the other poll request return if there */ + /* is any. */ + afd_poll_info->Exclusive = TRUE; + afd_poll_info->NumberOfHandles = 1; + afd_poll_info->Timeout.QuadPart = INT64_MAX; + afd_poll_info->Handles[0].Handle = (HANDLE) handle->socket; + afd_poll_info->Handles[0].Status = 0; + afd_poll_info->Handles[0].Events = 0; + + if (handle->events & UV_READABLE) { + afd_poll_info->Handles[0].Events |= AFD_POLL_RECEIVE | + AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT; + } else { + if (handle->events & UV_DISCONNECT) { + afd_poll_info->Handles[0].Events |= AFD_POLL_DISCONNECT; + } + } + if (handle->events & UV_WRITABLE) { + afd_poll_info->Handles[0].Events |= AFD_POLL_SEND | AFD_POLL_CONNECT_FAIL; + } + + memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped); + + result = uv_msafd_poll((SOCKET) handle->peer_socket, + afd_poll_info, + afd_poll_info, + &req->u.io.overlapped); + if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) { + /* Queue this req, reporting an error. 
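The error reaches the poll callback when the pending req is processed.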
*/ + SET_REQ_ERROR(req, WSAGetLastError()); + uv_insert_pending_req(loop, req); + } +} + + +static int uv__fast_poll_cancel_poll_req(uv_loop_t* loop, uv_poll_t* handle) { + AFD_POLL_INFO afd_poll_info; + DWORD result; + + afd_poll_info.Exclusive = TRUE; + afd_poll_info.NumberOfHandles = 1; + afd_poll_info.Timeout.QuadPart = INT64_MAX; + afd_poll_info.Handles[0].Handle = (HANDLE) handle->socket; + afd_poll_info.Handles[0].Status = 0; + afd_poll_info.Handles[0].Events = AFD_POLL_ALL; + + result = uv_msafd_poll(handle->socket, + &afd_poll_info, + uv__get_afd_poll_info_dummy(), + uv__get_overlapped_dummy()); + + if (result == SOCKET_ERROR) { + DWORD error = WSAGetLastError(); + if (error != WSA_IO_PENDING) + return error; + } + + return 0; +} + + +static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, + uv_req_t* req) { + unsigned char mask_events; + AFD_POLL_INFO* afd_poll_info; + + if (req == &handle->poll_req_1) { + afd_poll_info = &handle->afd_poll_info_1; + handle->submitted_events_1 = 0; + mask_events = handle->mask_events_1; + } else if (req == &handle->poll_req_2) { + afd_poll_info = &handle->afd_poll_info_2; + handle->submitted_events_2 = 0; + mask_events = handle->mask_events_2; + } else { + assert(0); + return; + } + + /* Report an error unless the select was just interrupted. */ + if (!REQ_SUCCESS(req)) { + DWORD error = GET_REQ_SOCK_ERROR(req); + if (error != WSAEINTR && handle->events != 0) { + handle->events = 0; /* Stop the watcher */ + handle->poll_cb(handle, uv_translate_sys_error(error), 0); + } + + } else if (afd_poll_info->NumberOfHandles >= 1) { + unsigned char events = 0; + + if ((afd_poll_info->Handles[0].Events & (AFD_POLL_RECEIVE | + AFD_POLL_DISCONNECT | AFD_POLL_ACCEPT | AFD_POLL_ABORT)) != 0) { + events |= UV_READABLE; + if ((afd_poll_info->Handles[0].Events & AFD_POLL_DISCONNECT) != 0) { + events |= UV_DISCONNECT; + } + } + if ((afd_poll_info->Handles[0].Events & (AFD_POLL_SEND | + AFD_POLL_CONNECT_FAIL)) != 0) { + events |= UV_WRITABLE; + } + + events &= handle->events & ~mask_events; + + if (afd_poll_info->Handles[0].Events & AFD_POLL_LOCAL_CLOSE) { + /* Stop polling. 
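The socket was closed locally (AFD_POLL_LOCAL_CLOSE), so clear the event mask and stop the handle.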
*/ + handle->events = 0; + if (uv__is_active(handle)) + uv__handle_stop(handle); + } + + if (events != 0) { + handle->poll_cb(handle, 0, events); + } + } + + if ((handle->events & ~(handle->submitted_events_1 | + handle->submitted_events_2)) != 0) { + uv__fast_poll_submit_poll_req(loop, handle); + } else if ((handle->flags & UV__HANDLE_CLOSING) && + handle->submitted_events_1 == 0 && + handle->submitted_events_2 == 0) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } +} + + +static int uv__fast_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) { + assert(handle->type == UV_POLL); + assert(!(handle->flags & UV__HANDLE_CLOSING)); + assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0); + + handle->events = events; + + if (handle->events != 0) { + uv__handle_start(handle); + } else { + uv__handle_stop(handle); + } + + if ((handle->events & ~(handle->submitted_events_1 | + handle->submitted_events_2)) != 0) { + uv__fast_poll_submit_poll_req(handle->loop, handle); + } + + return 0; +} + + +static int uv__fast_poll_close(uv_loop_t* loop, uv_poll_t* handle) { + handle->events = 0; + uv__handle_closing(handle); + + if (handle->submitted_events_1 == 0 && + handle->submitted_events_2 == 0) { + uv_want_endgame(loop, (uv_handle_t*) handle); + return 0; + } else { + /* Cancel outstanding poll requests by executing another, unique poll */ + /* request that forces the outstanding ones to return. */ + return uv__fast_poll_cancel_poll_req(loop, handle); + } +} + + +static SOCKET uv__fast_poll_create_peer_socket(HANDLE iocp, + WSAPROTOCOL_INFOW* protocol_info) { + SOCKET sock = 0; + + sock = WSASocketW(protocol_info->iAddressFamily, + protocol_info->iSocketType, + protocol_info->iProtocol, + protocol_info, + 0, + WSA_FLAG_OVERLAPPED); + if (sock == INVALID_SOCKET) { + return INVALID_SOCKET; + } + + if (!SetHandleInformation((HANDLE) sock, HANDLE_FLAG_INHERIT, 0)) { + goto error; + }; + + if (CreateIoCompletionPort((HANDLE) sock, + iocp, + (ULONG_PTR) sock, + 0) == NULL) { + goto error; + } + + return sock; + + error: + closesocket(sock); + return INVALID_SOCKET; +} + + +static SOCKET uv__fast_poll_get_peer_socket(uv_loop_t* loop, + WSAPROTOCOL_INFOW* protocol_info) { + int index, i; + SOCKET peer_socket; + + index = -1; + for (i = 0; (size_t) i < ARRAY_SIZE(uv_msafd_provider_ids); i++) { + if (memcmp((void*) &protocol_info->ProviderId, + (void*) &uv_msafd_provider_ids[i], + sizeof protocol_info->ProviderId) == 0) { + index = i; + } + } + + /* Check if the protocol uses an msafd socket. */ + if (index < 0) { + return INVALID_SOCKET; + } + + /* If we didn't (try) to create a peer socket yet, try to make one. Don't */ + /* try again if the peer socket creation failed earlier for the same */ + /* protocol. 
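A failed attempt leaves INVALID_SOCKET in the poll_peer_sockets cache, which is never 0, so it will not be retried.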
*/ + peer_socket = loop->poll_peer_sockets[index]; + if (peer_socket == 0) { + peer_socket = uv__fast_poll_create_peer_socket(loop->iocp, protocol_info); + loop->poll_peer_sockets[index] = peer_socket; + } + + return peer_socket; +} + + +static DWORD WINAPI uv__slow_poll_thread_proc(void* arg) { + uv_req_t* req = (uv_req_t*) arg; + uv_poll_t* handle = (uv_poll_t*) req->data; + unsigned char reported_events; + int r; + uv_single_fd_set_t rfds, wfds, efds; + struct timeval timeout; + + assert(handle->type == UV_POLL); + assert(req->type == UV_POLL_REQ); + + if (handle->events & UV_READABLE) { + rfds.fd_count = 1; + rfds.fd_array[0] = handle->socket; + } else { + rfds.fd_count = 0; + } + + if (handle->events & UV_WRITABLE) { + wfds.fd_count = 1; + wfds.fd_array[0] = handle->socket; + efds.fd_count = 1; + efds.fd_array[0] = handle->socket; + } else { + wfds.fd_count = 0; + efds.fd_count = 0; + } + + /* Make the select() time out after 3 minutes. If select() hangs because */ + /* the user closed the socket, we will at least not hang indefinitely. */ + timeout.tv_sec = 3 * 60; + timeout.tv_usec = 0; + + r = select(1, (fd_set*) &rfds, (fd_set*) &wfds, (fd_set*) &efds, &timeout); + if (r == SOCKET_ERROR) { + /* Queue this req, reporting an error. */ + SET_REQ_ERROR(&handle->poll_req_1, WSAGetLastError()); + POST_COMPLETION_FOR_REQ(handle->loop, req); + return 0; + } + + reported_events = 0; + + if (r > 0) { + if (rfds.fd_count > 0) { + assert(rfds.fd_count == 1); + assert(rfds.fd_array[0] == handle->socket); + reported_events |= UV_READABLE; + } + + if (wfds.fd_count > 0) { + assert(wfds.fd_count == 1); + assert(wfds.fd_array[0] == handle->socket); + reported_events |= UV_WRITABLE; + } else if (efds.fd_count > 0) { + assert(efds.fd_count == 1); + assert(efds.fd_array[0] == handle->socket); + reported_events |= UV_WRITABLE; + } + } + + SET_REQ_SUCCESS(req); + req->u.io.overlapped.InternalHigh = (DWORD) reported_events; + POST_COMPLETION_FOR_REQ(handle->loop, req); + + return 0; +} + + +static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) { + uv_req_t* req; + + /* Find a yet unsubmitted req to submit. */ + if (handle->submitted_events_1 == 0) { + req = &handle->poll_req_1; + handle->submitted_events_1 = handle->events; + handle->mask_events_1 = 0; + handle->mask_events_2 = handle->events; + } else if (handle->submitted_events_2 == 0) { + req = &handle->poll_req_2; + handle->submitted_events_2 = handle->events; + handle->mask_events_1 = handle->events; + handle->mask_events_2 = 0; + } else { + assert(0); + return; + } + + if (!QueueUserWorkItem(uv__slow_poll_thread_proc, + (void*) req, + WT_EXECUTELONGFUNCTION)) { + /* Make this req pending, reporting an error. */ + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, req); + } +} + + + +static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, + uv_req_t* req) { + unsigned char mask_events; + int err; + + if (req == &handle->poll_req_1) { + handle->submitted_events_1 = 0; + mask_events = handle->mask_events_1; + } else if (req == &handle->poll_req_2) { + handle->submitted_events_2 = 0; + mask_events = handle->mask_events_2; + } else { + assert(0); + return; + } + + if (!REQ_SUCCESS(req)) { + /* Error. */ + if (handle->events != 0) { + err = GET_REQ_ERROR(req); + handle->events = 0; /* Stop the watcher */ + handle->poll_cb(handle, uv_translate_sys_error(err), 0); + } + } else { + /* Got some events. 
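The worker thread stored the reported events in the overlapped InternalHigh field; events already covered by the other outstanding request are masked out.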
*/ + int events = req->u.io.overlapped.InternalHigh & handle->events & ~mask_events; + if (events != 0) { + handle->poll_cb(handle, 0, events); + } + } + + if ((handle->events & ~(handle->submitted_events_1 | + handle->submitted_events_2)) != 0) { + uv__slow_poll_submit_poll_req(loop, handle); + } else if ((handle->flags & UV__HANDLE_CLOSING) && + handle->submitted_events_1 == 0 && + handle->submitted_events_2 == 0) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } +} + + +static int uv__slow_poll_set(uv_loop_t* loop, uv_poll_t* handle, int events) { + assert(handle->type == UV_POLL); + assert(!(handle->flags & UV__HANDLE_CLOSING)); + assert((events & ~(UV_READABLE | UV_WRITABLE)) == 0); + + handle->events = events; + + if (handle->events != 0) { + uv__handle_start(handle); + } else { + uv__handle_stop(handle); + } + + if ((handle->events & + ~(handle->submitted_events_1 | handle->submitted_events_2)) != 0) { + uv__slow_poll_submit_poll_req(handle->loop, handle); + } + + return 0; +} + + +static int uv__slow_poll_close(uv_loop_t* loop, uv_poll_t* handle) { + handle->events = 0; + uv__handle_closing(handle); + + if (handle->submitted_events_1 == 0 && + handle->submitted_events_2 == 0) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } + + return 0; +} + + +int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) { + return uv_poll_init_socket(loop, handle, (SOCKET) uv__get_osfhandle(fd)); +} + + +int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, + uv_os_sock_t socket) { + WSAPROTOCOL_INFOW protocol_info; + int len; + SOCKET peer_socket, base_socket; + DWORD bytes; + DWORD yes = 1; + + /* Set the socket to nonblocking mode */ + if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) + return uv_translate_sys_error(WSAGetLastError()); + + /* Try to obtain a base handle for the socket. This increases this chances */ + /* that we find an AFD handle and are able to use the fast poll mechanism. */ + /* This will always fail on windows XP/2k3, since they don't support the */ + /* SIO_BASE_HANDLE ioctl. */ +#ifndef NDEBUG + base_socket = INVALID_SOCKET; +#endif + + if (WSAIoctl(socket, + SIO_BASE_HANDLE, + NULL, + 0, + &base_socket, + sizeof base_socket, + &bytes, + NULL, + NULL) == 0) { + assert(base_socket != 0 && base_socket != INVALID_SOCKET); + socket = base_socket; + } + + uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL); + handle->socket = socket; + handle->events = 0; + + /* Obtain protocol information about the socket. */ + len = sizeof protocol_info; + if (getsockopt(socket, + SOL_SOCKET, + SO_PROTOCOL_INFOW, + (char*) &protocol_info, + &len) != 0) { + return uv_translate_sys_error(WSAGetLastError()); + } + + /* Get the peer socket that is needed to enable fast poll. If the returned */ + /* value is NULL, the protocol is not implemented by MSAFD and we'll have */ + /* to use slow mode. */ + peer_socket = uv__fast_poll_get_peer_socket(loop, &protocol_info); + + if (peer_socket != INVALID_SOCKET) { + /* Initialize fast poll specific fields. */ + handle->peer_socket = peer_socket; + } else { + /* Initialize slow poll specific fields. */ + handle->flags |= UV_HANDLE_POLL_SLOW; + } + + /* Initialize 2 poll reqs. 
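Having two allows a new poll request to be submitted while the other one is still outstanding.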
*/ + handle->submitted_events_1 = 0; + uv_req_init(loop, (uv_req_t*) &(handle->poll_req_1)); + handle->poll_req_1.type = UV_POLL_REQ; + handle->poll_req_1.data = handle; + + handle->submitted_events_2 = 0; + uv_req_init(loop, (uv_req_t*) &(handle->poll_req_2)); + handle->poll_req_2.type = UV_POLL_REQ; + handle->poll_req_2.data = handle; + + return 0; +} + + +int uv_poll_start(uv_poll_t* handle, int events, uv_poll_cb cb) { + int err; + + if (!(handle->flags & UV_HANDLE_POLL_SLOW)) { + err = uv__fast_poll_set(handle->loop, handle, events); + } else { + err = uv__slow_poll_set(handle->loop, handle, events); + } + + if (err) { + return uv_translate_sys_error(err); + } + + handle->poll_cb = cb; + + return 0; +} + + +int uv_poll_stop(uv_poll_t* handle) { + int err; + + if (!(handle->flags & UV_HANDLE_POLL_SLOW)) { + err = uv__fast_poll_set(handle->loop, handle, 0); + } else { + err = uv__slow_poll_set(handle->loop, handle, 0); + } + + return uv_translate_sys_error(err); +} + + +void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) { + if (!(handle->flags & UV_HANDLE_POLL_SLOW)) { + uv__fast_poll_process_poll_req(loop, handle, req); + } else { + uv__slow_poll_process_poll_req(loop, handle, req); + } +} + + +int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) { + if (!(handle->flags & UV_HANDLE_POLL_SLOW)) { + return uv__fast_poll_close(loop, handle); + } else { + return uv__slow_poll_close(loop, handle); + } +} + + +void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) { + assert(handle->flags & UV__HANDLE_CLOSING); + assert(!(handle->flags & UV_HANDLE_CLOSED)); + + assert(handle->submitted_events_1 == 0); + assert(handle->submitted_events_2 == 0); + + uv__handle_close(handle); +} diff --git a/deps/libuv/src/win/process-stdio.c b/deps/libuv/src/win/process-stdio.c new file mode 100644 index 00000000..e3c06f57 --- /dev/null +++ b/deps/libuv/src/win/process-stdio.c @@ -0,0 +1,510 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" + + +/* + * The `child_stdio_buffer` buffer has the following layout: + * int number_of_fds + * unsigned char crt_flags[number_of_fds] + * HANDLE os_handle[number_of_fds] + */ +#define CHILD_STDIO_SIZE(count) \ + (sizeof(int) + \ + sizeof(unsigned char) * (count) + \ + sizeof(uintptr_t) * (count)) + +#define CHILD_STDIO_COUNT(buffer) \ + *((unsigned int*) (buffer)) + +#define CHILD_STDIO_CRT_FLAGS(buffer, fd) \ + *((unsigned char*) (buffer) + sizeof(int) + fd) + +#define CHILD_STDIO_HANDLE(buffer, fd) \ + *((HANDLE*) ((unsigned char*) (buffer) + \ + sizeof(int) + \ + sizeof(unsigned char) * \ + CHILD_STDIO_COUNT((buffer)) + \ + sizeof(HANDLE) * (fd))) + + +/* CRT file descriptor mode flags */ +#define FOPEN 0x01 +#define FEOFLAG 0x02 +#define FCRLF 0x04 +#define FPIPE 0x08 +#define FNOINHERIT 0x10 +#define FAPPEND 0x20 +#define FDEV 0x40 +#define FTEXT 0x80 + + +/* + * Clear the HANDLE_FLAG_INHERIT flag from all HANDLEs that were inherited + * the parent process. Don't check for errors - the stdio handles may not be + * valid, or may be closed already. There is no guarantee that this function + * does a perfect job. + */ +void uv_disable_stdio_inheritance(void) { + HANDLE handle; + STARTUPINFOW si; + + /* Make the windows stdio handles non-inheritable. */ + handle = GetStdHandle(STD_INPUT_HANDLE); + if (handle != NULL && handle != INVALID_HANDLE_VALUE) + SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0); + + handle = GetStdHandle(STD_OUTPUT_HANDLE); + if (handle != NULL && handle != INVALID_HANDLE_VALUE) + SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0); + + handle = GetStdHandle(STD_ERROR_HANDLE); + if (handle != NULL && handle != INVALID_HANDLE_VALUE) + SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0); + + /* Make inherited CRT FDs non-inheritable. */ + GetStartupInfoW(&si); + if (uv__stdio_verify(si.lpReserved2, si.cbReserved2)) + uv__stdio_noinherit(si.lpReserved2); +} + + +static int uv__create_stdio_pipe_pair(uv_loop_t* loop, + uv_pipe_t* server_pipe, HANDLE* child_pipe_ptr, unsigned int flags) { + char pipe_name[64]; + SECURITY_ATTRIBUTES sa; + DWORD server_access = 0; + DWORD client_access = 0; + HANDLE child_pipe = INVALID_HANDLE_VALUE; + int err; + + if (flags & UV_READABLE_PIPE) { + /* The server needs inbound access too, otherwise CreateNamedPipe() */ + /* won't give us the FILE_READ_ATTRIBUTES permission. We need that to */ + /* probe the state of the write buffer when we're trying to shutdown */ + /* the pipe. */ + server_access |= PIPE_ACCESS_OUTBOUND | PIPE_ACCESS_INBOUND; + client_access |= GENERIC_READ | FILE_WRITE_ATTRIBUTES; + } + if (flags & UV_WRITABLE_PIPE) { + server_access |= PIPE_ACCESS_INBOUND; + client_access |= GENERIC_WRITE | FILE_READ_ATTRIBUTES; + } + + /* Create server pipe handle. */ + err = uv_stdio_pipe_server(loop, + server_pipe, + server_access, + pipe_name, + sizeof(pipe_name)); + if (err) + goto error; + + /* Create child pipe handle. */ + sa.nLength = sizeof sa; + sa.lpSecurityDescriptor = NULL; + sa.bInheritHandle = TRUE; + + child_pipe = CreateFileA(pipe_name, + client_access, + 0, + &sa, + OPEN_EXISTING, + server_pipe->ipc ? FILE_FLAG_OVERLAPPED : 0, + NULL); + if (child_pipe == INVALID_HANDLE_VALUE) { + err = GetLastError(); + goto error; + } + +#ifndef NDEBUG + /* Validate that the pipe was opened in the right mode. 
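It must be a blocking, byte-mode pipe; this validation only runs in debug builds.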
*/ + { + DWORD mode; + BOOL r = GetNamedPipeHandleState(child_pipe, + &mode, + NULL, + NULL, + NULL, + NULL, + 0); + assert(r == TRUE); + assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT)); + } +#endif + + /* Do a blocking ConnectNamedPipe. This should not block because we have */ + /* both ends of the pipe created. */ + if (!ConnectNamedPipe(server_pipe->handle, NULL)) { + if (GetLastError() != ERROR_PIPE_CONNECTED) { + err = GetLastError(); + goto error; + } + } + + /* The server end is now readable and/or writable. */ + if (flags & UV_READABLE_PIPE) + server_pipe->flags |= UV_HANDLE_WRITABLE; + if (flags & UV_WRITABLE_PIPE) + server_pipe->flags |= UV_HANDLE_READABLE; + + *child_pipe_ptr = child_pipe; + return 0; + + error: + if (server_pipe->handle != INVALID_HANDLE_VALUE) { + uv_pipe_cleanup(loop, server_pipe); + } + + if (child_pipe != INVALID_HANDLE_VALUE) { + CloseHandle(child_pipe); + } + + return err; +} + + +static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) { + HANDLE current_process; + + + /* _get_osfhandle will sometimes return -2 in case of an error. This seems */ + /* to happen when fd <= 2 and the process' corresponding stdio handle is */ + /* set to NULL. Unfortunately DuplicateHandle will happily duplicate */ + /* (HANDLE) -2, so this situation goes unnoticed until someone tries to */ + /* use the duplicate. Therefore we filter out known-invalid handles here. */ + if (handle == INVALID_HANDLE_VALUE || + handle == NULL || + handle == (HANDLE) -2) { + *dup = INVALID_HANDLE_VALUE; + return ERROR_INVALID_HANDLE; + } + + current_process = GetCurrentProcess(); + + if (!DuplicateHandle(current_process, + handle, + current_process, + dup, + 0, + TRUE, + DUPLICATE_SAME_ACCESS)) { + *dup = INVALID_HANDLE_VALUE; + return GetLastError(); + } + + return 0; +} + + +static int uv__duplicate_fd(uv_loop_t* loop, int fd, HANDLE* dup) { + HANDLE handle; + + if (fd == -1) { + *dup = INVALID_HANDLE_VALUE; + return ERROR_INVALID_HANDLE; + } + + handle = uv__get_osfhandle(fd); + return uv__duplicate_handle(loop, handle, dup); +} + + +int uv__create_nul_handle(HANDLE* handle_ptr, + DWORD access) { + HANDLE handle; + SECURITY_ATTRIBUTES sa; + + sa.nLength = sizeof sa; + sa.lpSecurityDescriptor = NULL; + sa.bInheritHandle = TRUE; + + handle = CreateFileW(L"NUL", + access, + FILE_SHARE_READ | FILE_SHARE_WRITE, + &sa, + OPEN_EXISTING, + 0, + NULL); + if (handle == INVALID_HANDLE_VALUE) { + return GetLastError(); + } + + *handle_ptr = handle; + return 0; +} + + +int uv__stdio_create(uv_loop_t* loop, + const uv_process_options_t* options, + BYTE** buffer_ptr) { + BYTE* buffer; + int count, i; + int err; + + count = options->stdio_count; + + if (count < 0 || count > 255) { + /* Only support FDs 0-255 */ + return ERROR_NOT_SUPPORTED; + } else if (count < 3) { + /* There should always be at least 3 stdio handles. */ + count = 3; + } + + /* Allocate the child stdio buffer */ + buffer = (BYTE*) uv__malloc(CHILD_STDIO_SIZE(count)); + if (buffer == NULL) { + return ERROR_OUTOFMEMORY; + } + + /* Prepopulate the buffer with INVALID_HANDLE_VALUE handles so we can */ + /* clean up on failure. 
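uv__stdio_destroy() closes only the slots that no longer hold INVALID_HANDLE_VALUE, so a partially filled buffer can be torn down safely.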
*/ + CHILD_STDIO_COUNT(buffer) = count; + for (i = 0; i < count; i++) { + CHILD_STDIO_CRT_FLAGS(buffer, i) = 0; + CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE; + } + + for (i = 0; i < count; i++) { + uv_stdio_container_t fdopt; + if (i < options->stdio_count) { + fdopt = options->stdio[i]; + } else { + fdopt.flags = UV_IGNORE; + } + + switch (fdopt.flags & (UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | + UV_INHERIT_STREAM)) { + case UV_IGNORE: + /* Starting a process with no stdin/stout/stderr can confuse it. */ + /* So no matter what the user specified, we make sure the first */ + /* three FDs are always open in their typical modes, e.g. stdin */ + /* be readable and stdout/err should be writable. For FDs > 2, don't */ + /* do anything - all handles in the stdio buffer are initialized with */ + /* INVALID_HANDLE_VALUE, which should be okay. */ + if (i <= 2) { + DWORD access = (i == 0) ? FILE_GENERIC_READ : + FILE_GENERIC_WRITE | FILE_READ_ATTRIBUTES; + + err = uv__create_nul_handle(&CHILD_STDIO_HANDLE(buffer, i), + access); + if (err) + goto error; + + CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV; + } + break; + + case UV_CREATE_PIPE: { + /* Create a pair of two connected pipe ends; one end is turned into */ + /* an uv_pipe_t for use by the parent. The other one is given to */ + /* the child. */ + uv_pipe_t* parent_pipe = (uv_pipe_t*) fdopt.data.stream; + HANDLE child_pipe = INVALID_HANDLE_VALUE; + + /* Create a new, connected pipe pair. stdio[i].stream should point */ + /* to an uninitialized, but not connected pipe handle. */ + assert(fdopt.data.stream->type == UV_NAMED_PIPE); + assert(!(fdopt.data.stream->flags & UV_HANDLE_CONNECTION)); + assert(!(fdopt.data.stream->flags & UV_HANDLE_PIPESERVER)); + + err = uv__create_stdio_pipe_pair(loop, + parent_pipe, + &child_pipe, + fdopt.flags); + if (err) + goto error; + + CHILD_STDIO_HANDLE(buffer, i) = child_pipe; + CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE; + break; + } + + case UV_INHERIT_FD: { + /* Inherit a raw FD. */ + HANDLE child_handle; + + /* Make an inheritable duplicate of the handle. */ + err = uv__duplicate_fd(loop, fdopt.data.fd, &child_handle); + if (err) { + /* If fdopt.data.fd is not valid and fd fd <= 2, then ignore the */ + /* error. */ + if (fdopt.data.fd <= 2 && err == ERROR_INVALID_HANDLE) { + CHILD_STDIO_CRT_FLAGS(buffer, i) = 0; + CHILD_STDIO_HANDLE(buffer, i) = INVALID_HANDLE_VALUE; + break; + } + goto error; + } + + /* Figure out what the type is. */ + switch (GetFileType(child_handle)) { + case FILE_TYPE_DISK: + CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN; + break; + + case FILE_TYPE_PIPE: + CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FPIPE; + + case FILE_TYPE_CHAR: + case FILE_TYPE_REMOTE: + CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV; + break; + + case FILE_TYPE_UNKNOWN: + if (GetLastError() != 0) { + err = GetLastError(); + CloseHandle(child_handle); + goto error; + } + CHILD_STDIO_CRT_FLAGS(buffer, i) = FOPEN | FDEV; + break; + + default: + assert(0); + return -1; + } + + CHILD_STDIO_HANDLE(buffer, i) = child_handle; + break; + } + + case UV_INHERIT_STREAM: { + /* Use an existing stream as the stdio handle for the child. */ + HANDLE stream_handle, child_handle; + unsigned char crt_flags; + uv_stream_t* stream = fdopt.data.stream; + + /* Leech the handle out of the stream. 
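Only TTY handles and connected named pipes can be passed through; anything else fails with ERROR_NOT_SUPPORTED.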
*/ + if (stream->type == UV_TTY) { + stream_handle = ((uv_tty_t*) stream)->handle; + crt_flags = FOPEN | FDEV; + } else if (stream->type == UV_NAMED_PIPE && + stream->flags & UV_HANDLE_CONNECTION) { + stream_handle = ((uv_pipe_t*) stream)->handle; + crt_flags = FOPEN | FPIPE; + } else { + stream_handle = INVALID_HANDLE_VALUE; + crt_flags = 0; + } + + if (stream_handle == NULL || + stream_handle == INVALID_HANDLE_VALUE) { + /* The handle is already closed, or not yet created, or the */ + /* stream type is not supported. */ + err = ERROR_NOT_SUPPORTED; + goto error; + } + + /* Make an inheritable copy of the handle. */ + err = uv__duplicate_handle(loop, stream_handle, &child_handle); + if (err) + goto error; + + CHILD_STDIO_HANDLE(buffer, i) = child_handle; + CHILD_STDIO_CRT_FLAGS(buffer, i) = crt_flags; + break; + } + + default: + assert(0); + return -1; + } + } + + *buffer_ptr = buffer; + return 0; + + error: + uv__stdio_destroy(buffer); + return err; +} + + +void uv__stdio_destroy(BYTE* buffer) { + int i, count; + + count = CHILD_STDIO_COUNT(buffer); + for (i = 0; i < count; i++) { + HANDLE handle = CHILD_STDIO_HANDLE(buffer, i); + if (handle != INVALID_HANDLE_VALUE) { + CloseHandle(handle); + } + } + + uv__free(buffer); +} + + +void uv__stdio_noinherit(BYTE* buffer) { + int i, count; + + count = CHILD_STDIO_COUNT(buffer); + for (i = 0; i < count; i++) { + HANDLE handle = CHILD_STDIO_HANDLE(buffer, i); + if (handle != INVALID_HANDLE_VALUE) { + SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0); + } + } +} + + +int uv__stdio_verify(BYTE* buffer, WORD size) { + unsigned int count; + + /* Check the buffer pointer. */ + if (buffer == NULL) + return 0; + + /* Verify that the buffer is at least big enough to hold the count. */ + if (size < CHILD_STDIO_SIZE(0)) + return 0; + + /* Verify if the count is within range. */ + count = CHILD_STDIO_COUNT(buffer); + if (count > 256) + return 0; + + /* Verify that the buffer size is big enough to hold info for N FDs. */ + if (size < CHILD_STDIO_SIZE(count)) + return 0; + + return 1; +} + + +WORD uv__stdio_size(BYTE* buffer) { + return (WORD) CHILD_STDIO_SIZE(CHILD_STDIO_COUNT((buffer))); +} + + +HANDLE uv__stdio_handle(BYTE* buffer, int fd) { + return CHILD_STDIO_HANDLE(buffer, fd); +} diff --git a/deps/libuv/src/win/process.c b/deps/libuv/src/win/process.c new file mode 100644 index 00000000..855c3740 --- /dev/null +++ b/deps/libuv/src/win/process.c @@ -0,0 +1,1247 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include /* alloca */ + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "req-inl.h" + + +#define SIGKILL 9 + + +typedef struct env_var { + const WCHAR* const wide; + const WCHAR* const wide_eq; + const size_t len; /* including null or '=' */ +} env_var_t; + +#define E_V(str) { L##str, L##str L"=", sizeof(str) } + +static const env_var_t required_vars[] = { /* keep me sorted */ + E_V("HOMEDRIVE"), + E_V("HOMEPATH"), + E_V("LOGONSERVER"), + E_V("PATH"), + E_V("SYSTEMDRIVE"), + E_V("SYSTEMROOT"), + E_V("TEMP"), + E_V("USERDOMAIN"), + E_V("USERNAME"), + E_V("USERPROFILE"), + E_V("WINDIR"), +}; +static size_t n_required_vars = ARRAY_SIZE(required_vars); + + +static HANDLE uv_global_job_handle_; +static uv_once_t uv_global_job_handle_init_guard_ = UV_ONCE_INIT; + + +static void uv__init_global_job_handle(void) { + /* Create a job object and set it up to kill all contained processes when + * it's closed. Since this handle is made non-inheritable and we're not + * giving it to anyone, we're the only process holding a reference to it. + * That means that if this process exits it is closed and all the processes + * it contains are killed. All processes created with uv_spawn that are not + * spawned with the UV_PROCESS_DETACHED flag are assigned to this job. + * + * We're setting the JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag so only the + * processes that we explicitly add are affected, and *their* subprocesses + * are not. This ensures that our child processes are not limited in their + * ability to use job control on Windows versions that don't deal with + * nested jobs (prior to Windows 8 / Server 2012). It also lets our child + * processes created detached processes without explicitly breaking away + * from job control (which uv_spawn doesn't, either). 
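As the comment notes, only non-detached children are put into this job object. A hypothetical caller that wants a child to outlive the parent therefore passes UV_PROCESS_DETACHED, which makes uv_spawn skip the AssignProcessToJobObject call further below; the program name here is an assumption.

#include <string.h>
#include "uv.h"

int spawn_detached_sketch(uv_loop_t* loop, uv_process_t* child) {
  uv_process_options_t options;
  char* args[] = { "background-worker.exe", NULL };   /* hypothetical */
  int err;

  memset(&options, 0, sizeof(options));
  options.file = args[0];
  options.args = args;
  options.flags = UV_PROCESS_DETACHED;   /* not assigned to the global job */
  options.exit_cb = NULL;

  err = uv_spawn(loop, child, &options);
  if (err == 0)
    uv_unref((uv_handle_t*) child);      /* let the loop exit while it runs */
  return err;
}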
+ */ + SECURITY_ATTRIBUTES attr; + JOBOBJECT_EXTENDED_LIMIT_INFORMATION info; + + memset(&attr, 0, sizeof attr); + attr.bInheritHandle = FALSE; + + memset(&info, 0, sizeof info); + info.BasicLimitInformation.LimitFlags = + JOB_OBJECT_LIMIT_BREAKAWAY_OK | + JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK | + JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION | + JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; + + uv_global_job_handle_ = CreateJobObjectW(&attr, NULL); + if (uv_global_job_handle_ == NULL) + uv_fatal_error(GetLastError(), "CreateJobObjectW"); + + if (!SetInformationJobObject(uv_global_job_handle_, + JobObjectExtendedLimitInformation, + &info, + sizeof info)) + uv_fatal_error(GetLastError(), "SetInformationJobObject"); +} + + +static int uv_utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) { + int ws_len, r; + WCHAR* ws; + + ws_len = MultiByteToWideChar(CP_UTF8, + 0, + s, + -1, + NULL, + 0); + if (ws_len <= 0) { + return GetLastError(); + } + + ws = (WCHAR*) uv__malloc(ws_len * sizeof(WCHAR)); + if (ws == NULL) { + return ERROR_OUTOFMEMORY; + } + + r = MultiByteToWideChar(CP_UTF8, + 0, + s, + -1, + ws, + ws_len); + assert(r == ws_len); + + *ws_ptr = ws; + return 0; +} + + +static void uv_process_init(uv_loop_t* loop, uv_process_t* handle) { + uv__handle_init(loop, (uv_handle_t*) handle, UV_PROCESS); + handle->exit_cb = NULL; + handle->pid = 0; + handle->exit_signal = 0; + handle->wait_handle = INVALID_HANDLE_VALUE; + handle->process_handle = INVALID_HANDLE_VALUE; + handle->child_stdio_buffer = NULL; + handle->exit_cb_pending = 0; + + uv_req_init(loop, (uv_req_t*)&handle->exit_req); + handle->exit_req.type = UV_PROCESS_EXIT; + handle->exit_req.data = handle; +} + + +/* + * Path search functions + */ + +/* + * Helper function for search_path + */ +static WCHAR* search_path_join_test(const WCHAR* dir, + size_t dir_len, + const WCHAR* name, + size_t name_len, + const WCHAR* ext, + size_t ext_len, + const WCHAR* cwd, + size_t cwd_len) { + WCHAR *result, *result_pos; + DWORD attrs; + if (dir_len > 2 && dir[0] == L'\\' && dir[1] == L'\\') { + /* It's a UNC path so ignore cwd */ + cwd_len = 0; + } else if (dir_len >= 1 && (dir[0] == L'/' || dir[0] == L'\\')) { + /* It's a full path without drive letter, use cwd's drive letter only */ + cwd_len = 2; + } else if (dir_len >= 2 && dir[1] == L':' && + (dir_len < 3 || (dir[2] != L'/' && dir[2] != L'\\'))) { + /* It's a relative path with drive letter (ext.g. D:../some/file) + * Replace drive letter in dir by full cwd if it points to the same drive, + * otherwise use the dir only. 
+ */ + if (cwd_len < 2 || _wcsnicmp(cwd, dir, 2) != 0) { + cwd_len = 0; + } else { + dir += 2; + dir_len -= 2; + } + } else if (dir_len > 2 && dir[1] == L':') { + /* It's an absolute path with drive letter + * Don't use the cwd at all + */ + cwd_len = 0; + } + + /* Allocate buffer for output */ + result = result_pos = (WCHAR*)uv__malloc(sizeof(WCHAR) * + (cwd_len + 1 + dir_len + 1 + name_len + 1 + ext_len + 1)); + + /* Copy cwd */ + wcsncpy(result_pos, cwd, cwd_len); + result_pos += cwd_len; + + /* Add a path separator if cwd didn't end with one */ + if (cwd_len && wcsrchr(L"\\/:", result_pos[-1]) == NULL) { + result_pos[0] = L'\\'; + result_pos++; + } + + /* Copy dir */ + wcsncpy(result_pos, dir, dir_len); + result_pos += dir_len; + + /* Add a separator if the dir didn't end with one */ + if (dir_len && wcsrchr(L"\\/:", result_pos[-1]) == NULL) { + result_pos[0] = L'\\'; + result_pos++; + } + + /* Copy filename */ + wcsncpy(result_pos, name, name_len); + result_pos += name_len; + + if (ext_len) { + /* Add a dot if the filename didn't end with one */ + if (name_len && result_pos[-1] != '.') { + result_pos[0] = L'.'; + result_pos++; + } + + /* Copy extension */ + wcsncpy(result_pos, ext, ext_len); + result_pos += ext_len; + } + + /* Null terminator */ + result_pos[0] = L'\0'; + + attrs = GetFileAttributesW(result); + + if (attrs != INVALID_FILE_ATTRIBUTES && + !(attrs & FILE_ATTRIBUTE_DIRECTORY)) { + return result; + } + + uv__free(result); + return NULL; +} + + +/* + * Helper function for search_path + */ +static WCHAR* path_search_walk_ext(const WCHAR *dir, + size_t dir_len, + const WCHAR *name, + size_t name_len, + WCHAR *cwd, + size_t cwd_len, + int name_has_ext) { + WCHAR* result; + + /* If the name itself has a nonempty extension, try this extension first */ + if (name_has_ext) { + result = search_path_join_test(dir, dir_len, + name, name_len, + L"", 0, + cwd, cwd_len); + if (result != NULL) { + return result; + } + } + + /* Try .com extension */ + result = search_path_join_test(dir, dir_len, + name, name_len, + L"com", 3, + cwd, cwd_len); + if (result != NULL) { + return result; + } + + /* Try .exe extension */ + result = search_path_join_test(dir, dir_len, + name, name_len, + L"exe", 3, + cwd, cwd_len); + if (result != NULL) { + return result; + } + + return NULL; +} + + +/* + * search_path searches the system path for an executable filename - + * the windows API doesn't provide this as a standalone function nor as an + * option to CreateProcess. + * + * It tries to return an absolute filename. + * + * Furthermore, it tries to follow the semantics that cmd.exe, with this + * exception that PATHEXT environment variable isn't used. Since CreateProcess + * can start only .com and .exe files, only those extensions are tried. This + * behavior equals that of msvcrt's spawn functions. + * + * - Do not search the path if the filename already contains a path (either + * relative or absolute). + * + * - If there's really only a filename, check the current directory for file, + * then search all path directories. + * + * - If filename specified has *any* extension, search for the file with the + * specified extension first. + * + * - If the literal filename is not found in a directory, try *appending* + * (not replacing) .com first and then .exe. + * + * - The path variable may contain relative paths; relative paths are relative + * to the cwd. + * + * - Directories in path may or may not end with a trailing backslash. 
+ * + * - CMD does not trim leading/trailing whitespace from path/pathex entries + * nor from the environment variables as a whole. + * + * - When cmd.exe cannot read a directory, it will just skip it and go on + * searching. However, unlike posix-y systems, it will happily try to run a + * file that is not readable/executable; if the spawn fails it will not + * continue searching. + * + * UNC path support: we are dealing with UNC paths in both the path and the + * filename. This is a deviation from what cmd.exe does (it does not let you + * start a program by specifying an UNC path on the command line) but this is + * really a pointless restriction. + * + */ +static WCHAR* search_path(const WCHAR *file, + WCHAR *cwd, + const WCHAR *path) { + int file_has_dir; + WCHAR* result = NULL; + WCHAR *file_name_start; + WCHAR *dot; + const WCHAR *dir_start, *dir_end, *dir_path; + size_t dir_len; + int name_has_ext; + + size_t file_len = wcslen(file); + size_t cwd_len = wcslen(cwd); + + /* If the caller supplies an empty filename, + * we're not gonna return c:\windows\.exe -- GFY! + */ + if (file_len == 0 + || (file_len == 1 && file[0] == L'.')) { + return NULL; + } + + /* Find the start of the filename so we can split the directory from the */ + /* name. */ + for (file_name_start = (WCHAR*)file + file_len; + file_name_start > file + && file_name_start[-1] != L'\\' + && file_name_start[-1] != L'/' + && file_name_start[-1] != L':'; + file_name_start--); + + file_has_dir = file_name_start != file; + + /* Check if the filename includes an extension */ + dot = wcschr(file_name_start, L'.'); + name_has_ext = (dot != NULL && dot[1] != L'\0'); + + if (file_has_dir) { + /* The file has a path inside, don't use path */ + result = path_search_walk_ext( + file, file_name_start - file, + file_name_start, file_len - (file_name_start - file), + cwd, cwd_len, + name_has_ext); + + } else { + dir_end = path; + + /* The file is really only a name; look in cwd first, then scan path */ + result = path_search_walk_ext(L"", 0, + file, file_len, + cwd, cwd_len, + name_has_ext); + + while (result == NULL) { + if (*dir_end == L'\0') { + break; + } + + /* Skip the separator that dir_end now points to */ + if (dir_end != path || *path == L';') { + dir_end++; + } + + /* Next slice starts just after where the previous one ended */ + dir_start = dir_end; + + /* Slice until the next ; or \0 is found */ + dir_end = wcschr(dir_start, L';'); + if (dir_end == NULL) { + dir_end = wcschr(dir_start, L'\0'); + } + + /* If the slice is zero-length, don't bother */ + if (dir_end - dir_start == 0) { + continue; + } + + dir_path = dir_start; + dir_len = dir_end - dir_start; + + /* Adjust if the path is quoted. 
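To make the probing order concrete, here is a small illustration with assumed values (it does not call the static search_path itself): for the name "wren" with cwd C:\work and PATH C:\bin;D:\tools, candidates are generated in the order printed below.

#include <stdio.h>

/* Sketch only: mirrors path_search_walk_ext's candidate order. The literal
 * name is tried first only when it already contains an extension; otherwise
 * .com and then .exe are appended (never substituted). */
static void print_candidates(const char* dir, const char* name, int has_ext) {
  if (has_ext)
    printf("%s\\%s\n", dir, name);
  printf("%s\\%s.com\n", dir, name);
  printf("%s\\%s.exe\n", dir, name);
}

int main(void) {
  print_candidates("C:\\work", "wren", 0);   /* cwd is probed first          */
  print_candidates("C:\\bin",  "wren", 0);   /* then each PATH entry in turn */
  print_candidates("D:\\tools", "wren", 0);
  return 0;
}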
*/ + if (dir_path[0] == '"' || dir_path[0] == '\'') { + ++dir_path; + --dir_len; + } + + if (dir_path[dir_len - 1] == '"' || dir_path[dir_len - 1] == '\'') { + --dir_len; + } + + result = path_search_walk_ext(dir_path, dir_len, + file, file_len, + cwd, cwd_len, + name_has_ext); + } + } + + return result; +} + + +/* + * Quotes command line arguments + * Returns a pointer to the end (next char to be written) of the buffer + */ +WCHAR* quote_cmd_arg(const WCHAR *source, WCHAR *target) { + size_t len = wcslen(source); + size_t i; + int quote_hit; + WCHAR* start; + + if (len == 0) { + /* Need double quotation for empty argument */ + *(target++) = L'"'; + *(target++) = L'"'; + return target; + } + + if (NULL == wcspbrk(source, L" \t\"")) { + /* No quotation needed */ + wcsncpy(target, source, len); + target += len; + return target; + } + + if (NULL == wcspbrk(source, L"\"\\")) { + /* + * No embedded double quotes or backlashes, so I can just wrap + * quote marks around the whole thing. + */ + *(target++) = L'"'; + wcsncpy(target, source, len); + target += len; + *(target++) = L'"'; + return target; + } + + /* + * Expected input/output: + * input : hello"world + * output: "hello\"world" + * input : hello""world + * output: "hello\"\"world" + * input : hello\world + * output: hello\world + * input : hello\\world + * output: hello\\world + * input : hello\"world + * output: "hello\\\"world" + * input : hello\\"world + * output: "hello\\\\\"world" + * input : hello world\ + * output: "hello world\" + */ + + *(target++) = L'"'; + start = target; + quote_hit = 1; + + for (i = len; i > 0; --i) { + *(target++) = source[i - 1]; + + if (quote_hit && source[i - 1] == L'\\') { + *(target++) = L'\\'; + } else if(source[i - 1] == L'"') { + quote_hit = 1; + *(target++) = L'\\'; + } else { + quote_hit = 0; + } + } + target[0] = L'\0'; + wcsrev(start); + *(target++) = L'"'; + return target; +} + + +int make_program_args(char** args, int verbatim_arguments, WCHAR** dst_ptr) { + char** arg; + WCHAR* dst = NULL; + WCHAR* temp_buffer = NULL; + size_t dst_len = 0; + size_t temp_buffer_len = 0; + WCHAR* pos; + int arg_count = 0; + int err = 0; + + /* Count the required size. */ + for (arg = args; *arg; arg++) { + DWORD arg_len; + + arg_len = MultiByteToWideChar(CP_UTF8, + 0, + *arg, + -1, + NULL, + 0); + if (arg_len == 0) { + return GetLastError(); + } + + dst_len += arg_len; + + if (arg_len > temp_buffer_len) + temp_buffer_len = arg_len; + + arg_count++; + } + + /* Adjust for potential quotes. Also assume the worst-case scenario */ + /* that every character needs escaping, so we need twice as much space. */ + dst_len = dst_len * 2 + arg_count * 2; + + /* Allocate buffer for the final command line. */ + dst = (WCHAR*) uv__malloc(dst_len * sizeof(WCHAR)); + if (dst == NULL) { + err = ERROR_OUTOFMEMORY; + goto error; + } + + /* Allocate temporary working buffer. */ + temp_buffer = (WCHAR*) uv__malloc(temp_buffer_len * sizeof(WCHAR)); + if (temp_buffer == NULL) { + err = ERROR_OUTOFMEMORY; + goto error; + } + + pos = dst; + for (arg = args; *arg; arg++) { + DWORD arg_len; + + /* Convert argument to wide char. */ + arg_len = MultiByteToWideChar(CP_UTF8, + 0, + *arg, + -1, + temp_buffer, + (int) (dst + dst_len - pos)); + if (arg_len == 0) { + err = GetLastError(); + goto error; + } + + if (verbatim_arguments) { + /* Copy verbatim. */ + wcscpy(pos, temp_buffer); + pos += arg_len - 1; + } else { + /* Quote/escape, if needed. */ + pos = quote_cmd_arg(temp_buffer, pos); + } + + *pos++ = *(arg + 1) ? 
L' ' : L'\0'; + } + + uv__free(temp_buffer); + + *dst_ptr = dst; + return 0; + +error: + uv__free(dst); + uv__free(temp_buffer); + return err; +} + + +int env_strncmp(const wchar_t* a, int na, const wchar_t* b) { + wchar_t* a_eq; + wchar_t* b_eq; + wchar_t* A; + wchar_t* B; + int nb; + int r; + + if (na < 0) { + a_eq = wcschr(a, L'='); + assert(a_eq); + na = (int)(long)(a_eq - a); + } else { + na--; + } + b_eq = wcschr(b, L'='); + assert(b_eq); + nb = b_eq - b; + + A = alloca((na+1) * sizeof(wchar_t)); + B = alloca((nb+1) * sizeof(wchar_t)); + + r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, a, na, A, na); + assert(r==na); + A[na] = L'\0'; + r = LCMapStringW(LOCALE_INVARIANT, LCMAP_UPPERCASE, b, nb, B, nb); + assert(r==nb); + B[nb] = L'\0'; + + while (1) { + wchar_t AA = *A++; + wchar_t BB = *B++; + if (AA < BB) { + return -1; + } else if (AA > BB) { + return 1; + } else if (!AA && !BB) { + return 0; + } + } +} + + +static int qsort_wcscmp(const void *a, const void *b) { + wchar_t* astr = *(wchar_t* const*)a; + wchar_t* bstr = *(wchar_t* const*)b; + return env_strncmp(astr, -1, bstr); +} + + +/* + * The way windows takes environment variables is different than what C does; + * Windows wants a contiguous block of null-terminated strings, terminated + * with an additional null. + * + * Windows has a few "essential" environment variables. winsock will fail + * to initialize if SYSTEMROOT is not defined; some APIs make reference to + * TEMP. SYSTEMDRIVE is probably also important. We therefore ensure that + * these get defined if the input environment block does not contain any + * values for them. + * + * Also add variables known to Cygwin to be required for correct + * subprocess operation in many cases: + * https://github.com/Alexpux/Cygwin/blob/b266b04fbbd3a595f02ea149e4306d3ab9b1fe3d/winsup/cygwin/environ.cc#L955 + * + */ +int make_program_env(char* env_block[], WCHAR** dst_ptr) { + WCHAR* dst; + WCHAR* ptr; + char** env; + size_t env_len = 0; + int len; + size_t i; + DWORD var_size; + size_t env_block_count = 1; /* 1 for null-terminator */ + WCHAR* dst_copy; + WCHAR** ptr_copy; + WCHAR** env_copy; + DWORD* required_vars_value_len = alloca(n_required_vars * sizeof(DWORD*)); + + /* first pass: determine size in UTF-16 */ + for (env = env_block; *env; env++) { + int len; + if (strchr(*env, '=')) { + len = MultiByteToWideChar(CP_UTF8, + 0, + *env, + -1, + NULL, + 0); + if (len <= 0) { + return GetLastError(); + } + env_len += len; + env_block_count++; + } + } + + /* second pass: copy to UTF-16 environment block */ + dst_copy = (WCHAR*)uv__malloc(env_len * sizeof(WCHAR)); + if (!dst_copy) { + return ERROR_OUTOFMEMORY; + } + env_copy = alloca(env_block_count * sizeof(WCHAR*)); + + ptr = dst_copy; + ptr_copy = env_copy; + for (env = env_block; *env; env++) { + if (strchr(*env, '=')) { + len = MultiByteToWideChar(CP_UTF8, + 0, + *env, + -1, + ptr, + (int) (env_len - (ptr - dst_copy))); + if (len <= 0) { + DWORD err = GetLastError(); + uv__free(dst_copy); + return err; + } + *ptr_copy++ = ptr; + ptr += len; + } + } + *ptr_copy = NULL; + assert(env_len == ptr - dst_copy); + + /* sort our (UTF-16) copy */ + qsort(env_copy, env_block_count-1, sizeof(wchar_t*), qsort_wcscmp); + + /* third pass: check for required variables */ + for (ptr_copy = env_copy, i = 0; i < n_required_vars; ) { + int cmp; + if (!*ptr_copy) { + cmp = -1; + } else { + cmp = env_strncmp(required_vars[i].wide_eq, + required_vars[i].len, + *ptr_copy); + } + if (cmp < 0) { + /* missing required var */ + var_size = 
GetEnvironmentVariableW(required_vars[i].wide, NULL, 0); + required_vars_value_len[i] = var_size; + if (var_size != 0) { + env_len += required_vars[i].len; + env_len += var_size; + } + i++; + } else { + ptr_copy++; + if (cmp == 0) + i++; + } + } + + /* final pass: copy, in sort order, and inserting required variables */ + dst = uv__malloc((1+env_len) * sizeof(WCHAR)); + if (!dst) { + uv__free(dst_copy); + return ERROR_OUTOFMEMORY; + } + + for (ptr = dst, ptr_copy = env_copy, i = 0; + *ptr_copy || i < n_required_vars; + ptr += len) { + int cmp; + if (i >= n_required_vars) { + cmp = 1; + } else if (!*ptr_copy) { + cmp = -1; + } else { + cmp = env_strncmp(required_vars[i].wide_eq, + required_vars[i].len, + *ptr_copy); + } + if (cmp < 0) { + /* missing required var */ + len = required_vars_value_len[i]; + if (len) { + wcscpy(ptr, required_vars[i].wide_eq); + ptr += required_vars[i].len; + var_size = GetEnvironmentVariableW(required_vars[i].wide, + ptr, + (int) (env_len - (ptr - dst))); + if (var_size != len-1) { /* race condition? */ + uv_fatal_error(GetLastError(), "GetEnvironmentVariableW"); + } + } + i++; + } else { + /* copy var from env_block */ + len = wcslen(*ptr_copy) + 1; + wmemcpy(ptr, *ptr_copy, len); + ptr_copy++; + if (cmp == 0) + i++; + } + } + + /* Terminate with an extra NULL. */ + assert(env_len == (ptr - dst)); + *ptr = L'\0'; + + uv__free(dst_copy); + *dst_ptr = dst; + return 0; +} + +/* + * Attempt to find the value of the PATH environment variable in the child's + * preprocessed environment. + * + * If found, a pointer into `env` is returned. If not found, NULL is returned. + */ +static WCHAR* find_path(WCHAR *env) { + for (; env != NULL && *env != 0; env += wcslen(env) + 1) { + if (wcsncmp(env, L"PATH=", 5) == 0) + return &env[5]; + } + + return NULL; +} + +/* + * Called on Windows thread-pool thread to indicate that + * a child process has exited. + */ +static void CALLBACK exit_wait_callback(void* data, BOOLEAN didTimeout) { + uv_process_t* process = (uv_process_t*) data; + uv_loop_t* loop = process->loop; + + assert(didTimeout == FALSE); + assert(process); + assert(!process->exit_cb_pending); + + process->exit_cb_pending = 1; + + /* Post completed */ + POST_COMPLETION_FOR_REQ(loop, &process->exit_req); +} + + +/* Called on main thread after a child process has exited. */ +void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) { + int64_t exit_code; + DWORD status; + + assert(handle->exit_cb_pending); + handle->exit_cb_pending = 0; + + /* If we're closing, don't call the exit callback. Just schedule a close */ + /* callback now. */ + if (handle->flags & UV__HANDLE_CLOSING) { + uv_want_endgame(loop, (uv_handle_t*) handle); + return; + } + + /* Unregister from process notification. */ + if (handle->wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(handle->wait_handle); + handle->wait_handle = INVALID_HANDLE_VALUE; + } + + /* Set the handle to inactive: no callbacks will be made after the exit */ + /* callback.*/ + uv__handle_stop(handle); + + if (GetExitCodeProcess(handle->process_handle, &status)) { + exit_code = status; + } else { + /* Unable to to obtain the exit code. This should never happen. */ + exit_code = uv_translate_sys_error(GetLastError()); + } + + /* Fire the exit callback. 
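For context on where this callback chain ends up on the caller's side: the exit_cb registered through uv_process_options_t runs on the loop thread once the thread-pool wait above posts the UV_PROCESS_EXIT request. A minimal hedged sketch of such a callback (the logging is an assumption):

#include <stdio.h>
#include "uv.h"

static void on_child_exit(uv_process_t* child,
                          int64_t exit_status,
                          int term_signal) {
  fprintf(stderr, "child %d exited, status=%lld signal=%d\n",
          child->pid, (long long) exit_status, term_signal);
  uv_close((uv_handle_t*) child, NULL);   /* handle stays active until closed */
}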
*/ + if (handle->exit_cb) { + handle->exit_cb(handle, exit_code, handle->exit_signal); + } +} + + +void uv_process_close(uv_loop_t* loop, uv_process_t* handle) { + uv__handle_closing(handle); + + if (handle->wait_handle != INVALID_HANDLE_VALUE) { + /* This blocks until either the wait was cancelled, or the callback has */ + /* completed. */ + BOOL r = UnregisterWaitEx(handle->wait_handle, INVALID_HANDLE_VALUE); + if (!r) { + /* This should never happen, and if it happens, we can't recover... */ + uv_fatal_error(GetLastError(), "UnregisterWaitEx"); + } + + handle->wait_handle = INVALID_HANDLE_VALUE; + } + + if (!handle->exit_cb_pending) { + uv_want_endgame(loop, (uv_handle_t*)handle); + } +} + + +void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle) { + assert(!handle->exit_cb_pending); + assert(handle->flags & UV__HANDLE_CLOSING); + assert(!(handle->flags & UV_HANDLE_CLOSED)); + + /* Clean-up the process handle. */ + CloseHandle(handle->process_handle); + + uv__handle_close(handle); +} + + +int uv_spawn(uv_loop_t* loop, + uv_process_t* process, + const uv_process_options_t* options) { + int i; + int err = 0; + WCHAR* path = NULL, *alloc_path = NULL; + BOOL result; + WCHAR* application_path = NULL, *application = NULL, *arguments = NULL, + *env = NULL, *cwd = NULL; + STARTUPINFOW startup; + PROCESS_INFORMATION info; + DWORD process_flags; + + uv_process_init(loop, process); + process->exit_cb = options->exit_cb; + + if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) { + return UV_ENOTSUP; + } + + if (options->file == NULL || + options->args == NULL) { + return UV_EINVAL; + } + + assert(options->file != NULL); + assert(!(options->flags & ~(UV_PROCESS_DETACHED | + UV_PROCESS_SETGID | + UV_PROCESS_SETUID | + UV_PROCESS_WINDOWS_HIDE | + UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS))); + + err = uv_utf8_to_utf16_alloc(options->file, &application); + if (err) + goto done; + + err = make_program_args( + options->args, + options->flags & UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS, + &arguments); + if (err) + goto done; + + if (options->env) { + err = make_program_env(options->env, &env); + if (err) + goto done; + } + + if (options->cwd) { + /* Explicit cwd */ + err = uv_utf8_to_utf16_alloc(options->cwd, &cwd); + if (err) + goto done; + + } else { + /* Inherit cwd */ + DWORD cwd_len, r; + + cwd_len = GetCurrentDirectoryW(0, NULL); + if (!cwd_len) { + err = GetLastError(); + goto done; + } + + cwd = (WCHAR*) uv__malloc(cwd_len * sizeof(WCHAR)); + if (cwd == NULL) { + err = ERROR_OUTOFMEMORY; + goto done; + } + + r = GetCurrentDirectoryW(cwd_len, cwd); + if (r == 0 || r >= cwd_len) { + err = GetLastError(); + goto done; + } + } + + /* Get PATH environment variable. */ + path = find_path(env); + if (path == NULL) { + DWORD path_len, r; + + path_len = GetEnvironmentVariableW(L"PATH", NULL, 0); + if (path_len == 0) { + err = GetLastError(); + goto done; + } + + alloc_path = (WCHAR*) uv__malloc(path_len * sizeof(WCHAR)); + if (alloc_path == NULL) { + err = ERROR_OUTOFMEMORY; + goto done; + } + path = alloc_path; + + r = GetEnvironmentVariableW(L"PATH", path, path_len); + if (r == 0 || r >= path_len) { + err = GetLastError(); + goto done; + } + } + + err = uv__stdio_create(loop, options, &process->child_stdio_buffer); + if (err) + goto done; + + application_path = search_path(application, + cwd, + path); + if (application_path == NULL) { + /* Not found. 
*/ + err = ERROR_FILE_NOT_FOUND; + goto done; + } + + startup.cb = sizeof(startup); + startup.lpReserved = NULL; + startup.lpDesktop = NULL; + startup.lpTitle = NULL; + startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW; + + startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer); + startup.lpReserved2 = (BYTE*) process->child_stdio_buffer; + + startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0); + startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1); + startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2); + + if (options->flags & UV_PROCESS_WINDOWS_HIDE) { + /* Use SW_HIDE to avoid any potential process window. */ + startup.wShowWindow = SW_HIDE; + } else { + startup.wShowWindow = SW_SHOWDEFAULT; + } + + process_flags = CREATE_UNICODE_ENVIRONMENT; + + if (options->flags & UV_PROCESS_DETACHED) { + /* Note that we're not setting the CREATE_BREAKAWAY_FROM_JOB flag. That + * means that libuv might not let you create a fully daemonized process + * when run under job control. However the type of job control that libuv + * itself creates doesn't trickle down to subprocesses so they can still + * daemonize. + * + * A reason to not do this is that CREATE_BREAKAWAY_FROM_JOB makes the + * CreateProcess call fail if we're under job control that doesn't allow + * breakaway. + */ + process_flags |= DETACHED_PROCESS | CREATE_NEW_PROCESS_GROUP; + } + + if (!CreateProcessW(application_path, + arguments, + NULL, + NULL, + 1, + process_flags, + env, + cwd, + &startup, + &info)) { + /* CreateProcessW failed. */ + err = GetLastError(); + goto done; + } + + /* Spawn succeeded */ + /* Beyond this point, failure is reported asynchronously. */ + + process->process_handle = info.hProcess; + process->pid = info.dwProcessId; + + /* If the process isn't spawned as detached, assign to the global job */ + /* object so windows will kill it when the parent process dies. */ + if (!(options->flags & UV_PROCESS_DETACHED)) { + uv_once(&uv_global_job_handle_init_guard_, uv__init_global_job_handle); + + if (!AssignProcessToJobObject(uv_global_job_handle_, info.hProcess)) { + /* AssignProcessToJobObject might fail if this process is under job + * control and the job doesn't have the + * JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK flag set, on a Windows version + * that doesn't support nested jobs. + * + * When that happens we just swallow the error and continue without + * establishing a kill-child-on-parent-exit relationship, otherwise + * there would be no way for libuv applications run under job control + * to spawn processes at all. + */ + DWORD err = GetLastError(); + if (err != ERROR_ACCESS_DENIED) + uv_fatal_error(err, "AssignProcessToJobObject"); + } + } + + /* Set IPC pid to all IPC pipes. */ + for (i = 0; i < options->stdio_count; i++) { + const uv_stdio_container_t* fdopt = &options->stdio[i]; + if (fdopt->flags & UV_CREATE_PIPE && + fdopt->data.stream->type == UV_NAMED_PIPE && + ((uv_pipe_t*) fdopt->data.stream)->ipc) { + ((uv_pipe_t*) fdopt->data.stream)->pipe.conn.ipc_pid = info.dwProcessId; + } + } + + /* Setup notifications for when the child process exits. */ + result = RegisterWaitForSingleObject(&process->wait_handle, + process->process_handle, exit_wait_callback, (void*)process, INFINITE, + WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE); + if (!result) { + uv_fatal_error(GetLastError(), "RegisterWaitForSingleObject"); + } + + CloseHandle(info.hThread); + + assert(!err); + + /* Make the handle active. 
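One caller-facing detail worth illustrating: failures up to this point are returned synchronously from uv_spawn as translated libuv error codes, while anything after CreateProcessW succeeds is reported through exit_cb. A hedged error-handling sketch:

#include <stdio.h>
#include "uv.h"

int spawn_checked_sketch(uv_loop_t* loop,
                         uv_process_t* child,
                         const uv_process_options_t* options) {
  int err = uv_spawn(loop, child, options);
  if (err != 0)                                 /* e.g. UV_ENOENT, UV_EACCES */
    fprintf(stderr, "uv_spawn: %s (%s)\n", uv_err_name(err), uv_strerror(err));
  return err;
}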
It will remain active until the exit callback */ + /* is made or the handle is closed, whichever happens first. */ + uv__handle_start(process); + + /* Cleanup, whether we succeeded or failed. */ + done: + uv__free(application); + uv__free(application_path); + uv__free(arguments); + uv__free(cwd); + uv__free(env); + uv__free(alloc_path); + + if (process->child_stdio_buffer != NULL) { + /* Clean up child stdio handles. */ + uv__stdio_destroy(process->child_stdio_buffer); + process->child_stdio_buffer = NULL; + } + + return uv_translate_sys_error(err); +} + + +static int uv__kill(HANDLE process_handle, int signum) { + switch (signum) { + case SIGTERM: + case SIGKILL: + case SIGINT: { + /* Unconditionally terminate the process. On Windows, killed processes */ + /* normally return 1. */ + DWORD status; + int err; + + if (TerminateProcess(process_handle, 1)) + return 0; + + /* If the process already exited before TerminateProcess was called, */ + /* TerminateProcess will fail with ERROR_ACCESS_DENIED. */ + err = GetLastError(); + if (err == ERROR_ACCESS_DENIED && + GetExitCodeProcess(process_handle, &status) && + status != STILL_ACTIVE) { + return UV_ESRCH; + } + + return uv_translate_sys_error(err); + } + + case 0: { + /* Health check: is the process still alive? */ + DWORD status; + + if (!GetExitCodeProcess(process_handle, &status)) + return uv_translate_sys_error(GetLastError()); + + if (status != STILL_ACTIVE) + return UV_ESRCH; + + return 0; + } + + default: + /* Unsupported signal. */ + return UV_ENOSYS; + } +} + + +int uv_process_kill(uv_process_t* process, int signum) { + int err; + + if (process->process_handle == INVALID_HANDLE_VALUE) { + return UV_EINVAL; + } + + err = uv__kill(process->process_handle, signum); + if (err) { + return err; /* err is already translated. */ + } + + process->exit_signal = signum; + + return 0; +} + + +int uv_kill(int pid, int signum) { + int err; + HANDLE process_handle = OpenProcess(PROCESS_TERMINATE | + PROCESS_QUERY_INFORMATION, FALSE, pid); + + if (process_handle == NULL) { + err = GetLastError(); + if (err == ERROR_INVALID_PARAMETER) { + return UV_ESRCH; + } else { + return uv_translate_sys_error(err); + } + } + + err = uv__kill(process_handle, signum); + CloseHandle(process_handle); + + return err; /* err is already translated. */ +} diff --git a/deps/libuv/src/win/req-inl.h b/deps/libuv/src/win/req-inl.h new file mode 100644 index 00000000..b5e502ee --- /dev/null +++ b/deps/libuv/src/win/req-inl.h @@ -0,0 +1,224 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
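Before moving on to the request helpers, a small caller-side sketch of the kill interface defined above: signum 0 is a pure liveness probe, and SIGTERM/SIGKILL/SIGINT all map to TerminateProcess. The function names are the public libuv API; the pid value is whatever the caller has.

#include <signal.h>
#include "uv.h"

int is_child_alive_sketch(int pid) {
  int err = uv_kill(pid, 0);      /* signum 0: health check only   */
  if (err == UV_ESRCH)
    return 0;                     /* already exited                */
  if (err == 0)
    return 1;                     /* still running                 */
  return err;                     /* some other (translated) error */
}

int terminate_child_sketch(uv_process_t* child) {
  return uv_process_kill(child, SIGTERM);   /* unconditional TerminateProcess */
}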
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_WIN_REQ_INL_H_ +#define UV_WIN_REQ_INL_H_ + +#include + +#include "uv.h" +#include "internal.h" + + +#define SET_REQ_STATUS(req, status) \ + (req)->u.io.overlapped.Internal = (ULONG_PTR) (status) + +#define SET_REQ_ERROR(req, error) \ + SET_REQ_STATUS((req), NTSTATUS_FROM_WIN32((error))) + +#define SET_REQ_SUCCESS(req) \ + SET_REQ_STATUS((req), STATUS_SUCCESS) + +#define GET_REQ_STATUS(req) \ + ((NTSTATUS) (req)->u.io.overlapped.Internal) + +#define REQ_SUCCESS(req) \ + (NT_SUCCESS(GET_REQ_STATUS((req)))) + +#define GET_REQ_ERROR(req) \ + (pRtlNtStatusToDosError(GET_REQ_STATUS((req)))) + +#define GET_REQ_SOCK_ERROR(req) \ + (uv_ntstatus_to_winsock_error(GET_REQ_STATUS((req)))) + + +#define REGISTER_HANDLE_REQ(loop, handle, req) \ + do { \ + INCREASE_ACTIVE_COUNT((loop), (handle)); \ + uv__req_register((loop), (req)); \ + } while (0) + +#define UNREGISTER_HANDLE_REQ(loop, handle, req) \ + do { \ + DECREASE_ACTIVE_COUNT((loop), (handle)); \ + uv__req_unregister((loop), (req)); \ + } while (0) + + +#define UV_SUCCEEDED_WITHOUT_IOCP(result) \ + ((result) && (handle->flags & UV_HANDLE_SYNC_BYPASS_IOCP)) + +#define UV_SUCCEEDED_WITH_IOCP(result) \ + ((result) || (GetLastError() == ERROR_IO_PENDING)) + + +#define POST_COMPLETION_FOR_REQ(loop, req) \ + if (!PostQueuedCompletionStatus((loop)->iocp, \ + 0, \ + 0, \ + &((req)->u.io.overlapped))) { \ + uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); \ + } + + +INLINE static void uv_req_init(uv_loop_t* loop, uv_req_t* req) { + req->type = UV_UNKNOWN_REQ; + SET_REQ_SUCCESS(req); +} + + +INLINE static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) { + return CONTAINING_RECORD(overlapped, uv_req_t, u.io.overlapped); +} + + +INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) { + req->next_req = NULL; + if (loop->pending_reqs_tail) { +#ifdef _DEBUG + /* Ensure the request is not already in the queue, or the queue + * will get corrupted. + */ + uv_req_t* current = loop->pending_reqs_tail; + do { + assert(req != current); + current = current->next_req; + } while(current != loop->pending_reqs_tail); +#endif + + req->next_req = loop->pending_reqs_tail->next_req; + loop->pending_reqs_tail->next_req = req; + loop->pending_reqs_tail = req; + } else { + req->next_req = req; + loop->pending_reqs_tail = req; + } +} + + +#define DELEGATE_STREAM_REQ(loop, req, method, handle_at) \ + do { \ + switch (((uv_handle_t*) (req)->handle_at)->type) { \ + case UV_TCP: \ + uv_process_tcp_##method##_req(loop, \ + (uv_tcp_t*) ((req)->handle_at), \ + req); \ + break; \ + \ + case UV_NAMED_PIPE: \ + uv_process_pipe_##method##_req(loop, \ + (uv_pipe_t*) ((req)->handle_at), \ + req); \ + break; \ + \ + case UV_TTY: \ + uv_process_tty_##method##_req(loop, \ + (uv_tty_t*) ((req)->handle_at), \ + req); \ + break; \ + \ + default: \ + assert(0); \ + } \ + } while (0) + + +INLINE static int uv_process_reqs(uv_loop_t* loop) { + uv_req_t* req; + uv_req_t* first; + uv_req_t* next; + + if (loop->pending_reqs_tail == NULL) + return 0; + + first = loop->pending_reqs_tail->next_req; + next = first; + loop->pending_reqs_tail = NULL; + + while (next != NULL) { + req = next; + next = req->next_req != first ? 
req->next_req : NULL; + + switch (req->type) { + case UV_READ: + DELEGATE_STREAM_REQ(loop, req, read, data); + break; + + case UV_WRITE: + DELEGATE_STREAM_REQ(loop, (uv_write_t*) req, write, handle); + break; + + case UV_ACCEPT: + DELEGATE_STREAM_REQ(loop, req, accept, data); + break; + + case UV_CONNECT: + DELEGATE_STREAM_REQ(loop, (uv_connect_t*) req, connect, handle); + break; + + case UV_SHUTDOWN: + /* Tcp shutdown requests don't come here. */ + assert(((uv_shutdown_t*) req)->handle->type == UV_NAMED_PIPE); + uv_process_pipe_shutdown_req( + loop, + (uv_pipe_t*) ((uv_shutdown_t*) req)->handle, + (uv_shutdown_t*) req); + break; + + case UV_UDP_RECV: + uv_process_udp_recv_req(loop, (uv_udp_t*) req->data, req); + break; + + case UV_UDP_SEND: + uv_process_udp_send_req(loop, + ((uv_udp_send_t*) req)->handle, + (uv_udp_send_t*) req); + break; + + case UV_WAKEUP: + uv_process_async_wakeup_req(loop, (uv_async_t*) req->data, req); + break; + + case UV_SIGNAL_REQ: + uv_process_signal_req(loop, (uv_signal_t*) req->data, req); + break; + + case UV_POLL_REQ: + uv_process_poll_req(loop, (uv_poll_t*) req->data, req); + break; + + case UV_PROCESS_EXIT: + uv_process_proc_exit(loop, (uv_process_t*) req->data); + break; + + case UV_FS_EVENT_REQ: + uv_process_fs_event_req(loop, req, (uv_fs_event_t*) req->data); + break; + + default: + assert(0); + } + } + + return 1; +} + +#endif /* UV_WIN_REQ_INL_H_ */ diff --git a/deps/libuv/src/win/req.c b/deps/libuv/src/win/req.c new file mode 100644 index 00000000..111cc5e2 --- /dev/null +++ b/deps/libuv/src/win/req.c @@ -0,0 +1,25 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include + +#include "uv.h" +#include "internal.h" diff --git a/deps/libuv/src/win/signal.c b/deps/libuv/src/win/signal.c new file mode 100644 index 00000000..2c64a55d --- /dev/null +++ b/deps/libuv/src/win/signal.c @@ -0,0 +1,356 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "req-inl.h" + + +RB_HEAD(uv_signal_tree_s, uv_signal_s); + +static struct uv_signal_tree_s uv__signal_tree = RB_INITIALIZER(uv__signal_tree); +static ssize_t volatile uv__signal_control_handler_refs = 0; +static CRITICAL_SECTION uv__signal_lock; + + +void uv_signals_init() { + InitializeCriticalSection(&uv__signal_lock); +} + + +static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) { + /* Compare signums first so all watchers with the same signnum end up */ + /* adjacent. */ + if (w1->signum < w2->signum) return -1; + if (w1->signum > w2->signum) return 1; + + /* Sort by loop pointer, so we can easily look up the first item after */ + /* { .signum = x, .loop = NULL } */ + if ((uintptr_t) w1->loop < (uintptr_t) w2->loop) return -1; + if ((uintptr_t) w1->loop > (uintptr_t) w2->loop) return 1; + + if ((uintptr_t) w1 < (uintptr_t) w2) return -1; + if ((uintptr_t) w1 > (uintptr_t) w2) return 1; + + return 0; +} + + +RB_GENERATE_STATIC(uv_signal_tree_s, uv_signal_s, tree_entry, uv__signal_compare); + + +/* + * Dispatches signal {signum} to all active uv_signal_t watchers in all loops. + * Returns 1 if the signal was dispatched to any watcher, or 0 if there were + * no active signal watchers observing this signal. + */ +int uv__signal_dispatch(int signum) { + uv_signal_t lookup; + uv_signal_t* handle; + int dispatched = 0; + + EnterCriticalSection(&uv__signal_lock); + + lookup.signum = signum; + lookup.loop = NULL; + + for (handle = RB_NFIND(uv_signal_tree_s, &uv__signal_tree, &lookup); + handle != NULL && handle->signum == signum; + handle = RB_NEXT(uv_signal_tree_s, &uv__signal_tree, handle)) { + unsigned long previous = InterlockedExchange( + (volatile LONG*) &handle->pending_signum, signum); + + if (!previous) { + POST_COMPLETION_FOR_REQ(handle->loop, &handle->signal_req); + } + + dispatched = 1; + } + + LeaveCriticalSection(&uv__signal_lock); + + return dispatched; +} + + +static BOOL WINAPI uv__signal_control_handler(DWORD type) { + switch (type) { + case CTRL_C_EVENT: + return uv__signal_dispatch(SIGINT); + + case CTRL_BREAK_EVENT: + return uv__signal_dispatch(SIGBREAK); + + case CTRL_CLOSE_EVENT: + if (uv__signal_dispatch(SIGHUP)) { + /* Windows will terminate the process after the control handler */ + /* returns. After that it will just terminate our process. 
Therefore */ + /* block the signal handler so the main loop has some time to pick */ + /* up the signal and do something for a few seconds. */ + Sleep(INFINITE); + return TRUE; + } + return FALSE; + + case CTRL_LOGOFF_EVENT: + case CTRL_SHUTDOWN_EVENT: + /* These signals are only sent to services. Services have their own */ + /* notification mechanism, so there's no point in handling these. */ + + default: + /* We don't handle these. */ + return FALSE; + } +} + + +static int uv__signal_register_control_handler() { + /* When this function is called, the uv__signal_lock must be held. */ + + /* If the console control handler has already been hooked, just add a */ + /* reference. */ + if (uv__signal_control_handler_refs > 0) { + uv__signal_control_handler_refs++; + return 0; + } + + if (!SetConsoleCtrlHandler(uv__signal_control_handler, TRUE)) + return GetLastError(); + + uv__signal_control_handler_refs++; + + return 0; +} + + +static void uv__signal_unregister_control_handler() { + /* When this function is called, the uv__signal_lock must be held. */ + BOOL r; + + /* Don't unregister if the number of console control handlers exceeds one. */ + /* Just remove a reference in that case. */ + if (uv__signal_control_handler_refs > 1) { + uv__signal_control_handler_refs--; + return; + } + + assert(uv__signal_control_handler_refs == 1); + + r = SetConsoleCtrlHandler(uv__signal_control_handler, FALSE); + /* This should never fail; if it does it is probably a bug in libuv. */ + assert(r); + + uv__signal_control_handler_refs--; +} + + +static int uv__signal_register(int signum) { + switch (signum) { + case SIGINT: + case SIGBREAK: + case SIGHUP: + return uv__signal_register_control_handler(); + + case SIGWINCH: + /* SIGWINCH is generated in tty.c. No need to register anything. */ + return 0; + + case SIGILL: + case SIGABRT_COMPAT: + case SIGFPE: + case SIGSEGV: + case SIGTERM: + case SIGABRT: + /* Signal is never raised. */ + return 0; + + default: + /* Invalid signal. */ + return ERROR_INVALID_PARAMETER; + } +} + + +static void uv__signal_unregister(int signum) { + switch (signum) { + case SIGINT: + case SIGBREAK: + case SIGHUP: + uv__signal_unregister_control_handler(); + return; + + case SIGWINCH: + /* SIGWINCH is generated in tty.c. No need to unregister anything. */ + return; + + case SIGILL: + case SIGABRT_COMPAT: + case SIGFPE: + case SIGSEGV: + case SIGTERM: + case SIGABRT: + /* Nothing is registered for this signal. */ + return; + + default: + /* Libuv bug. */ + assert(0 && "Invalid signum"); + return; + } +} + + +int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) { + uv_req_t* req; + + uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL); + handle->pending_signum = 0; + handle->signum = 0; + handle->signal_cb = NULL; + + req = &handle->signal_req; + uv_req_init(loop, req); + req->type = UV_SIGNAL_REQ; + req->data = handle; + + return 0; +} + + +int uv_signal_stop(uv_signal_t* handle) { + uv_signal_t* removed_handle; + + /* If the watcher wasn't started, this is a no-op. */ + if (handle->signum == 0) + return 0; + + EnterCriticalSection(&uv__signal_lock); + + uv__signal_unregister(handle->signum); + + removed_handle = RB_REMOVE(uv_signal_tree_s, &uv__signal_tree, handle); + assert(removed_handle == handle); + + LeaveCriticalSection(&uv__signal_lock); + + handle->signum = 0; + uv__handle_stop(handle); + + return 0; +} + + +int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) { + int err; + + /* If the user supplies signum == 0, then return an error already. 
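A caller-side sketch of the registration path described here, using the public API (the callback body is an assumption): starting a SIGINT watcher routes Ctrl+C from the console control handler into the loop.

#include <signal.h>
#include <stdio.h>
#include "uv.h"

static void on_sigint(uv_signal_t* handle, int signum) {
  fprintf(stderr, "caught signal %d\n", signum);
  uv_signal_stop(handle);                 /* infallible, see above */
}

int watch_sigint_sketch(uv_loop_t* loop, uv_signal_t* sig) {
  int err = uv_signal_init(loop, sig);
  if (err == 0)
    err = uv_signal_start(sig, on_sigint, SIGINT);
  return err;
}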
If the */ + /* signum is otherwise invalid then uv__signal_register will find out */ + /* eventually. */ + if (signum == 0) { + return UV_EINVAL; + } + + /* Short circuit: if the signal watcher is already watching {signum} don't */ + /* go through the process of deregistering and registering the handler. */ + /* Additionally, this avoids pending signals getting lost in the (small) */ + /* time frame that handle->signum == 0. */ + if (signum == handle->signum) { + handle->signal_cb = signal_cb; + return 0; + } + + /* If the signal handler was already active, stop it first. */ + if (handle->signum != 0) { + int r = uv_signal_stop(handle); + /* uv_signal_stop is infallible. */ + assert(r == 0); + } + + EnterCriticalSection(&uv__signal_lock); + + err = uv__signal_register(signum); + if (err) { + /* Uh-oh, didn't work. */ + LeaveCriticalSection(&uv__signal_lock); + return uv_translate_sys_error(err); + } + + handle->signum = signum; + RB_INSERT(uv_signal_tree_s, &uv__signal_tree, handle); + + LeaveCriticalSection(&uv__signal_lock); + + handle->signal_cb = signal_cb; + uv__handle_start(handle); + + return 0; +} + + +void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle, + uv_req_t* req) { + long dispatched_signum; + + assert(handle->type == UV_SIGNAL); + assert(req->type == UV_SIGNAL_REQ); + + dispatched_signum = InterlockedExchange( + (volatile LONG*) &handle->pending_signum, 0); + assert(dispatched_signum != 0); + + /* Check if the pending signal equals the signum that we are watching for. */ + /* These can get out of sync when the handler is stopped and restarted */ + /* while the signal_req is pending. */ + if (dispatched_signum == handle->signum) + handle->signal_cb(handle, dispatched_signum); + + if (handle->flags & UV__HANDLE_CLOSING) { + /* When it is closing, it must be stopped at this point. */ + assert(handle->signum == 0); + uv_want_endgame(loop, (uv_handle_t*) handle); + } +} + + +void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) { + uv_signal_stop(handle); + uv__handle_closing(handle); + + if (handle->pending_signum == 0) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } +} + + +void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle) { + assert(handle->flags & UV__HANDLE_CLOSING); + assert(!(handle->flags & UV_HANDLE_CLOSED)); + + assert(handle->signum == 0); + assert(handle->pending_signum == 0); + + handle->flags |= UV_HANDLE_CLOSED; + + uv__handle_close(handle); +} diff --git a/deps/libuv/src/win/snprintf.c b/deps/libuv/src/win/snprintf.c new file mode 100644 index 00000000..776c0e39 --- /dev/null +++ b/deps/libuv/src/win/snprintf.c @@ -0,0 +1,42 @@ +/* Copyright the libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#if defined(_MSC_VER) && _MSC_VER < 1900 + +#include +#include + +/* Emulate snprintf() on MSVC<2015, _snprintf() doesn't zero-terminate the buffer + * on overflow... + */ +int snprintf(char* buf, size_t len, const char* fmt, ...) { + int n; + va_list ap; + va_start(ap, fmt); + + n = _vscprintf(fmt, ap); + vsnprintf_s(buf, len, _TRUNCATE, fmt, ap); + + va_end(ap); + return n; +} + +#endif diff --git a/deps/libuv/src/win/stream-inl.h b/deps/libuv/src/win/stream-inl.h new file mode 100644 index 00000000..b7a3c119 --- /dev/null +++ b/deps/libuv/src/win/stream-inl.h @@ -0,0 +1,56 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_WIN_STREAM_INL_H_ +#define UV_WIN_STREAM_INL_H_ + +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "req-inl.h" + + +INLINE static void uv_stream_init(uv_loop_t* loop, + uv_stream_t* handle, + uv_handle_type type) { + uv__handle_init(loop, (uv_handle_t*) handle, type); + handle->write_queue_size = 0; + handle->activecnt = 0; +} + + +INLINE static void uv_connection_init(uv_stream_t* handle) { + handle->flags |= UV_HANDLE_CONNECTION; + handle->stream.conn.write_reqs_pending = 0; + + uv_req_init(handle->loop, (uv_req_t*) &(handle->read_req)); + handle->read_req.event_handle = NULL; + handle->read_req.wait_handle = INVALID_HANDLE_VALUE; + handle->read_req.type = UV_READ; + handle->read_req.data = handle; + + handle->stream.conn.shutdown_req = NULL; +} + + +#endif /* UV_WIN_STREAM_INL_H_ */ diff --git a/deps/libuv/src/win/stream.c b/deps/libuv/src/win/stream.c new file mode 100644 index 00000000..a2466e5e --- /dev/null +++ b/deps/libuv/src/win/stream.c @@ -0,0 +1,249 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "req-inl.h" + + +int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) { + int err; + + err = ERROR_INVALID_PARAMETER; + switch (stream->type) { + case UV_TCP: + err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb); + break; + case UV_NAMED_PIPE: + err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb); + break; + default: + assert(0); + } + + return uv_translate_sys_error(err); +} + + +int uv_accept(uv_stream_t* server, uv_stream_t* client) { + int err; + + err = ERROR_INVALID_PARAMETER; + switch (server->type) { + case UV_TCP: + err = uv_tcp_accept((uv_tcp_t*)server, (uv_tcp_t*)client); + break; + case UV_NAMED_PIPE: + err = uv_pipe_accept((uv_pipe_t*)server, client); + break; + default: + assert(0); + } + + return uv_translate_sys_error(err); +} + + +int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb, + uv_read_cb read_cb) { + int err; + + if (handle->flags & UV_HANDLE_READING) { + return UV_EALREADY; + } + + if (!(handle->flags & UV_HANDLE_READABLE)) { + return UV_ENOTCONN; + } + + err = ERROR_INVALID_PARAMETER; + switch (handle->type) { + case UV_TCP: + err = uv_tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb); + break; + case UV_NAMED_PIPE: + err = uv_pipe_read_start((uv_pipe_t*)handle, alloc_cb, read_cb); + break; + case UV_TTY: + err = uv_tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb); + break; + default: + assert(0); + } + + return uv_translate_sys_error(err); +} + + +int uv_read_stop(uv_stream_t* handle) { + int err; + + if (!(handle->flags & UV_HANDLE_READING)) + return 0; + + err = 0; + if (handle->type == UV_TTY) { + err = uv_tty_read_stop((uv_tty_t*) handle); + } else { + if (handle->type == UV_NAMED_PIPE) { + uv__pipe_stop_read((uv_pipe_t*) handle); + } else { + handle->flags &= ~UV_HANDLE_READING; + } + DECREASE_ACTIVE_COUNT(handle->loop, handle); + } + + return uv_translate_sys_error(err); +} + + +int uv_write(uv_write_t* req, + uv_stream_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_write_cb cb) { + uv_loop_t* loop = handle->loop; + int err; + + if (!(handle->flags & UV_HANDLE_WRITABLE)) { + return UV_EPIPE; + } + + err = ERROR_INVALID_PARAMETER; + switch (handle->type) { + case UV_TCP: + err = uv_tcp_write(loop, req, (uv_tcp_t*) handle, bufs, nbufs, cb); + break; + case UV_NAMED_PIPE: + err = uv_pipe_write(loop, req, (uv_pipe_t*) handle, bufs, nbufs, cb); + 
break; + case UV_TTY: + err = uv_tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb); + break; + default: + assert(0); + } + + return uv_translate_sys_error(err); +} + + +int uv_write2(uv_write_t* req, + uv_stream_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_stream_t* send_handle, + uv_write_cb cb) { + uv_loop_t* loop = handle->loop; + int err; + + if (!(handle->flags & UV_HANDLE_WRITABLE)) { + return UV_EPIPE; + } + + err = ERROR_INVALID_PARAMETER; + switch (handle->type) { + case UV_NAMED_PIPE: + err = uv_pipe_write2(loop, + req, + (uv_pipe_t*) handle, + bufs, + nbufs, + send_handle, + cb); + break; + default: + assert(0); + } + + return uv_translate_sys_error(err); +} + + +int uv_try_write(uv_stream_t* stream, + const uv_buf_t bufs[], + unsigned int nbufs) { + if (stream->flags & UV__HANDLE_CLOSING) + return UV_EBADF; + if (!(stream->flags & UV_HANDLE_WRITABLE)) + return UV_EPIPE; + + switch (stream->type) { + case UV_TCP: + return uv__tcp_try_write((uv_tcp_t*) stream, bufs, nbufs); + case UV_TTY: + return uv__tty_try_write((uv_tty_t*) stream, bufs, nbufs); + case UV_NAMED_PIPE: + return UV_EAGAIN; + default: + assert(0); + return UV_ENOSYS; + } +} + + +int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) { + uv_loop_t* loop = handle->loop; + + if (!(handle->flags & UV_HANDLE_WRITABLE)) { + return UV_EPIPE; + } + + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_SHUTDOWN; + req->handle = handle; + req->cb = cb; + + handle->flags &= ~UV_HANDLE_WRITABLE; + handle->stream.conn.shutdown_req = req; + handle->reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + + uv_want_endgame(loop, (uv_handle_t*)handle); + + return 0; +} + + +int uv_is_readable(const uv_stream_t* handle) { + return !!(handle->flags & UV_HANDLE_READABLE); +} + + +int uv_is_writable(const uv_stream_t* handle) { + return !!(handle->flags & UV_HANDLE_WRITABLE); +} + + +int uv_stream_set_blocking(uv_stream_t* handle, int blocking) { + if (handle->type != UV_NAMED_PIPE) + return UV_EINVAL; + + if (blocking != 0) + handle->flags |= UV_HANDLE_BLOCKING_WRITES; + else + handle->flags &= ~UV_HANDLE_BLOCKING_WRITES; + + return 0; +} diff --git a/deps/libuv/src/win/tcp.c b/deps/libuv/src/win/tcp.c new file mode 100644 index 00000000..0709696f --- /dev/null +++ b/deps/libuv/src/win/tcp.c @@ -0,0 +1,1510 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
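/* Illustrative aside (sketch only, not part of this patch): typical use of
 * uv_try_write() with a fall-back to a queued uv_write(), followed by a
 * shutdown. As implemented above, uv_try_write() on Windows always returns
 * UV_EAGAIN for named pipes, so the fall-back path matters. The names
 * send_then_shutdown, on_write, on_shutdown and the static request objects
 * are hypothetical; `data` must stay valid until on_write fires. */
#include <uv.h>

static uv_write_t write_req;
static uv_shutdown_t shutdown_req;

static void on_write(uv_write_t* req, int status) { (void) req; (void) status; }

static void on_shutdown(uv_shutdown_t* req, int status) {
  (void) status;
  uv_close((uv_handle_t*) req->handle, NULL);
}

static void send_then_shutdown(uv_stream_t* stream, char* data, unsigned int len) {
  uv_buf_t buf = uv_buf_init(data, len);
  int n = uv_try_write(stream, &buf, 1);
  if (n < 0 || (unsigned int) n < len) {
    /* Would block (UV_EAGAIN) or only partially written: queue the rest. */
    if (n < 0) n = 0;
    buf = uv_buf_init(data + n, len - (unsigned int) n);
    uv_write(&write_req, stream, &buf, 1, on_write);
  }
  /* uv_shutdown() waits for queued writes to drain before closing the
   * write side, as the endgame logic above shows. */
  uv_shutdown(&shutdown_req, stream, on_shutdown);
}
/* End of aside. */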
+ */ + +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "stream-inl.h" +#include "req-inl.h" + + +/* + * Threshold of active tcp streams for which to preallocate tcp read buffers. + * (Due to node slab allocator performing poorly under this pattern, + * the optimization is temporarily disabled (threshold=0). This will be + * revisited once node allocator is improved.) + */ +const unsigned int uv_active_tcp_streams_threshold = 0; + +/* + * Number of simultaneous pending AcceptEx calls. + */ +const unsigned int uv_simultaneous_server_accepts = 32; + +/* A zero-size buffer for use by uv_tcp_read */ +static char uv_zero_[] = ""; + +static int uv__tcp_nodelay(uv_tcp_t* handle, SOCKET socket, int enable) { + if (setsockopt(socket, + IPPROTO_TCP, + TCP_NODELAY, + (const char*)&enable, + sizeof enable) == -1) { + return WSAGetLastError(); + } + return 0; +} + + +static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsigned int delay) { + if (setsockopt(socket, + SOL_SOCKET, + SO_KEEPALIVE, + (const char*)&enable, + sizeof enable) == -1) { + return WSAGetLastError(); + } + + if (enable && setsockopt(socket, + IPPROTO_TCP, + TCP_KEEPALIVE, + (const char*)&delay, + sizeof delay) == -1) { + return WSAGetLastError(); + } + + return 0; +} + + +static int uv_tcp_set_socket(uv_loop_t* loop, + uv_tcp_t* handle, + SOCKET socket, + int family, + int imported) { + DWORD yes = 1; + int non_ifs_lsp; + int err; + + if (handle->socket != INVALID_SOCKET) + return UV_EBUSY; + + /* Set the socket to nonblocking mode */ + if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) { + return WSAGetLastError(); + } + + /* Make the socket non-inheritable */ + if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0)) + return GetLastError(); + + /* Associate it with the I/O completion port. */ + /* Use uv_handle_t pointer as completion key. */ + if (CreateIoCompletionPort((HANDLE)socket, + loop->iocp, + (ULONG_PTR)socket, + 0) == NULL) { + if (imported) { + handle->flags |= UV_HANDLE_EMULATE_IOCP; + } else { + return GetLastError(); + } + } + + if (family == AF_INET6) { + non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv6; + } else { + non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv4; + } + + if (pSetFileCompletionNotificationModes && + !(handle->flags & UV_HANDLE_EMULATE_IOCP) && !non_ifs_lsp) { + if (pSetFileCompletionNotificationModes((HANDLE) socket, + FILE_SKIP_SET_EVENT_ON_HANDLE | + FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) { + handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP; + } else if (GetLastError() != ERROR_INVALID_FUNCTION) { + return GetLastError(); + } + } + + if (handle->flags & UV_HANDLE_TCP_NODELAY) { + err = uv__tcp_nodelay(handle, socket, 1); + if (err) + return err; + } + + /* TODO: Use stored delay. 
*/ + if (handle->flags & UV_HANDLE_TCP_KEEPALIVE) { + err = uv__tcp_keepalive(handle, socket, 1, 60); + if (err) + return err; + } + + handle->socket = socket; + + if (family == AF_INET6) { + handle->flags |= UV_HANDLE_IPV6; + } else { + assert(!(handle->flags & UV_HANDLE_IPV6)); + } + + return 0; +} + + +int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) { + int domain; + + /* Use the lower 8 bits for the domain */ + domain = flags & 0xFF; + if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) + return UV_EINVAL; + + if (flags & ~0xFF) + return UV_EINVAL; + + uv_stream_init(loop, (uv_stream_t*) handle, UV_TCP); + handle->tcp.serv.accept_reqs = NULL; + handle->tcp.serv.pending_accepts = NULL; + handle->socket = INVALID_SOCKET; + handle->reqs_pending = 0; + handle->tcp.serv.func_acceptex = NULL; + handle->tcp.conn.func_connectex = NULL; + handle->tcp.serv.processed_accepts = 0; + handle->delayed_error = 0; + + /* If anything fails beyond this point we need to remove the handle from + * the handle queue, since it was added by uv__handle_init in uv_stream_init. + */ + + if (domain != AF_UNSPEC) { + SOCKET sock; + DWORD err; + + sock = socket(domain, SOCK_STREAM, 0); + if (sock == INVALID_SOCKET) { + err = WSAGetLastError(); + QUEUE_REMOVE(&handle->handle_queue); + return uv_translate_sys_error(err); + } + + err = uv_tcp_set_socket(handle->loop, handle, sock, domain, 0); + if (err) { + closesocket(sock); + QUEUE_REMOVE(&handle->handle_queue); + return uv_translate_sys_error(err); + } + + } + + return 0; +} + + +int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) { + return uv_tcp_init_ex(loop, handle, AF_UNSPEC); +} + + +void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) { + int err; + unsigned int i; + uv_tcp_accept_t* req; + + if (handle->flags & UV_HANDLE_CONNECTION && + handle->stream.conn.shutdown_req != NULL && + handle->stream.conn.write_reqs_pending == 0) { + + UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req); + + err = 0; + if (handle->flags & UV__HANDLE_CLOSING) { + err = ERROR_OPERATION_ABORTED; + } else if (shutdown(handle->socket, SD_SEND) == SOCKET_ERROR) { + err = WSAGetLastError(); + } + + if (handle->stream.conn.shutdown_req->cb) { + handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, + uv_translate_sys_error(err)); + } + + handle->stream.conn.shutdown_req = NULL; + DECREASE_PENDING_REQ_COUNT(handle); + return; + } + + if (handle->flags & UV__HANDLE_CLOSING && + handle->reqs_pending == 0) { + assert(!(handle->flags & UV_HANDLE_CLOSED)); + + if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) { + closesocket(handle->socket); + handle->socket = INVALID_SOCKET; + handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED; + } + + if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) { + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + for (i = 0; i < uv_simultaneous_server_accepts; i++) { + req = &handle->tcp.serv.accept_reqs[i]; + if (req->wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(req->wait_handle); + req->wait_handle = INVALID_HANDLE_VALUE; + } + if (req->event_handle) { + CloseHandle(req->event_handle); + req->event_handle = NULL; + } + } + } + + uv__free(handle->tcp.serv.accept_reqs); + handle->tcp.serv.accept_reqs = NULL; + } + + if (handle->flags & UV_HANDLE_CONNECTION && + handle->flags & UV_HANDLE_EMULATE_IOCP) { + if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(handle->read_req.wait_handle); + handle->read_req.wait_handle = 
INVALID_HANDLE_VALUE; + } + if (handle->read_req.event_handle) { + CloseHandle(handle->read_req.event_handle); + handle->read_req.event_handle = NULL; + } + } + + uv__handle_close(handle); + loop->active_tcp_streams--; + } +} + + +/* Unlike on Unix, here we don't set SO_REUSEADDR, because it doesn't just + * allow binding to addresses that are in use by sockets in TIME_WAIT, it + * effectively allows 'stealing' a port which is in use by another application. + * + * SO_EXCLUSIVEADDRUSE is also not good here because it does check all sockets, + * regardless of state, so we'd get an error even if the port is in use by a + * socket in TIME_WAIT state. + * + * See issue #1360. + * + */ +static int uv_tcp_try_bind(uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + DWORD err; + int r; + + if (handle->socket == INVALID_SOCKET) { + SOCKET sock; + + /* Cannot set IPv6-only mode on non-IPv6 socket. */ + if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6) + return ERROR_INVALID_PARAMETER; + + sock = socket(addr->sa_family, SOCK_STREAM, 0); + if (sock == INVALID_SOCKET) { + return WSAGetLastError(); + } + + err = uv_tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0); + if (err) { + closesocket(sock); + return err; + } + } + +#ifdef IPV6_V6ONLY + if (addr->sa_family == AF_INET6) { + int on; + + on = (flags & UV_TCP_IPV6ONLY) != 0; + + /* TODO: how to handle errors? This may fail if there is no ipv4 stack */ + /* available, or when run on XP/2003 which have no support for dualstack */ + /* sockets. For now we're silently ignoring the error. */ + setsockopt(handle->socket, + IPPROTO_IPV6, + IPV6_V6ONLY, + (const char*)&on, + sizeof on); + } +#endif + + r = bind(handle->socket, addr, addrlen); + + if (r == SOCKET_ERROR) { + err = WSAGetLastError(); + if (err == WSAEADDRINUSE) { + /* Some errors are not to be reported until connect() or listen() */ + handle->delayed_error = err; + } else { + return err; + } + } + + handle->flags |= UV_HANDLE_BOUND; + + return 0; +} + + +static void CALLBACK post_completion(void* context, BOOLEAN timed_out) { + uv_req_t* req; + uv_tcp_t* handle; + + req = (uv_req_t*) context; + assert(req != NULL); + handle = (uv_tcp_t*)req->data; + assert(handle != NULL); + assert(!timed_out); + + if (!PostQueuedCompletionStatus(handle->loop->iocp, + req->u.io.overlapped.InternalHigh, + 0, + &req->u.io.overlapped)) { + uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); + } +} + + +static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) { + uv_write_t* req; + uv_tcp_t* handle; + + req = (uv_write_t*) context; + assert(req != NULL); + handle = (uv_tcp_t*)req->handle; + assert(handle != NULL); + assert(!timed_out); + + if (!PostQueuedCompletionStatus(handle->loop->iocp, + req->u.io.overlapped.InternalHigh, + 0, + &req->u.io.overlapped)) { + uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus"); + } +} + + +static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) { + uv_loop_t* loop = handle->loop; + BOOL success; + DWORD bytes; + SOCKET accept_socket; + short family; + + assert(handle->flags & UV_HANDLE_LISTENING); + assert(req->accept_socket == INVALID_SOCKET); + + /* choose family and extension function */ + if (handle->flags & UV_HANDLE_IPV6) { + family = AF_INET6; + } else { + family = AF_INET; + } + + /* Open a socket for the accepted connection. 
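/* Illustrative aside (sketch, not part of this patch): the caller-side view
 * of the bind behaviour above. Because WSAEADDRINUSE is stashed in
 * delayed_error rather than returned immediately, uv_tcp_bind() itself may
 * succeed and the failure only surfaces from uv_listen() or uv_tcp_connect().
 * The names listen_on and on_new_connection are hypothetical. */
#include <stdio.h>
#include <uv.h>

static void on_new_connection(uv_stream_t* server, int status) {
  (void) server; (void) status;
}

static int listen_on(uv_loop_t* loop, uv_tcp_t* server, int port) {
  struct sockaddr_in addr;
  int err;

  uv_tcp_init(loop, server);
  uv_ip4_addr("0.0.0.0", port, &addr);

  err = uv_tcp_bind(server, (const struct sockaddr*) &addr, 0);
  if (err == 0)
    /* An in-use port is typically reported here, not by uv_tcp_bind(). */
    err = uv_listen((uv_stream_t*) server, 128, on_new_connection);
  if (err != 0)
    fprintf(stderr, "listen failed: %s\n", uv_strerror(err));
  return err;
}
/* End of aside. */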
*/ + accept_socket = socket(family, SOCK_STREAM, 0); + if (accept_socket == INVALID_SOCKET) { + SET_REQ_ERROR(req, WSAGetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + handle->reqs_pending++; + return; + } + + /* Make the socket non-inheritable */ + if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) { + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + handle->reqs_pending++; + closesocket(accept_socket); + return; + } + + /* Prepare the overlapped structure. */ + memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped)); + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1); + } + + success = handle->tcp.serv.func_acceptex(handle->socket, + accept_socket, + (void*)req->accept_buffer, + 0, + sizeof(struct sockaddr_storage), + sizeof(struct sockaddr_storage), + &bytes, + &req->u.io.overlapped); + + if (UV_SUCCEEDED_WITHOUT_IOCP(success)) { + /* Process the req without IOCP. */ + req->accept_socket = accept_socket; + handle->reqs_pending++; + uv_insert_pending_req(loop, (uv_req_t*)req); + } else if (UV_SUCCEEDED_WITH_IOCP(success)) { + /* The req will be processed with IOCP. */ + req->accept_socket = accept_socket; + handle->reqs_pending++; + if (handle->flags & UV_HANDLE_EMULATE_IOCP && + req->wait_handle == INVALID_HANDLE_VALUE && + !RegisterWaitForSingleObject(&req->wait_handle, + req->event_handle, post_completion, (void*) req, + INFINITE, WT_EXECUTEINWAITTHREAD)) { + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + handle->reqs_pending++; + return; + } + } else { + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, WSAGetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + handle->reqs_pending++; + /* Destroy the preallocated client socket. */ + closesocket(accept_socket); + /* Destroy the event handle */ + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + CloseHandle(req->u.io.overlapped.hEvent); + req->event_handle = NULL; + } + } +} + + +static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) { + uv_read_t* req; + uv_buf_t buf; + int result; + DWORD bytes, flags; + + assert(handle->flags & UV_HANDLE_READING); + assert(!(handle->flags & UV_HANDLE_READ_PENDING)); + + req = &handle->read_req; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + + /* + * Preallocate a read buffer if the number of active streams is below + * the threshold. + */ + if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) { + handle->flags &= ~UV_HANDLE_ZERO_READ; + handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer); + if (handle->tcp.conn.read_buffer.base == NULL || + handle->tcp.conn.read_buffer.len == 0) { + handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer); + return; + } + assert(handle->tcp.conn.read_buffer.base != NULL); + buf = handle->tcp.conn.read_buffer; + } else { + handle->flags |= UV_HANDLE_ZERO_READ; + buf.base = (char*) &uv_zero_; + buf.len = 0; + } + + /* Prepare the overlapped structure. 
*/ + memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped)); + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + assert(req->event_handle); + req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1); + } + + flags = 0; + result = WSARecv(handle->socket, + (WSABUF*)&buf, + 1, + &bytes, + &flags, + &req->u.io.overlapped, + NULL); + + if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { + /* Process the req without IOCP. */ + handle->flags |= UV_HANDLE_READ_PENDING; + req->u.io.overlapped.InternalHigh = bytes; + handle->reqs_pending++; + uv_insert_pending_req(loop, (uv_req_t*)req); + } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { + /* The req will be processed with IOCP. */ + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; + if (handle->flags & UV_HANDLE_EMULATE_IOCP && + req->wait_handle == INVALID_HANDLE_VALUE && + !RegisterWaitForSingleObject(&req->wait_handle, + req->event_handle, post_completion, (void*) req, + INFINITE, WT_EXECUTEINWAITTHREAD)) { + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + } + } else { + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, WSAGetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + handle->reqs_pending++; + } +} + + +int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) { + uv_loop_t* loop = handle->loop; + unsigned int i, simultaneous_accepts; + uv_tcp_accept_t* req; + int err; + + assert(backlog > 0); + + if (handle->flags & UV_HANDLE_LISTENING) { + handle->stream.serv.connection_cb = cb; + } + + if (handle->flags & UV_HANDLE_READING) { + return WSAEISCONN; + } + + if (handle->delayed_error) { + return handle->delayed_error; + } + + if (!(handle->flags & UV_HANDLE_BOUND)) { + err = uv_tcp_try_bind(handle, + (const struct sockaddr*) &uv_addr_ip4_any_, + sizeof(uv_addr_ip4_any_), + 0); + if (err) + return err; + if (handle->delayed_error) + return handle->delayed_error; + } + + if (!handle->tcp.serv.func_acceptex) { + if (!uv_get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) { + return WSAEAFNOSUPPORT; + } + } + + if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) && + listen(handle->socket, backlog) == SOCKET_ERROR) { + return WSAGetLastError(); + } + + handle->flags |= UV_HANDLE_LISTENING; + handle->stream.serv.connection_cb = cb; + INCREASE_ACTIVE_COUNT(loop, handle); + + simultaneous_accepts = handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT ? 1 + : uv_simultaneous_server_accepts; + + if(!handle->tcp.serv.accept_reqs) { + handle->tcp.serv.accept_reqs = (uv_tcp_accept_t*) + uv__malloc(uv_simultaneous_server_accepts * sizeof(uv_tcp_accept_t)); + if (!handle->tcp.serv.accept_reqs) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + for (i = 0; i < simultaneous_accepts; i++) { + req = &handle->tcp.serv.accept_reqs[i]; + uv_req_init(loop, (uv_req_t*)req); + req->type = UV_ACCEPT; + req->accept_socket = INVALID_SOCKET; + req->data = handle; + + req->wait_handle = INVALID_HANDLE_VALUE; + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + req->event_handle = CreateEvent(NULL, 0, 0, NULL); + if (!req->event_handle) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + } else { + req->event_handle = NULL; + } + + uv_tcp_queue_accept(handle, req); + } + + /* Initialize other unused requests too, because uv_tcp_endgame */ + /* doesn't know how how many requests were initialized, so it will */ + /* try to clean up {uv_simultaneous_server_accepts} requests. 
*/ + for (i = simultaneous_accepts; i < uv_simultaneous_server_accepts; i++) { + req = &handle->tcp.serv.accept_reqs[i]; + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_ACCEPT; + req->accept_socket = INVALID_SOCKET; + req->data = handle; + req->wait_handle = INVALID_HANDLE_VALUE; + req->event_handle = NULL; + } + } + + return 0; +} + + +int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) { + uv_loop_t* loop = server->loop; + int err = 0; + int family; + + uv_tcp_accept_t* req = server->tcp.serv.pending_accepts; + + if (!req) { + /* No valid connections found, so we error out. */ + return WSAEWOULDBLOCK; + } + + if (req->accept_socket == INVALID_SOCKET) { + return WSAENOTCONN; + } + + if (server->flags & UV_HANDLE_IPV6) { + family = AF_INET6; + } else { + family = AF_INET; + } + + err = uv_tcp_set_socket(client->loop, + client, + req->accept_socket, + family, + 0); + if (err) { + closesocket(req->accept_socket); + } else { + uv_connection_init((uv_stream_t*) client); + /* AcceptEx() implicitly binds the accepted socket. */ + client->flags |= UV_HANDLE_BOUND | UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; + } + + /* Prepare the req to pick up a new connection */ + server->tcp.serv.pending_accepts = req->next_pending; + req->next_pending = NULL; + req->accept_socket = INVALID_SOCKET; + + if (!(server->flags & UV__HANDLE_CLOSING)) { + /* Check if we're in a middle of changing the number of pending accepts. */ + if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) { + uv_tcp_queue_accept(server, req); + } else { + /* We better be switching to a single pending accept. */ + assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT); + + server->tcp.serv.processed_accepts++; + + if (server->tcp.serv.processed_accepts >= uv_simultaneous_server_accepts) { + server->tcp.serv.processed_accepts = 0; + /* + * All previously queued accept requests are now processed. + * We now switch to queueing just a single accept. + */ + uv_tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]); + server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING; + server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT; + } + } + } + + loop->active_tcp_streams++; + + return err; +} + + +int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb, + uv_read_cb read_cb) { + uv_loop_t* loop = handle->loop; + + handle->flags |= UV_HANDLE_READING; + handle->read_cb = read_cb; + handle->alloc_cb = alloc_cb; + INCREASE_ACTIVE_COUNT(loop, handle); + + /* If reading was stopped and then started again, there could still be a */ + /* read request pending. 
*/ + if (!(handle->flags & UV_HANDLE_READ_PENDING)) { + if (handle->flags & UV_HANDLE_EMULATE_IOCP && + !handle->read_req.event_handle) { + handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL); + if (!handle->read_req.event_handle) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + } + uv_tcp_queue_read(loop, handle); + } + + return 0; +} + + +static int uv_tcp_try_connect(uv_connect_t* req, + uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + uv_connect_cb cb) { + uv_loop_t* loop = handle->loop; + const struct sockaddr* bind_addr; + BOOL success; + DWORD bytes; + int err; + + if (handle->delayed_error) { + return handle->delayed_error; + } + + if (!(handle->flags & UV_HANDLE_BOUND)) { + if (addrlen == sizeof(uv_addr_ip4_any_)) { + bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_; + } else if (addrlen == sizeof(uv_addr_ip6_any_)) { + bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_; + } else { + abort(); + } + err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0); + if (err) + return err; + if (handle->delayed_error) + return handle->delayed_error; + } + + if (!handle->tcp.conn.func_connectex) { + if (!uv_get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) { + return WSAEAFNOSUPPORT; + } + } + + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_CONNECT; + req->handle = (uv_stream_t*) handle; + req->cb = cb; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + + success = handle->tcp.conn.func_connectex(handle->socket, + addr, + addrlen, + NULL, + 0, + &bytes, + &req->u.io.overlapped); + + if (UV_SUCCEEDED_WITHOUT_IOCP(success)) { + /* Process the req without IOCP. */ + handle->reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + uv_insert_pending_req(loop, (uv_req_t*)req); + } else if (UV_SUCCEEDED_WITH_IOCP(success)) { + /* The req will be processed with IOCP. */ + handle->reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + } else { + return WSAGetLastError(); + } + + return 0; +} + + +int uv_tcp_getsockname(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen) { + int result; + + if (handle->socket == INVALID_SOCKET) { + return UV_EINVAL; + } + + if (handle->delayed_error) { + return uv_translate_sys_error(handle->delayed_error); + } + + result = getsockname(handle->socket, name, namelen); + if (result != 0) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + +int uv_tcp_getpeername(const uv_tcp_t* handle, + struct sockaddr* name, + int* namelen) { + int result; + + if (handle->socket == INVALID_SOCKET) { + return UV_EINVAL; + } + + if (handle->delayed_error) { + return uv_translate_sys_error(handle->delayed_error); + } + + result = getpeername(handle->socket, name, namelen); + if (result != 0) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + +int uv_tcp_write(uv_loop_t* loop, + uv_write_t* req, + uv_tcp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_write_cb cb) { + int result; + DWORD bytes; + + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_WRITE; + req->handle = (uv_stream_t*) handle; + req->cb = cb; + + /* Prepare the overlapped structure. 
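/* Illustrative aside (sketch, not part of this patch): the public
 * uv_tcp_connect() call that ends up in uv_tcp_try_connect()/ConnectEx()
 * above. The names connect_req, on_connect and connect_to are hypothetical. */
#include <stdio.h>
#include <uv.h>

static uv_connect_t connect_req;

static void on_connect(uv_connect_t* req, int status) {
  if (status < 0) {
    fprintf(stderr, "connect failed: %s\n", uv_strerror(status));
    return;
  }
  /* req->handle is now readable/writable; start reading or writing here. */
  (void) req;
}

static void connect_to(uv_loop_t* loop, uv_tcp_t* handle, const char* ip, int port) {
  struct sockaddr_in addr;
  uv_tcp_init(loop, handle);
  uv_ip4_addr(ip, port, &addr);
  uv_tcp_connect(&connect_req, handle, (const struct sockaddr*) &addr, on_connect);
}
/* End of aside. */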
*/ + memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped)); + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + req->event_handle = CreateEvent(NULL, 0, 0, NULL); + if (!req->event_handle) { + uv_fatal_error(GetLastError(), "CreateEvent"); + } + req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1); + req->wait_handle = INVALID_HANDLE_VALUE; + } + + result = WSASend(handle->socket, + (WSABUF*) bufs, + nbufs, + &bytes, + 0, + &req->u.io.overlapped, + NULL); + + if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { + /* Request completed immediately. */ + req->u.io.queued_bytes = 0; + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + uv_insert_pending_req(loop, (uv_req_t*) req); + } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { + /* Request queued by the kernel. */ + req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs); + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + handle->write_queue_size += req->u.io.queued_bytes; + if (handle->flags & UV_HANDLE_EMULATE_IOCP && + !RegisterWaitForSingleObject(&req->wait_handle, + req->event_handle, post_write_completion, (void*) req, + INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) { + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + } + } else { + /* Send failed due to an error, report it later */ + req->u.io.queued_bytes = 0; + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + SET_REQ_ERROR(req, WSAGetLastError()); + uv_insert_pending_req(loop, (uv_req_t*) req); + } + + return 0; +} + + +int uv__tcp_try_write(uv_tcp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs) { + int result; + DWORD bytes; + + if (handle->stream.conn.write_reqs_pending > 0) + return UV_EAGAIN; + + result = WSASend(handle->socket, + (WSABUF*) bufs, + nbufs, + &bytes, + 0, + NULL, + NULL); + + if (result == SOCKET_ERROR) + return uv_translate_sys_error(WSAGetLastError()); + else + return bytes; +} + + +void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, + uv_req_t* req) { + DWORD bytes, flags, err; + uv_buf_t buf; + + assert(handle->type == UV_TCP); + + handle->flags &= ~UV_HANDLE_READ_PENDING; + + if (!REQ_SUCCESS(req)) { + /* An error occurred doing the read. */ + if ((handle->flags & UV_HANDLE_READING) || + !(handle->flags & UV_HANDLE_ZERO_READ)) { + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + buf = (handle->flags & UV_HANDLE_ZERO_READ) ? + uv_buf_init(NULL, 0) : handle->tcp.conn.read_buffer; + + err = GET_REQ_SOCK_ERROR(req); + + if (err == WSAECONNABORTED) { + /* + * Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix. + */ + err = WSAECONNRESET; + } + + handle->read_cb((uv_stream_t*)handle, + uv_translate_sys_error(err), + &buf); + } + } else { + if (!(handle->flags & UV_HANDLE_ZERO_READ)) { + /* The read was done with a non-zero buffer length. 
*/ + if (req->u.io.overlapped.InternalHigh > 0) { + /* Successful read */ + handle->read_cb((uv_stream_t*)handle, + req->u.io.overlapped.InternalHigh, + &handle->tcp.conn.read_buffer); + /* Read again only if bytes == buf.len */ + if (req->u.io.overlapped.InternalHigh < handle->tcp.conn.read_buffer.len) { + goto done; + } + } else { + /* Connection closed */ + if (handle->flags & UV_HANDLE_READING) { + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + } + handle->flags &= ~UV_HANDLE_READABLE; + + buf.base = 0; + buf.len = 0; + handle->read_cb((uv_stream_t*)handle, UV_EOF, &handle->tcp.conn.read_buffer); + goto done; + } + } + + /* Do nonblocking reads until the buffer is empty */ + while (handle->flags & UV_HANDLE_READING) { + buf = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 65536, &buf); + if (buf.base == NULL || buf.len == 0) { + handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf); + break; + } + assert(buf.base != NULL); + + flags = 0; + if (WSARecv(handle->socket, + (WSABUF*)&buf, + 1, + &bytes, + &flags, + NULL, + NULL) != SOCKET_ERROR) { + if (bytes > 0) { + /* Successful read */ + handle->read_cb((uv_stream_t*)handle, bytes, &buf); + /* Read again only if bytes == buf.len */ + if (bytes < buf.len) { + break; + } + } else { + /* Connection closed */ + handle->flags &= ~(UV_HANDLE_READING | UV_HANDLE_READABLE); + DECREASE_ACTIVE_COUNT(loop, handle); + + handle->read_cb((uv_stream_t*)handle, UV_EOF, &buf); + break; + } + } else { + err = WSAGetLastError(); + if (err == WSAEWOULDBLOCK) { + /* Read buffer was completely empty, report a 0-byte read. */ + handle->read_cb((uv_stream_t*)handle, 0, &buf); + } else { + /* Ouch! serious error. */ + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + + if (err == WSAECONNABORTED) { + /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with */ + /* Unix. */ + err = WSAECONNRESET; + } + + handle->read_cb((uv_stream_t*)handle, + uv_translate_sys_error(err), + &buf); + } + break; + } + } + +done: + /* Post another read if still reading and not closing. 
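/* Illustrative aside (sketch, not part of this patch): a read-callback pair
 * matching the contract implemented above -- nread > 0 is data, nread == 0 is
 * a harmless "no data right now", UV_EOF means the peer closed, and any other
 * negative value (including UV_ENOBUFS when the alloc callback hands back an
 * empty buffer) is an error. The callback names are hypothetical. */
#include <stdlib.h>
#include <uv.h>

static void alloc_cb(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  (void) handle;
  /* `suggested` is 65536 in the code above. */
  *buf = uv_buf_init(malloc(suggested), (unsigned int) suggested);
}

static void read_cb(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
  if (nread > 0) {
    /* Consume buf->base[0 .. nread). */
  } else if (nread < 0) {
    /* UV_EOF, UV_ECONNRESET, UV_ENOBUFS, ... */
    uv_read_stop(stream);
    uv_close((uv_handle_t*) stream, NULL);
  }
  /* nread == 0: nothing to do; libuv will call alloc_cb/read_cb again. */
  free(buf->base);
}
/* End of aside. */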
*/ + if ((handle->flags & UV_HANDLE_READING) && + !(handle->flags & UV_HANDLE_READ_PENDING)) { + uv_tcp_queue_read(loop, handle); + } + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle, + uv_write_t* req) { + int err; + + assert(handle->type == UV_TCP); + + assert(handle->write_queue_size >= req->u.io.queued_bytes); + handle->write_queue_size -= req->u.io.queued_bytes; + + UNREGISTER_HANDLE_REQ(loop, handle, req); + + if (handle->flags & UV_HANDLE_EMULATE_IOCP) { + if (req->wait_handle != INVALID_HANDLE_VALUE) { + UnregisterWait(req->wait_handle); + req->wait_handle = INVALID_HANDLE_VALUE; + } + if (req->event_handle) { + CloseHandle(req->event_handle); + req->event_handle = NULL; + } + } + + if (req->cb) { + err = uv_translate_sys_error(GET_REQ_SOCK_ERROR(req)); + if (err == UV_ECONNABORTED) { + /* use UV_ECANCELED for consistency with Unix */ + err = UV_ECANCELED; + } + req->cb(req, err); + } + + handle->stream.conn.write_reqs_pending--; + if (handle->stream.conn.shutdown_req != NULL && + handle->stream.conn.write_reqs_pending == 0) { + uv_want_endgame(loop, (uv_handle_t*)handle); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle, + uv_req_t* raw_req) { + uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req; + int err; + + assert(handle->type == UV_TCP); + + /* If handle->accepted_socket is not a valid socket, then */ + /* uv_queue_accept must have failed. This is a serious error. We stop */ + /* accepting connections and report this error to the connection */ + /* callback. */ + if (req->accept_socket == INVALID_SOCKET) { + if (handle->flags & UV_HANDLE_LISTENING) { + handle->flags &= ~UV_HANDLE_LISTENING; + DECREASE_ACTIVE_COUNT(loop, handle); + if (handle->stream.serv.connection_cb) { + err = GET_REQ_SOCK_ERROR(req); + handle->stream.serv.connection_cb((uv_stream_t*)handle, + uv_translate_sys_error(err)); + } + } + } else if (REQ_SUCCESS(req) && + setsockopt(req->accept_socket, + SOL_SOCKET, + SO_UPDATE_ACCEPT_CONTEXT, + (char*)&handle->socket, + sizeof(handle->socket)) == 0) { + req->next_pending = handle->tcp.serv.pending_accepts; + handle->tcp.serv.pending_accepts = req; + + /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */ + if (handle->stream.serv.connection_cb) { + handle->stream.serv.connection_cb((uv_stream_t*)handle, 0); + } + } else { + /* Error related to accepted socket is ignored because the server */ + /* socket may still be healthy. If the server socket is broken */ + /* uv_queue_accept will detect it. 
*/ + closesocket(req->accept_socket); + req->accept_socket = INVALID_SOCKET; + if (handle->flags & UV_HANDLE_LISTENING) { + uv_tcp_queue_accept(handle, req); + } + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle, + uv_connect_t* req) { + int err; + + assert(handle->type == UV_TCP); + + UNREGISTER_HANDLE_REQ(loop, handle, req); + + err = 0; + if (REQ_SUCCESS(req)) { + if (setsockopt(handle->socket, + SOL_SOCKET, + SO_UPDATE_CONNECT_CONTEXT, + NULL, + 0) == 0) { + uv_connection_init((uv_stream_t*)handle); + handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; + loop->active_tcp_streams++; + } else { + err = WSAGetLastError(); + } + } else { + err = GET_REQ_SOCK_ERROR(req); + } + req->cb(req, uv_translate_sys_error(err)); + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +int uv_tcp_import(uv_tcp_t* tcp, uv__ipc_socket_info_ex* socket_info_ex, + int tcp_connection) { + int err; + SOCKET socket = WSASocketW(FROM_PROTOCOL_INFO, + FROM_PROTOCOL_INFO, + FROM_PROTOCOL_INFO, + &socket_info_ex->socket_info, + 0, + WSA_FLAG_OVERLAPPED); + + if (socket == INVALID_SOCKET) { + return WSAGetLastError(); + } + + err = uv_tcp_set_socket(tcp->loop, + tcp, + socket, + socket_info_ex->socket_info.iAddressFamily, + 1); + if (err) { + closesocket(socket); + return err; + } + + if (tcp_connection) { + uv_connection_init((uv_stream_t*)tcp); + tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; + } + + tcp->flags |= UV_HANDLE_BOUND; + tcp->flags |= UV_HANDLE_SHARED_TCP_SOCKET; + + tcp->delayed_error = socket_info_ex->delayed_error; + + tcp->loop->active_tcp_streams++; + return 0; +} + + +int uv_tcp_nodelay(uv_tcp_t* handle, int enable) { + int err; + + if (handle->socket != INVALID_SOCKET) { + err = uv__tcp_nodelay(handle, handle->socket, enable); + if (err) + return err; + } + + if (enable) { + handle->flags |= UV_HANDLE_TCP_NODELAY; + } else { + handle->flags &= ~UV_HANDLE_TCP_NODELAY; + } + + return 0; +} + + +int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) { + int err; + + if (handle->socket != INVALID_SOCKET) { + err = uv__tcp_keepalive(handle, handle->socket, enable, delay); + if (err) + return err; + } + + if (enable) { + handle->flags |= UV_HANDLE_TCP_KEEPALIVE; + } else { + handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE; + } + + /* TODO: Store delay if handle->socket isn't created yet. */ + + return 0; +} + + +int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid, + LPWSAPROTOCOL_INFOW protocol_info) { + if (!(handle->flags & UV_HANDLE_CONNECTION)) { + /* + * We're about to share the socket with another process. Because + * this is a listening socket, we assume that the other process will + * be accepting connections on it. So, before sharing the socket + * with another process, we call listen here in the parent process. + */ + + if (!(handle->flags & UV_HANDLE_LISTENING)) { + if (!(handle->flags & UV_HANDLE_BOUND)) { + return ERROR_INVALID_PARAMETER; + } + + if (!(handle->delayed_error)) { + if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) { + handle->delayed_error = WSAGetLastError(); + } + } + } + } + + if (WSADuplicateSocketW(handle->socket, pid, protocol_info)) { + return WSAGetLastError(); + } + + handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET; + + return 0; +} + + +int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) { + if (handle->flags & UV_HANDLE_CONNECTION) { + return UV_EINVAL; + } + + /* Check if we're already in the desired mode. 
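/* Illustrative aside (sketch, not part of this patch): the socket options
 * above from the caller's side. If the handle has no socket yet, only the
 * flags are cached and they are applied later by uv_tcp_set_socket() -- note
 * that path currently hard-codes a 60-second keepalive delay, per the TODO
 * above. tune_tcp is a hypothetical name. */
#include <uv.h>

static void tune_tcp(uv_tcp_t* handle) {
  uv_tcp_nodelay(handle, 1);              /* Disable Nagle's algorithm. */
  uv_tcp_keepalive(handle, 1, 60);        /* Keepalive probes after 60s idle. */
  /* Listening handles only: switch to a single pending AcceptEx call. */
  uv_tcp_simultaneous_accepts(handle, 0);
}
/* End of aside. */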
*/ + if ((enable && !(handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) || + (!enable && handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) { + return 0; + } + + /* Don't allow switching from single pending accept to many. */ + if (enable) { + return UV_ENOTSUP; + } + + /* Check if we're in a middle of changing the number of pending accepts. */ + if (handle->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING) { + return 0; + } + + handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT; + + /* Flip the changing flag if we have already queued multiple accepts. */ + if (handle->flags & UV_HANDLE_LISTENING) { + handle->flags |= UV_HANDLE_TCP_ACCEPT_STATE_CHANGING; + } + + return 0; +} + + +static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) { + SOCKET socket = tcp->socket; + int non_ifs_lsp; + + /* Check if we have any non-IFS LSPs stacked on top of TCP */ + non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 : + uv_tcp_non_ifs_lsp_ipv4; + + /* If there are non-ifs LSPs then try to obtain a base handle for the */ + /* socket. This will always fail on Windows XP/3k. */ + if (non_ifs_lsp) { + DWORD bytes; + if (WSAIoctl(socket, + SIO_BASE_HANDLE, + NULL, + 0, + &socket, + sizeof socket, + &bytes, + NULL, + NULL) != 0) { + /* Failed. We can't do CancelIo. */ + return -1; + } + } + + assert(socket != 0 && socket != INVALID_SOCKET); + + if (!CancelIo((HANDLE) socket)) { + return GetLastError(); + } + + /* It worked. */ + return 0; +} + + +void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) { + int close_socket = 1; + + if (tcp->flags & UV_HANDLE_READ_PENDING) { + /* In order for winsock to do a graceful close there must not be any */ + /* any pending reads, or the socket must be shut down for writing */ + if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) { + /* Just do shutdown on non-shared sockets, which ensures graceful close. */ + shutdown(tcp->socket, SD_SEND); + + } else if (uv_tcp_try_cancel_io(tcp) == 0) { + /* In case of a shared socket, we try to cancel all outstanding I/O, */ + /* If that works, don't close the socket yet - wait for the read req to */ + /* return and close the socket in uv_tcp_endgame. */ + close_socket = 0; + + } else { + /* When cancelling isn't possible - which could happen when an LSP is */ + /* present on an old Windows version, we will have to close the socket */ + /* with a read pending. That is not nice because trailing sent bytes */ + /* may not make it to the other side. */ + } + + } else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) && + tcp->tcp.serv.accept_reqs != NULL) { + /* Under normal circumstances closesocket() will ensure that all pending */ + /* accept reqs are canceled. However, when the socket is shared the */ + /* presence of another reference to the socket in another process will */ + /* keep the accept reqs going, so we have to ensure that these are */ + /* canceled. */ + if (uv_tcp_try_cancel_io(tcp) != 0) { + /* When cancellation is not possible, there is another option: we can */ + /* close the incoming sockets, which will also cancel the accept */ + /* operations. However this is not cool because we might inadvertently */ + /* close a socket that just accepted a new connection, which will */ + /* cause the connection to be aborted. 
*/ + unsigned int i; + for (i = 0; i < uv_simultaneous_server_accepts; i++) { + uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i]; + if (req->accept_socket != INVALID_SOCKET && + !HasOverlappedIoCompleted(&req->u.io.overlapped)) { + closesocket(req->accept_socket); + req->accept_socket = INVALID_SOCKET; + } + } + } + } + + if (tcp->flags & UV_HANDLE_READING) { + tcp->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, tcp); + } + + if (tcp->flags & UV_HANDLE_LISTENING) { + tcp->flags &= ~UV_HANDLE_LISTENING; + DECREASE_ACTIVE_COUNT(loop, tcp); + } + + if (close_socket) { + closesocket(tcp->socket); + tcp->socket = INVALID_SOCKET; + tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED; + } + + tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE); + uv__handle_closing(tcp); + + if (tcp->reqs_pending == 0) { + uv_want_endgame(tcp->loop, (uv_handle_t*)tcp); + } +} + + +int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) { + WSAPROTOCOL_INFOW protocol_info; + int opt_len; + int err; + + /* Detect the address family of the socket. */ + opt_len = (int) sizeof protocol_info; + if (getsockopt(sock, + SOL_SOCKET, + SO_PROTOCOL_INFOW, + (char*) &protocol_info, + &opt_len) == SOCKET_ERROR) { + return uv_translate_sys_error(GetLastError()); + } + + err = uv_tcp_set_socket(handle->loop, + handle, + sock, + protocol_info.iAddressFamily, + 1); + if (err) { + return uv_translate_sys_error(err); + } + + return 0; +} + + +/* This function is an egress point, i.e. it returns libuv errors rather than + * system errors. + */ +int uv__tcp_bind(uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + int err; + + err = uv_tcp_try_bind(handle, addr, addrlen, flags); + if (err) + return uv_translate_sys_error(err); + + return 0; +} + + +/* This function is an egress point, i.e. it returns libuv errors rather than + * system errors. + */ +int uv__tcp_connect(uv_connect_t* req, + uv_tcp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + uv_connect_cb cb) { + int err; + + err = uv_tcp_try_connect(req, handle, addr, addrlen, cb); + if (err) + return uv_translate_sys_error(err); + + return 0; +} diff --git a/deps/libuv/src/win/thread.c b/deps/libuv/src/win/thread.c new file mode 100644 index 00000000..91684e93 --- /dev/null +++ b/deps/libuv/src/win/thread.c @@ -0,0 +1,697 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include +#include +#include + +#include "uv.h" +#include "internal.h" + + +#define HAVE_CONDVAR_API() (pInitializeConditionVariable != NULL) + +static int uv_cond_fallback_init(uv_cond_t* cond); +static void uv_cond_fallback_destroy(uv_cond_t* cond); +static void uv_cond_fallback_signal(uv_cond_t* cond); +static void uv_cond_fallback_broadcast(uv_cond_t* cond); +static void uv_cond_fallback_wait(uv_cond_t* cond, uv_mutex_t* mutex); +static int uv_cond_fallback_timedwait(uv_cond_t* cond, + uv_mutex_t* mutex, uint64_t timeout); + +static int uv_cond_condvar_init(uv_cond_t* cond); +static void uv_cond_condvar_destroy(uv_cond_t* cond); +static void uv_cond_condvar_signal(uv_cond_t* cond); +static void uv_cond_condvar_broadcast(uv_cond_t* cond); +static void uv_cond_condvar_wait(uv_cond_t* cond, uv_mutex_t* mutex); +static int uv_cond_condvar_timedwait(uv_cond_t* cond, + uv_mutex_t* mutex, uint64_t timeout); + + +static void uv__once_inner(uv_once_t* guard, void (*callback)(void)) { + DWORD result; + HANDLE existing_event, created_event; + + created_event = CreateEvent(NULL, 1, 0, NULL); + if (created_event == 0) { + /* Could fail in a low-memory situation? */ + uv_fatal_error(GetLastError(), "CreateEvent"); + } + + existing_event = InterlockedCompareExchangePointer(&guard->event, + created_event, + NULL); + + if (existing_event == NULL) { + /* We won the race */ + callback(); + + result = SetEvent(created_event); + assert(result); + guard->ran = 1; + + } else { + /* We lost the race. Destroy the event we created and wait for the */ + /* existing one to become signaled. */ + CloseHandle(created_event); + result = WaitForSingleObject(existing_event, INFINITE); + assert(result == WAIT_OBJECT_0); + } +} + + +void uv_once(uv_once_t* guard, void (*callback)(void)) { + /* Fast case - avoid WaitForSingleObject. */ + if (guard->ran) { + return; + } + + uv__once_inner(guard, callback); +} + + +/* Verify that uv_thread_t can be stored in a TLS slot. 
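/* Illustrative aside (sketch, not part of this patch): uv_once() as
 * implemented above, together with the thread-creation API in this file.
 * init_once, worker and spawn_workers are hypothetical names. */
#include <stdio.h>
#include <uv.h>

static uv_once_t once_guard = UV_ONCE_INIT;

static void init_once(void) {
  /* Runs exactly once, even if many threads race into uv_once(). */
}

static void worker(void* arg) {
  uv_once(&once_guard, init_once);
  printf("worker %d\n", *(int*) arg);
}

static void spawn_workers(void) {
  uv_thread_t tids[4];
  int ids[4];
  int i;
  for (i = 0; i < 4; i++) {
    ids[i] = i;
    uv_thread_create(&tids[i], worker, &ids[i]);
  }
  for (i = 0; i < 4; i++)
    uv_thread_join(&tids[i]);
}
/* End of aside. */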
*/ +STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*)); + +static uv_key_t uv__current_thread_key; +static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT; + + +static void uv__init_current_thread_key(void) { + if (uv_key_create(&uv__current_thread_key)) + abort(); +} + + +struct thread_ctx { + void (*entry)(void* arg); + void* arg; + uv_thread_t self; +}; + + +static UINT __stdcall uv__thread_start(void* arg) { + struct thread_ctx *ctx_p; + struct thread_ctx ctx; + + ctx_p = arg; + ctx = *ctx_p; + uv__free(ctx_p); + + uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key); + uv_key_set(&uv__current_thread_key, (void*) ctx.self); + + ctx.entry(ctx.arg); + + return 0; +} + + +int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) { + struct thread_ctx* ctx; + int err; + HANDLE thread; + + ctx = uv__malloc(sizeof(*ctx)); + if (ctx == NULL) + return UV_ENOMEM; + + ctx->entry = entry; + ctx->arg = arg; + + /* Create the thread in suspended state so we have a chance to pass + * its own creation handle to it */ + thread = (HANDLE) _beginthreadex(NULL, + 0, + uv__thread_start, + ctx, + CREATE_SUSPENDED, + NULL); + if (thread == NULL) { + err = errno; + uv__free(ctx); + } else { + err = 0; + *tid = thread; + ctx->self = thread; + ResumeThread(thread); + } + + switch (err) { + case 0: + return 0; + case EACCES: + return UV_EACCES; + case EAGAIN: + return UV_EAGAIN; + case EINVAL: + return UV_EINVAL; + } + + return UV_EIO; +} + + +uv_thread_t uv_thread_self(void) { + uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key); + return (uv_thread_t) uv_key_get(&uv__current_thread_key); +} + + +int uv_thread_join(uv_thread_t *tid) { + if (WaitForSingleObject(*tid, INFINITE)) + return uv_translate_sys_error(GetLastError()); + else { + CloseHandle(*tid); + *tid = 0; + return 0; + } +} + + +int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) { + return *t1 == *t2; +} + + +int uv_mutex_init(uv_mutex_t* mutex) { + InitializeCriticalSection(mutex); + return 0; +} + + +void uv_mutex_destroy(uv_mutex_t* mutex) { + DeleteCriticalSection(mutex); +} + + +void uv_mutex_lock(uv_mutex_t* mutex) { + EnterCriticalSection(mutex); +} + + +int uv_mutex_trylock(uv_mutex_t* mutex) { + if (TryEnterCriticalSection(mutex)) + return 0; + else + return UV_EBUSY; +} + + +void uv_mutex_unlock(uv_mutex_t* mutex) { + LeaveCriticalSection(mutex); +} + + +int uv_rwlock_init(uv_rwlock_t* rwlock) { + /* Initialize the semaphore that acts as the write lock. */ + HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL); + if (handle == NULL) + return uv_translate_sys_error(GetLastError()); + rwlock->state_.write_semaphore_ = handle; + + /* Initialize the critical section protecting the reader count. */ + InitializeCriticalSection(&rwlock->state_.num_readers_lock_); + + /* Initialize the reader count. */ + rwlock->state_.num_readers_ = 0; + + return 0; +} + + +void uv_rwlock_destroy(uv_rwlock_t* rwlock) { + DeleteCriticalSection(&rwlock->state_.num_readers_lock_); + CloseHandle(rwlock->state_.write_semaphore_); +} + + +void uv_rwlock_rdlock(uv_rwlock_t* rwlock) { + /* Acquire the lock that protects the reader count. */ + EnterCriticalSection(&rwlock->state_.num_readers_lock_); + + /* Increase the reader count, and lock for write if this is the first + * reader. 
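/* Illustrative aside (sketch, not part of this patch): using the
 * write-semaphore plus reader-count lock implemented above. Many readers may
 * hold it at once; the first reader takes the write semaphore and the last
 * reader releases it. Call uv_rwlock_init(&table_lock) once before use; the
 * names here are hypothetical. */
#include <uv.h>

static uv_rwlock_t table_lock;
static int table_value;

static int read_value(void) {
  int v;
  uv_rwlock_rdlock(&table_lock);   /* Shared: runs concurrently with other readers. */
  v = table_value;
  uv_rwlock_rdunlock(&table_lock);
  return v;
}

static void write_value(int v) {
  uv_rwlock_wrlock(&table_lock);   /* Exclusive: waits for all readers to drain. */
  table_value = v;
  uv_rwlock_wrunlock(&table_lock);
}
/* End of aside. */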
+ */ + if (++rwlock->state_.num_readers_ == 1) { + DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE); + if (r != WAIT_OBJECT_0) + uv_fatal_error(GetLastError(), "WaitForSingleObject"); + } + + /* Release the lock that protects the reader count. */ + LeaveCriticalSection(&rwlock->state_.num_readers_lock_); +} + + +int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) { + int err; + + if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_)) + return UV_EBUSY; + + err = 0; + + if (rwlock->state_.num_readers_ == 0) { + /* Currently there are no other readers, which means that the write lock + * needs to be acquired. + */ + DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0); + if (r == WAIT_OBJECT_0) + rwlock->state_.num_readers_++; + else if (r == WAIT_TIMEOUT) + err = UV_EBUSY; + else if (r == WAIT_FAILED) + uv_fatal_error(GetLastError(), "WaitForSingleObject"); + + } else { + /* The write lock has already been acquired because there are other + * active readers. + */ + rwlock->state_.num_readers_++; + } + + LeaveCriticalSection(&rwlock->state_.num_readers_lock_); + return err; +} + + +void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) { + EnterCriticalSection(&rwlock->state_.num_readers_lock_); + + if (--rwlock->state_.num_readers_ == 0) { + if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL)) + uv_fatal_error(GetLastError(), "ReleaseSemaphore"); + } + + LeaveCriticalSection(&rwlock->state_.num_readers_lock_); +} + + +void uv_rwlock_wrlock(uv_rwlock_t* rwlock) { + DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE); + if (r != WAIT_OBJECT_0) + uv_fatal_error(GetLastError(), "WaitForSingleObject"); +} + + +int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) { + DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0); + if (r == WAIT_OBJECT_0) + return 0; + else if (r == WAIT_TIMEOUT) + return UV_EBUSY; + else + uv_fatal_error(GetLastError(), "WaitForSingleObject"); +} + + +void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) { + if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL)) + uv_fatal_error(GetLastError(), "ReleaseSemaphore"); +} + + +int uv_sem_init(uv_sem_t* sem, unsigned int value) { + *sem = CreateSemaphore(NULL, value, INT_MAX, NULL); + if (*sem == NULL) + return uv_translate_sys_error(GetLastError()); + else + return 0; +} + + +void uv_sem_destroy(uv_sem_t* sem) { + if (!CloseHandle(*sem)) + abort(); +} + + +void uv_sem_post(uv_sem_t* sem) { + if (!ReleaseSemaphore(*sem, 1, NULL)) + abort(); +} + + +void uv_sem_wait(uv_sem_t* sem) { + if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0) + abort(); +} + + +int uv_sem_trywait(uv_sem_t* sem) { + DWORD r = WaitForSingleObject(*sem, 0); + + if (r == WAIT_OBJECT_0) + return 0; + + if (r == WAIT_TIMEOUT) + return UV_EAGAIN; + + abort(); + return -1; /* Satisfy the compiler. */ +} + + +/* This condition variable implementation is based on the SetEvent solution + * (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html + * We could not use the SignalObjectAndWait solution (section 3.4) because + * it want the 2nd argument (type uv_mutex_t) of uv_cond_wait() and + * uv_cond_timedwait() to be HANDLEs, but we use CRITICAL_SECTIONs. + */ + +static int uv_cond_fallback_init(uv_cond_t* cond) { + int err; + + /* Initialize the count to 0. */ + cond->fallback.waiters_count = 0; + + InitializeCriticalSection(&cond->fallback.waiters_count_lock); + + /* Create an auto-reset event. 
*/ + cond->fallback.signal_event = CreateEvent(NULL, /* no security */ + FALSE, /* auto-reset event */ + FALSE, /* non-signaled initially */ + NULL); /* unnamed */ + if (!cond->fallback.signal_event) { + err = GetLastError(); + goto error2; + } + + /* Create a manual-reset event. */ + cond->fallback.broadcast_event = CreateEvent(NULL, /* no security */ + TRUE, /* manual-reset */ + FALSE, /* non-signaled */ + NULL); /* unnamed */ + if (!cond->fallback.broadcast_event) { + err = GetLastError(); + goto error; + } + + return 0; + +error: + CloseHandle(cond->fallback.signal_event); +error2: + DeleteCriticalSection(&cond->fallback.waiters_count_lock); + return uv_translate_sys_error(err); +} + + +static int uv_cond_condvar_init(uv_cond_t* cond) { + pInitializeConditionVariable(&cond->cond_var); + return 0; +} + + +int uv_cond_init(uv_cond_t* cond) { + uv__once_init(); + + if (HAVE_CONDVAR_API()) + return uv_cond_condvar_init(cond); + else + return uv_cond_fallback_init(cond); +} + + +static void uv_cond_fallback_destroy(uv_cond_t* cond) { + if (!CloseHandle(cond->fallback.broadcast_event)) + abort(); + if (!CloseHandle(cond->fallback.signal_event)) + abort(); + DeleteCriticalSection(&cond->fallback.waiters_count_lock); +} + + +static void uv_cond_condvar_destroy(uv_cond_t* cond) { + /* nothing to do */ +} + + +void uv_cond_destroy(uv_cond_t* cond) { + if (HAVE_CONDVAR_API()) + uv_cond_condvar_destroy(cond); + else + uv_cond_fallback_destroy(cond); +} + + +static void uv_cond_fallback_signal(uv_cond_t* cond) { + int have_waiters; + + /* Avoid race conditions. */ + EnterCriticalSection(&cond->fallback.waiters_count_lock); + have_waiters = cond->fallback.waiters_count > 0; + LeaveCriticalSection(&cond->fallback.waiters_count_lock); + + if (have_waiters) + SetEvent(cond->fallback.signal_event); +} + + +static void uv_cond_condvar_signal(uv_cond_t* cond) { + pWakeConditionVariable(&cond->cond_var); +} + + +void uv_cond_signal(uv_cond_t* cond) { + if (HAVE_CONDVAR_API()) + uv_cond_condvar_signal(cond); + else + uv_cond_fallback_signal(cond); +} + + +static void uv_cond_fallback_broadcast(uv_cond_t* cond) { + int have_waiters; + + /* Avoid race conditions. */ + EnterCriticalSection(&cond->fallback.waiters_count_lock); + have_waiters = cond->fallback.waiters_count > 0; + LeaveCriticalSection(&cond->fallback.waiters_count_lock); + + if (have_waiters) + SetEvent(cond->fallback.broadcast_event); +} + + +static void uv_cond_condvar_broadcast(uv_cond_t* cond) { + pWakeAllConditionVariable(&cond->cond_var); +} + + +void uv_cond_broadcast(uv_cond_t* cond) { + if (HAVE_CONDVAR_API()) + uv_cond_condvar_broadcast(cond); + else + uv_cond_fallback_broadcast(cond); +} + + +static int uv_cond_wait_helper(uv_cond_t* cond, uv_mutex_t* mutex, + DWORD dwMilliseconds) { + DWORD result; + int last_waiter; + HANDLE handles[2] = { + cond->fallback.signal_event, + cond->fallback.broadcast_event + }; + + /* Avoid race conditions. */ + EnterCriticalSection(&cond->fallback.waiters_count_lock); + cond->fallback.waiters_count++; + LeaveCriticalSection(&cond->fallback.waiters_count_lock); + + /* It's ok to release the here since Win32 manual-reset events */ + /* maintain state when used with . This avoids the "lost wakeup" */ + /* bug. */ + uv_mutex_unlock(mutex); + + /* Wait for either event to become signaled due to being */ + /* called or being called. 
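/* Illustrative aside (sketch, not part of this patch): the usual
 * wait-with-predicate pattern for the condition variable above. Note that
 * uv_cond_timedwait() in this file takes its timeout in nanoseconds (the
 * implementation divides by 1e6 to get milliseconds). Call
 * uv_mutex_init(&q_mutex) and uv_cond_init(&q_cond) before use; names are
 * hypothetical. */
#include <uv.h>

static uv_mutex_t q_mutex;
static uv_cond_t q_cond;
static int q_items;

static void producer_push(void) {
  uv_mutex_lock(&q_mutex);
  q_items++;
  uv_cond_signal(&q_cond);            /* Wake one waiter. */
  uv_mutex_unlock(&q_mutex);
}

static int consumer_pop(uint64_t timeout_ns) {
  int got = 0;
  uv_mutex_lock(&q_mutex);
  while (q_items == 0) {              /* Re-check the predicate: wakeups can be spurious. */
    if (uv_cond_timedwait(&q_cond, &q_mutex, timeout_ns) == UV_ETIMEDOUT)
      break;
  }
  if (q_items > 0) {
    q_items--;
    got = 1;
  }
  uv_mutex_unlock(&q_mutex);
  return got;
}
/* End of aside. */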
*/ + result = WaitForMultipleObjects(2, handles, FALSE, dwMilliseconds); + + EnterCriticalSection(&cond->fallback.waiters_count_lock); + cond->fallback.waiters_count--; + last_waiter = result == WAIT_OBJECT_0 + 1 + && cond->fallback.waiters_count == 0; + LeaveCriticalSection(&cond->fallback.waiters_count_lock); + + /* Some thread called . */ + if (last_waiter) { + /* We're the last waiter to be notified or to stop waiting, so reset the */ + /* the manual-reset event. */ + ResetEvent(cond->fallback.broadcast_event); + } + + /* Reacquire the . */ + uv_mutex_lock(mutex); + + if (result == WAIT_OBJECT_0 || result == WAIT_OBJECT_0 + 1) + return 0; + + if (result == WAIT_TIMEOUT) + return UV_ETIMEDOUT; + + abort(); + return -1; /* Satisfy the compiler. */ +} + + +static void uv_cond_fallback_wait(uv_cond_t* cond, uv_mutex_t* mutex) { + if (uv_cond_wait_helper(cond, mutex, INFINITE)) + abort(); +} + + +static void uv_cond_condvar_wait(uv_cond_t* cond, uv_mutex_t* mutex) { + if (!pSleepConditionVariableCS(&cond->cond_var, mutex, INFINITE)) + abort(); +} + + +void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) { + if (HAVE_CONDVAR_API()) + uv_cond_condvar_wait(cond, mutex); + else + uv_cond_fallback_wait(cond, mutex); +} + + +static int uv_cond_fallback_timedwait(uv_cond_t* cond, + uv_mutex_t* mutex, uint64_t timeout) { + return uv_cond_wait_helper(cond, mutex, (DWORD)(timeout / 1e6)); +} + + +static int uv_cond_condvar_timedwait(uv_cond_t* cond, + uv_mutex_t* mutex, uint64_t timeout) { + if (pSleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6))) + return 0; + if (GetLastError() != ERROR_TIMEOUT) + abort(); + return UV_ETIMEDOUT; +} + + +int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, + uint64_t timeout) { + if (HAVE_CONDVAR_API()) + return uv_cond_condvar_timedwait(cond, mutex, timeout); + else + return uv_cond_fallback_timedwait(cond, mutex, timeout); +} + + +int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { + int err; + + barrier->n = count; + barrier->count = 0; + + err = uv_mutex_init(&barrier->mutex); + if (err) + return err; + + err = uv_sem_init(&barrier->turnstile1, 0); + if (err) + goto error2; + + err = uv_sem_init(&barrier->turnstile2, 1); + if (err) + goto error; + + return 0; + +error: + uv_sem_destroy(&barrier->turnstile1); +error2: + uv_mutex_destroy(&barrier->mutex); + return err; + +} + + +void uv_barrier_destroy(uv_barrier_t* barrier) { + uv_sem_destroy(&barrier->turnstile2); + uv_sem_destroy(&barrier->turnstile1); + uv_mutex_destroy(&barrier->mutex); +} + + +int uv_barrier_wait(uv_barrier_t* barrier) { + int serial_thread; + + uv_mutex_lock(&barrier->mutex); + if (++barrier->count == barrier->n) { + uv_sem_wait(&barrier->turnstile2); + uv_sem_post(&barrier->turnstile1); + } + uv_mutex_unlock(&barrier->mutex); + + uv_sem_wait(&barrier->turnstile1); + uv_sem_post(&barrier->turnstile1); + + uv_mutex_lock(&barrier->mutex); + serial_thread = (--barrier->count == 0); + if (serial_thread) { + uv_sem_wait(&barrier->turnstile1); + uv_sem_post(&barrier->turnstile2); + } + uv_mutex_unlock(&barrier->mutex); + + uv_sem_wait(&barrier->turnstile2); + uv_sem_post(&barrier->turnstile2); + return serial_thread; +} + + +int uv_key_create(uv_key_t* key) { + key->tls_index = TlsAlloc(); + if (key->tls_index == TLS_OUT_OF_INDEXES) + return UV_ENOMEM; + return 0; +} + + +void uv_key_delete(uv_key_t* key) { + if (TlsFree(key->tls_index) == FALSE) + abort(); + key->tls_index = TLS_OUT_OF_INDEXES; +} + + +void* uv_key_get(uv_key_t* key) { + void* value; + + 
value = TlsGetValue(key->tls_index);
+ if (value == NULL)
+ if (GetLastError() != ERROR_SUCCESS)
+ abort();
+
+ return value;
+}
+
+
+void uv_key_set(uv_key_t* key, void* value) {
+ if (TlsSetValue(key->tls_index, value) == FALSE)
+ abort();
+}
diff --git a/deps/libuv/src/win/timer.c b/deps/libuv/src/win/timer.c
new file mode 100644
index 00000000..27ca7716
--- /dev/null
+++ b/deps/libuv/src/win/timer.c
@@ -0,0 +1,195 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <limits.h>
+
+#include "uv.h"
+#include "internal.h"
+#include "tree.h"
+#include "handle-inl.h"
+
+
+/* The number of milliseconds in one second. */
+#define UV__MILLISEC 1000
+
+
+void uv_update_time(uv_loop_t* loop) {
+ uint64_t new_time = uv__hrtime(UV__MILLISEC);
+ assert(new_time >= loop->time);
+ loop->time = new_time;
+}
+
+
+static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) {
+ if (a->due < b->due)
+ return -1;
+ if (a->due > b->due)
+ return 1;
+ /*
+ * compare start_id when both have the same due time. start_id is
+ * allocated with loop->timer_counter in uv_timer_start().
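+ * For example, two timers started back to back with the same timeout get
+ * the same due time; the one with the lower start_id (i.e. the one started
+ * first) sorts first, so equal timers fire in their start order.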
+ */ + if (a->start_id < b->start_id) + return -1; + if (a->start_id > b->start_id) + return 1; + return 0; +} + + +RB_GENERATE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare); + + +int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) { + uv__handle_init(loop, (uv_handle_t*) handle, UV_TIMER); + handle->timer_cb = NULL; + handle->repeat = 0; + + return 0; +} + + +void uv_timer_endgame(uv_loop_t* loop, uv_timer_t* handle) { + if (handle->flags & UV__HANDLE_CLOSING) { + assert(!(handle->flags & UV_HANDLE_CLOSED)); + uv__handle_close(handle); + } +} + + +static uint64_t get_clamped_due_time(uint64_t loop_time, uint64_t timeout) { + uint64_t clamped_timeout; + + clamped_timeout = loop_time + timeout; + if (clamped_timeout < timeout) + clamped_timeout = (uint64_t) -1; + + return clamped_timeout; +} + + +int uv_timer_start(uv_timer_t* handle, uv_timer_cb timer_cb, uint64_t timeout, + uint64_t repeat) { + uv_loop_t* loop = handle->loop; + uv_timer_t* old; + + if (timer_cb == NULL) + return UV_EINVAL; + + if (uv__is_active(handle)) + uv_timer_stop(handle); + + handle->timer_cb = timer_cb; + handle->due = get_clamped_due_time(loop->time, timeout); + handle->repeat = repeat; + uv__handle_start(handle); + + /* start_id is the second index to be compared in uv__timer_cmp() */ + handle->start_id = handle->loop->timer_counter++; + + old = RB_INSERT(uv_timer_tree_s, &loop->timers, handle); + assert(old == NULL); + + return 0; +} + + +int uv_timer_stop(uv_timer_t* handle) { + uv_loop_t* loop = handle->loop; + + if (!uv__is_active(handle)) + return 0; + + RB_REMOVE(uv_timer_tree_s, &loop->timers, handle); + uv__handle_stop(handle); + + return 0; +} + + +int uv_timer_again(uv_timer_t* handle) { + /* If timer_cb is NULL that means that the timer was never started. */ + if (!handle->timer_cb) { + return UV_EINVAL; + } + + if (handle->repeat) { + uv_timer_stop(handle); + uv_timer_start(handle, handle->timer_cb, handle->repeat, handle->repeat); + } + + return 0; +} + + +void uv_timer_set_repeat(uv_timer_t* handle, uint64_t repeat) { + assert(handle->type == UV_TIMER); + handle->repeat = repeat; +} + + +uint64_t uv_timer_get_repeat(const uv_timer_t* handle) { + assert(handle->type == UV_TIMER); + return handle->repeat; +} + + +DWORD uv__next_timeout(const uv_loop_t* loop) { + uv_timer_t* timer; + int64_t delta; + + /* Check if there are any running timers + * Need to cast away const first, since RB_MIN doesn't know what we are + * going to do with this return value, it can't be marked const + */ + timer = RB_MIN(uv_timer_tree_s, &((uv_loop_t*)loop)->timers); + if (timer) { + delta = timer->due - loop->time; + if (delta >= UINT_MAX - 1) { + /* A timeout value of UINT_MAX means infinite, so that's no good. */ + return UINT_MAX - 1; + } else if (delta < 0) { + /* Negative timeout values are not allowed */ + return 0; + } else { + return (DWORD)delta; + } + } else { + /* No timers */ + return INFINITE; + } +} + + +void uv_process_timers(uv_loop_t* loop) { + uv_timer_t* timer; + + /* Call timer callbacks */ + for (timer = RB_MIN(uv_timer_tree_s, &loop->timers); + timer != NULL && timer->due <= loop->time; + timer = RB_MIN(uv_timer_tree_s, &loop->timers)) { + + uv_timer_stop(timer); + uv_timer_again(timer); + timer->timer_cb((uv_timer_t*) timer); + } +} diff --git a/deps/libuv/src/win/tty.c b/deps/libuv/src/win/tty.c new file mode 100644 index 00000000..18d68d09 --- /dev/null +++ b/deps/libuv/src/win/tty.c @@ -0,0 +1,2257 @@ +/* Copyright Joyent, Inc. and other Node contributors. 
All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <io.h>
+#include <string.h>
+#include <stdlib.h>
+
+#if defined(_MSC_VER) && _MSC_VER < 1600
+# include "stdint-msvc2008.h"
+#else
+# include <stdint.h>
+#endif
+
+#ifndef COMMON_LVB_REVERSE_VIDEO
+# define COMMON_LVB_REVERSE_VIDEO 0x4000
+#endif
+
+#include "uv.h"
+#include "internal.h"
+#include "handle-inl.h"
+#include "stream-inl.h"
+#include "req-inl.h"
+
+#ifndef InterlockedOr
+# define InterlockedOr _InterlockedOr
+#endif
+
+#define UNICODE_REPLACEMENT_CHARACTER (0xfffd)
+
+#define ANSI_NORMAL 0x00
+#define ANSI_ESCAPE_SEEN 0x02
+#define ANSI_CSI 0x04
+#define ANSI_ST_CONTROL 0x08
+#define ANSI_IGNORE 0x10
+#define ANSI_IN_ARG 0x20
+#define ANSI_IN_STRING 0x40
+#define ANSI_BACKSLASH_SEEN 0x80
+
+#define MAX_INPUT_BUFFER_LENGTH 8192
+
+#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
+#endif
+
+static void uv_tty_capture_initial_style(CONSOLE_SCREEN_BUFFER_INFO* info);
+static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info);
+static int uv__cancel_read_console(uv_tty_t* handle);
+
+
+/* Null uv_buf_t */
+static const uv_buf_t uv_null_buf_ = { 0, NULL };
+
+enum uv__read_console_status_e {
+ NOT_STARTED,
+ IN_PROGRESS,
+ TRAP_REQUESTED,
+ COMPLETED
+};
+
+static volatile LONG uv__read_console_status = NOT_STARTED;
+static volatile LONG uv__restore_screen_state;
+static CONSOLE_SCREEN_BUFFER_INFO uv__saved_screen_state;
+
+
+/*
+ * The console virtual window.
+ *
+ * Normally cursor movement in windows is relative to the console screen buffer,
+ * e.g. the application is allowed to overwrite the 'history'. This is very
+ * inconvenient, it makes absolute cursor movement pretty useless. There is
+ * also the concept of 'client rect' which is defined by the actual size of
+ * the console window and the scroll position of the screen buffer, but it's
+ * very volatile because it changes when the user scrolls.
+ *
+ * To make cursor movement behave sensibly we define a virtual window to which
+ * cursor movement is confined. The virtual window is always as wide as the
+ * console screen buffer, but its height is defined by the size of the
+ * console window. The top of the virtual window aligns with the position
+ * of the caret when the first stdout/err handle is created, unless that would
+ * mean that it would extend beyond the bottom of the screen buffer - in that
+ * case it's located as far down as possible.
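+ *
+ * For example (illustrative numbers): with a 9000-row screen buffer and a
+ * console window that shows 30 rows, the virtual window is buffer-width x 30.
+ * If the caret starts at row 8990 the window cannot begin there without
+ * running past the end of the buffer, so its top is clamped to row 8970.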
+ * + * When the user writes a long text or many newlines, such that the output + * reaches beyond the bottom of the virtual window, the virtual window is + * shifted downwards, but not resized. + * + * Since all tty i/o happens on the same console, this window is shared + * between all stdout/stderr handles. + */ + +static int uv_tty_virtual_offset = -1; +static int uv_tty_virtual_height = -1; +static int uv_tty_virtual_width = -1; + +/* We use a semaphore rather than a mutex or critical section because in some + cases (uv__cancel_read_console) we need take the lock in the main thread and + release it in another thread. Using a semaphore ensures that in such + scenario the main thread will still block when trying to acquire the lock. */ +static uv_sem_t uv_tty_output_lock; + +static HANDLE uv_tty_output_handle = INVALID_HANDLE_VALUE; + +static WORD uv_tty_default_text_attributes = + FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE; + +static char uv_tty_default_fg_color = 7; +static char uv_tty_default_bg_color = 0; +static char uv_tty_default_fg_bright = 0; +static char uv_tty_default_bg_bright = 0; +static char uv_tty_default_inverse = 0; + +typedef enum { + UV_SUPPORTED, + UV_UNCHECKED, + UV_UNSUPPORTED +} uv_vtermstate_t; +/* Determine whether or not ANSI support is enabled. */ +static uv_vtermstate_t uv__vterm_state = UV_UNCHECKED; +static void uv__determine_vterm_state(HANDLE handle); + +void uv_console_init() { + if (uv_sem_init(&uv_tty_output_lock, 1)) + abort(); +} + + +int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int readable) { + HANDLE handle; + CONSOLE_SCREEN_BUFFER_INFO screen_buffer_info; + + handle = (HANDLE) uv__get_osfhandle(fd); + if (handle == INVALID_HANDLE_VALUE) + return UV_EBADF; + + if (fd <= 2) { + /* In order to avoid closing a stdio file descriptor 0-2, duplicate the + * underlying OS handle and forget about the original fd. + * We could also opt to use the original OS handle and just never close it, + * but then there would be no reliable way to cancel pending read operations + * upon close. + */ + if (!DuplicateHandle(INVALID_HANDLE_VALUE, + handle, + INVALID_HANDLE_VALUE, + &handle, + 0, + FALSE, + DUPLICATE_SAME_ACCESS)) + return uv_translate_sys_error(GetLastError()); + fd = -1; + } + + if (!readable) { + /* Obtain the screen buffer info with the output handle. */ + if (!GetConsoleScreenBufferInfo(handle, &screen_buffer_info)) { + return uv_translate_sys_error(GetLastError()); + } + + /* Obtain the the tty_output_lock because the virtual window state is */ + /* shared between all uv_tty_t handles. */ + uv_sem_wait(&uv_tty_output_lock); + + if (uv__vterm_state == UV_UNCHECKED) + uv__determine_vterm_state(handle); + + /* Store the global tty output handle. This handle is used by TTY read */ + /* streams to update the virtual window when a CONSOLE_BUFFER_SIZE_EVENT */ + /* is received. */ + uv_tty_output_handle = handle; + + /* Remember the original console text attributes. */ + uv_tty_capture_initial_style(&screen_buffer_info); + + uv_tty_update_virtual_window(&screen_buffer_info); + + uv_sem_post(&uv_tty_output_lock); + } + + + uv_stream_init(loop, (uv_stream_t*) tty, UV_TTY); + uv_connection_init((uv_stream_t*) tty); + + tty->handle = handle; + tty->u.fd = fd; + tty->reqs_pending = 0; + tty->flags |= UV_HANDLE_BOUND; + + if (readable) { + /* Initialize TTY input specific fields. */ + tty->flags |= UV_HANDLE_TTY_READABLE | UV_HANDLE_READABLE; + /* TODO: remove me in v2.x. 
*/ + tty->tty.rd.unused_ = NULL; + tty->tty.rd.read_line_buffer = uv_null_buf_; + tty->tty.rd.read_raw_wait = NULL; + + /* Init keycode-to-vt100 mapper state. */ + tty->tty.rd.last_key_len = 0; + tty->tty.rd.last_key_offset = 0; + tty->tty.rd.last_utf16_high_surrogate = 0; + memset(&tty->tty.rd.last_input_record, 0, sizeof tty->tty.rd.last_input_record); + } else { + /* TTY output specific fields. */ + tty->flags |= UV_HANDLE_WRITABLE; + + /* Init utf8-to-utf16 conversion state. */ + tty->tty.wr.utf8_bytes_left = 0; + tty->tty.wr.utf8_codepoint = 0; + + /* Initialize eol conversion state */ + tty->tty.wr.previous_eol = 0; + + /* Init ANSI parser state. */ + tty->tty.wr.ansi_parser_state = ANSI_NORMAL; + } + + return 0; +} + + +/* Set the default console text attributes based on how the console was + * configured when libuv started. + */ +static void uv_tty_capture_initial_style(CONSOLE_SCREEN_BUFFER_INFO* info) { + static int style_captured = 0; + + /* Only do this once. + Assumption: Caller has acquired uv_tty_output_lock. */ + if (style_captured) + return; + + /* Save raw win32 attributes. */ + uv_tty_default_text_attributes = info->wAttributes; + + /* Convert black text on black background to use white text. */ + if (uv_tty_default_text_attributes == 0) + uv_tty_default_text_attributes = 7; + + /* Convert Win32 attributes to ANSI colors. */ + uv_tty_default_fg_color = 0; + uv_tty_default_bg_color = 0; + uv_tty_default_fg_bright = 0; + uv_tty_default_bg_bright = 0; + uv_tty_default_inverse = 0; + + if (uv_tty_default_text_attributes & FOREGROUND_RED) + uv_tty_default_fg_color |= 1; + + if (uv_tty_default_text_attributes & FOREGROUND_GREEN) + uv_tty_default_fg_color |= 2; + + if (uv_tty_default_text_attributes & FOREGROUND_BLUE) + uv_tty_default_fg_color |= 4; + + if (uv_tty_default_text_attributes & BACKGROUND_RED) + uv_tty_default_bg_color |= 1; + + if (uv_tty_default_text_attributes & BACKGROUND_GREEN) + uv_tty_default_bg_color |= 2; + + if (uv_tty_default_text_attributes & BACKGROUND_BLUE) + uv_tty_default_bg_color |= 4; + + if (uv_tty_default_text_attributes & FOREGROUND_INTENSITY) + uv_tty_default_fg_bright = 1; + + if (uv_tty_default_text_attributes & BACKGROUND_INTENSITY) + uv_tty_default_bg_bright = 1; + + if (uv_tty_default_text_attributes & COMMON_LVB_REVERSE_VIDEO) + uv_tty_default_inverse = 1; + + style_captured = 1; +} + + +int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) { + DWORD flags; + unsigned char was_reading; + uv_alloc_cb alloc_cb; + uv_read_cb read_cb; + int err; + + if (!(tty->flags & UV_HANDLE_TTY_READABLE)) { + return UV_EINVAL; + } + + if (!!mode == !!(tty->flags & UV_HANDLE_TTY_RAW)) { + return 0; + } + + switch (mode) { + case UV_TTY_MODE_NORMAL: + flags = ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT; + break; + case UV_TTY_MODE_RAW: + flags = ENABLE_WINDOW_INPUT; + break; + case UV_TTY_MODE_IO: + return UV_ENOTSUP; + default: + return UV_EINVAL; + } + + /* If currently reading, stop, and restart reading. */ + if (tty->flags & UV_HANDLE_READING) { + was_reading = 1; + alloc_cb = tty->alloc_cb; + read_cb = tty->read_cb; + err = uv_tty_read_stop(tty); + if (err) { + return uv_translate_sys_error(err); + } + } else { + was_reading = 0; + } + + uv_sem_wait(&uv_tty_output_lock); + if (!SetConsoleMode(tty->handle, flags)) { + err = uv_translate_sys_error(GetLastError()); + uv_sem_post(&uv_tty_output_lock); + return err; + } + uv_sem_post(&uv_tty_output_lock); + + /* Update flag. */ + tty->flags &= ~UV_HANDLE_TTY_RAW; + tty->flags |= mode ? 
UV_HANDLE_TTY_RAW : 0; + + /* If we just stopped reading, restart. */ + if (was_reading) { + err = uv_tty_read_start(tty, alloc_cb, read_cb); + if (err) { + return uv_translate_sys_error(err); + } + } + + return 0; +} + + +int uv_is_tty(uv_file file) { + DWORD result; + return GetConsoleMode((HANDLE) _get_osfhandle(file), &result) != 0; +} + + +int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) { + CONSOLE_SCREEN_BUFFER_INFO info; + + if (!GetConsoleScreenBufferInfo(tty->handle, &info)) { + return uv_translate_sys_error(GetLastError()); + } + + uv_sem_wait(&uv_tty_output_lock); + uv_tty_update_virtual_window(&info); + uv_sem_post(&uv_tty_output_lock); + + *width = uv_tty_virtual_width; + *height = uv_tty_virtual_height; + + return 0; +} + + +static void CALLBACK uv_tty_post_raw_read(void* data, BOOLEAN didTimeout) { + uv_loop_t* loop; + uv_tty_t* handle; + uv_req_t* req; + + assert(data); + assert(!didTimeout); + + req = (uv_req_t*) data; + handle = (uv_tty_t*) req->data; + loop = handle->loop; + + UnregisterWait(handle->tty.rd.read_raw_wait); + handle->tty.rd.read_raw_wait = NULL; + + SET_REQ_SUCCESS(req); + POST_COMPLETION_FOR_REQ(loop, req); +} + + +static void uv_tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) { + uv_read_t* req; + BOOL r; + + assert(handle->flags & UV_HANDLE_READING); + assert(!(handle->flags & UV_HANDLE_READ_PENDING)); + + assert(handle->handle && handle->handle != INVALID_HANDLE_VALUE); + + handle->tty.rd.read_line_buffer = uv_null_buf_; + + req = &handle->read_req; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + + r = RegisterWaitForSingleObject(&handle->tty.rd.read_raw_wait, + handle->handle, + uv_tty_post_raw_read, + (void*) req, + INFINITE, + WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE); + if (!r) { + handle->tty.rd.read_raw_wait = NULL; + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + } + + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; +} + + +static DWORD CALLBACK uv_tty_line_read_thread(void* data) { + uv_loop_t* loop; + uv_tty_t* handle; + uv_req_t* req; + DWORD bytes, read_bytes; + WCHAR utf16[MAX_INPUT_BUFFER_LENGTH / 3]; + DWORD chars, read_chars; + LONG status; + COORD pos; + BOOL read_console_success; + + assert(data); + + req = (uv_req_t*) data; + handle = (uv_tty_t*) req->data; + loop = handle->loop; + + assert(handle->tty.rd.read_line_buffer.base != NULL); + assert(handle->tty.rd.read_line_buffer.len > 0); + + /* ReadConsole can't handle big buffers. */ + if (handle->tty.rd.read_line_buffer.len < MAX_INPUT_BUFFER_LENGTH) { + bytes = handle->tty.rd.read_line_buffer.len; + } else { + bytes = MAX_INPUT_BUFFER_LENGTH; + } + + /* At last, unicode! 
*/ + /* One utf-16 codeunit never takes more than 3 utf-8 codeunits to encode */ + chars = bytes / 3; + + status = InterlockedExchange(&uv__read_console_status, IN_PROGRESS); + if (status == TRAP_REQUESTED) { + SET_REQ_SUCCESS(req); + req->u.io.overlapped.InternalHigh = 0; + POST_COMPLETION_FOR_REQ(loop, req); + return 0; + } + + read_console_success = ReadConsoleW(handle->handle, + (void*) utf16, + chars, + &read_chars, + NULL); + + if (read_console_success) { + read_bytes = WideCharToMultiByte(CP_UTF8, + 0, + utf16, + read_chars, + handle->tty.rd.read_line_buffer.base, + bytes, + NULL, + NULL); + SET_REQ_SUCCESS(req); + req->u.io.overlapped.InternalHigh = read_bytes; + } else { + SET_REQ_ERROR(req, GetLastError()); + } + + status = InterlockedExchange(&uv__read_console_status, COMPLETED); + + if (status == TRAP_REQUESTED) { + /* If we canceled the read by sending a VK_RETURN event, restore the + screen state to undo the visual effect of the VK_RETURN */ + if (read_console_success && InterlockedOr(&uv__restore_screen_state, 0)) { + HANDLE active_screen_buffer; + active_screen_buffer = CreateFileA("conout$", + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL); + if (active_screen_buffer != INVALID_HANDLE_VALUE) { + pos = uv__saved_screen_state.dwCursorPosition; + + /* If the cursor was at the bottom line of the screen buffer, the + VK_RETURN would have caused the buffer contents to scroll up by one + line. The right position to reset the cursor to is therefore one line + higher */ + if (pos.Y == uv__saved_screen_state.dwSize.Y - 1) + pos.Y--; + + SetConsoleCursorPosition(active_screen_buffer, pos); + CloseHandle(active_screen_buffer); + } + } + uv_sem_post(&uv_tty_output_lock); + } + POST_COMPLETION_FOR_REQ(loop, req); + return 0; +} + + +static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) { + uv_read_t* req; + BOOL r; + + assert(handle->flags & UV_HANDLE_READING); + assert(!(handle->flags & UV_HANDLE_READ_PENDING)); + assert(handle->handle && handle->handle != INVALID_HANDLE_VALUE); + + req = &handle->read_req; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + + handle->tty.rd.read_line_buffer = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 8192, &handle->tty.rd.read_line_buffer); + if (handle->tty.rd.read_line_buffer.base == NULL || + handle->tty.rd.read_line_buffer.len == 0) { + handle->read_cb((uv_stream_t*) handle, + UV_ENOBUFS, + &handle->tty.rd.read_line_buffer); + return; + } + assert(handle->tty.rd.read_line_buffer.base != NULL); + + /* Reset flags No locking is required since there cannot be a line read + in progress. 
We are also relying on the memory barrier provided by + QueueUserWorkItem*/ + uv__restore_screen_state = FALSE; + uv__read_console_status = NOT_STARTED; + r = QueueUserWorkItem(uv_tty_line_read_thread, + (void*) req, + WT_EXECUTELONGFUNCTION); + if (!r) { + SET_REQ_ERROR(req, GetLastError()); + uv_insert_pending_req(loop, (uv_req_t*)req); + } + + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; +} + + +static void uv_tty_queue_read(uv_loop_t* loop, uv_tty_t* handle) { + if (handle->flags & UV_HANDLE_TTY_RAW) { + uv_tty_queue_read_raw(loop, handle); + } else { + uv_tty_queue_read_line(loop, handle); + } +} + + +static const char* get_vt100_fn_key(DWORD code, char shift, char ctrl, + size_t* len) { +#define VK_CASE(vk, normal_str, shift_str, ctrl_str, shift_ctrl_str) \ + case (vk): \ + if (shift && ctrl) { \ + *len = sizeof shift_ctrl_str; \ + return "\033" shift_ctrl_str; \ + } else if (shift) { \ + *len = sizeof shift_str ; \ + return "\033" shift_str; \ + } else if (ctrl) { \ + *len = sizeof ctrl_str; \ + return "\033" ctrl_str; \ + } else { \ + *len = sizeof normal_str; \ + return "\033" normal_str; \ + } + + switch (code) { + /* These mappings are the same as Cygwin's. Unmodified and alt-modified */ + /* keypad keys comply with linux console, modifiers comply with xterm */ + /* modifier usage. F1..f12 and shift-f1..f10 comply with linux console, */ + /* f6..f12 with and without modifiers comply with rxvt. */ + VK_CASE(VK_INSERT, "[2~", "[2;2~", "[2;5~", "[2;6~") + VK_CASE(VK_END, "[4~", "[4;2~", "[4;5~", "[4;6~") + VK_CASE(VK_DOWN, "[B", "[1;2B", "[1;5B", "[1;6B") + VK_CASE(VK_NEXT, "[6~", "[6;2~", "[6;5~", "[6;6~") + VK_CASE(VK_LEFT, "[D", "[1;2D", "[1;5D", "[1;6D") + VK_CASE(VK_CLEAR, "[G", "[1;2G", "[1;5G", "[1;6G") + VK_CASE(VK_RIGHT, "[C", "[1;2C", "[1;5C", "[1;6C") + VK_CASE(VK_UP, "[A", "[1;2A", "[1;5A", "[1;6A") + VK_CASE(VK_HOME, "[1~", "[1;2~", "[1;5~", "[1;6~") + VK_CASE(VK_PRIOR, "[5~", "[5;2~", "[5;5~", "[5;6~") + VK_CASE(VK_DELETE, "[3~", "[3;2~", "[3;5~", "[3;6~") + VK_CASE(VK_NUMPAD0, "[2~", "[2;2~", "[2;5~", "[2;6~") + VK_CASE(VK_NUMPAD1, "[4~", "[4;2~", "[4;5~", "[4;6~") + VK_CASE(VK_NUMPAD2, "[B", "[1;2B", "[1;5B", "[1;6B") + VK_CASE(VK_NUMPAD3, "[6~", "[6;2~", "[6;5~", "[6;6~") + VK_CASE(VK_NUMPAD4, "[D", "[1;2D", "[1;5D", "[1;6D") + VK_CASE(VK_NUMPAD5, "[G", "[1;2G", "[1;5G", "[1;6G") + VK_CASE(VK_NUMPAD6, "[C", "[1;2C", "[1;5C", "[1;6C") + VK_CASE(VK_NUMPAD7, "[A", "[1;2A", "[1;5A", "[1;6A") + VK_CASE(VK_NUMPAD8, "[1~", "[1;2~", "[1;5~", "[1;6~") + VK_CASE(VK_NUMPAD9, "[5~", "[5;2~", "[5;5~", "[5;6~") + VK_CASE(VK_DECIMAL, "[3~", "[3;2~", "[3;5~", "[3;6~") + VK_CASE(VK_F1, "[[A", "[23~", "[11^", "[23^" ) + VK_CASE(VK_F2, "[[B", "[24~", "[12^", "[24^" ) + VK_CASE(VK_F3, "[[C", "[25~", "[13^", "[25^" ) + VK_CASE(VK_F4, "[[D", "[26~", "[14^", "[26^" ) + VK_CASE(VK_F5, "[[E", "[28~", "[15^", "[28^" ) + VK_CASE(VK_F6, "[17~", "[29~", "[17^", "[29^" ) + VK_CASE(VK_F7, "[18~", "[31~", "[18^", "[31^" ) + VK_CASE(VK_F8, "[19~", "[32~", "[19^", "[32^" ) + VK_CASE(VK_F9, "[20~", "[33~", "[20^", "[33^" ) + VK_CASE(VK_F10, "[21~", "[34~", "[21^", "[34^" ) + VK_CASE(VK_F11, "[23~", "[23$", "[23^", "[23@" ) + VK_CASE(VK_F12, "[24~", "[24$", "[24^", "[24@" ) + + default: + *len = 0; + return NULL; + } +#undef VK_CASE +} + + +void uv_process_tty_read_raw_req(uv_loop_t* loop, uv_tty_t* handle, + uv_req_t* req) { + /* Shortcut for handle->tty.rd.last_input_record.Event.KeyEvent. 
*/ +#define KEV handle->tty.rd.last_input_record.Event.KeyEvent + + DWORD records_left, records_read; + uv_buf_t buf; + off_t buf_used; + + assert(handle->type == UV_TTY); + assert(handle->flags & UV_HANDLE_TTY_READABLE); + handle->flags &= ~UV_HANDLE_READ_PENDING; + + if (!(handle->flags & UV_HANDLE_READING) || + !(handle->flags & UV_HANDLE_TTY_RAW)) { + goto out; + } + + if (!REQ_SUCCESS(req)) { + /* An error occurred while waiting for the event. */ + if ((handle->flags & UV_HANDLE_READING)) { + handle->flags &= ~UV_HANDLE_READING; + handle->read_cb((uv_stream_t*)handle, + uv_translate_sys_error(GET_REQ_ERROR(req)), + &uv_null_buf_); + } + goto out; + } + + /* Fetch the number of events */ + if (!GetNumberOfConsoleInputEvents(handle->handle, &records_left)) { + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + handle->read_cb((uv_stream_t*)handle, + uv_translate_sys_error(GetLastError()), + &uv_null_buf_); + goto out; + } + + /* Windows sends a lot of events that we're not interested in, so buf */ + /* will be allocated on demand, when there's actually something to emit. */ + buf = uv_null_buf_; + buf_used = 0; + + while ((records_left > 0 || handle->tty.rd.last_key_len > 0) && + (handle->flags & UV_HANDLE_READING)) { + if (handle->tty.rd.last_key_len == 0) { + /* Read the next input record */ + if (!ReadConsoleInputW(handle->handle, + &handle->tty.rd.last_input_record, + 1, + &records_read)) { + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + handle->read_cb((uv_stream_t*) handle, + uv_translate_sys_error(GetLastError()), + &buf); + goto out; + } + records_left--; + + /* If the window was resized, recompute the virtual window size. This */ + /* will trigger a SIGWINCH signal if the window size changed in an */ + /* way that matters to libuv. */ + if (handle->tty.rd.last_input_record.EventType == WINDOW_BUFFER_SIZE_EVENT) { + CONSOLE_SCREEN_BUFFER_INFO info; + + uv_sem_wait(&uv_tty_output_lock); + + if (uv_tty_output_handle != INVALID_HANDLE_VALUE && + GetConsoleScreenBufferInfo(uv_tty_output_handle, &info)) { + uv_tty_update_virtual_window(&info); + } + + uv_sem_post(&uv_tty_output_lock); + + continue; + } + + /* Ignore other events that are not key or resize events. */ + if (handle->tty.rd.last_input_record.EventType != KEY_EVENT) { + continue; + } + + /* Ignore keyup events, unless the left alt key was held and a valid */ + /* unicode character was emitted. */ + if (!KEV.bKeyDown && !(((KEV.dwControlKeyState & LEFT_ALT_PRESSED) || + KEV.wVirtualKeyCode==VK_MENU) && KEV.uChar.UnicodeChar != 0)) { + continue; + } + + /* Ignore keypresses to numpad number keys if the left alt is held */ + /* because the user is composing a character, or windows simulating */ + /* this. 
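(Windows delivers the composed character with the Alt key-up event, which is why key-up events that carry a unicode character are accepted above.)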
*/ + if ((KEV.dwControlKeyState & LEFT_ALT_PRESSED) && + !(KEV.dwControlKeyState & ENHANCED_KEY) && + (KEV.wVirtualKeyCode == VK_INSERT || + KEV.wVirtualKeyCode == VK_END || + KEV.wVirtualKeyCode == VK_DOWN || + KEV.wVirtualKeyCode == VK_NEXT || + KEV.wVirtualKeyCode == VK_LEFT || + KEV.wVirtualKeyCode == VK_CLEAR || + KEV.wVirtualKeyCode == VK_RIGHT || + KEV.wVirtualKeyCode == VK_HOME || + KEV.wVirtualKeyCode == VK_UP || + KEV.wVirtualKeyCode == VK_PRIOR || + KEV.wVirtualKeyCode == VK_NUMPAD0 || + KEV.wVirtualKeyCode == VK_NUMPAD1 || + KEV.wVirtualKeyCode == VK_NUMPAD2 || + KEV.wVirtualKeyCode == VK_NUMPAD3 || + KEV.wVirtualKeyCode == VK_NUMPAD4 || + KEV.wVirtualKeyCode == VK_NUMPAD5 || + KEV.wVirtualKeyCode == VK_NUMPAD6 || + KEV.wVirtualKeyCode == VK_NUMPAD7 || + KEV.wVirtualKeyCode == VK_NUMPAD8 || + KEV.wVirtualKeyCode == VK_NUMPAD9)) { + continue; + } + + if (KEV.uChar.UnicodeChar != 0) { + int prefix_len, char_len; + + /* Character key pressed */ + if (KEV.uChar.UnicodeChar >= 0xD800 && + KEV.uChar.UnicodeChar < 0xDC00) { + /* UTF-16 high surrogate */ + handle->tty.rd.last_utf16_high_surrogate = KEV.uChar.UnicodeChar; + continue; + } + + /* Prefix with \u033 if alt was held, but alt was not used as part */ + /* a compose sequence. */ + if ((KEV.dwControlKeyState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) + && !(KEV.dwControlKeyState & (LEFT_CTRL_PRESSED | + RIGHT_CTRL_PRESSED)) && KEV.bKeyDown) { + handle->tty.rd.last_key[0] = '\033'; + prefix_len = 1; + } else { + prefix_len = 0; + } + + if (KEV.uChar.UnicodeChar >= 0xDC00 && + KEV.uChar.UnicodeChar < 0xE000) { + /* UTF-16 surrogate pair */ + WCHAR utf16_buffer[2] = { handle->tty.rd.last_utf16_high_surrogate, + KEV.uChar.UnicodeChar}; + char_len = WideCharToMultiByte(CP_UTF8, + 0, + utf16_buffer, + 2, + &handle->tty.rd.last_key[prefix_len], + sizeof handle->tty.rd.last_key, + NULL, + NULL); + } else { + /* Single UTF-16 character */ + char_len = WideCharToMultiByte(CP_UTF8, + 0, + &KEV.uChar.UnicodeChar, + 1, + &handle->tty.rd.last_key[prefix_len], + sizeof handle->tty.rd.last_key, + NULL, + NULL); + } + + /* Whatever happened, the last character wasn't a high surrogate. */ + handle->tty.rd.last_utf16_high_surrogate = 0; + + /* If the utf16 character(s) couldn't be converted something must */ + /* be wrong. */ + if (!char_len) { + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + handle->read_cb((uv_stream_t*) handle, + uv_translate_sys_error(GetLastError()), + &buf); + goto out; + } + + handle->tty.rd.last_key_len = (unsigned char) (prefix_len + char_len); + handle->tty.rd.last_key_offset = 0; + continue; + + } else { + /* Function key pressed */ + const char* vt100; + size_t prefix_len, vt100_len; + + vt100 = get_vt100_fn_key(KEV.wVirtualKeyCode, + !!(KEV.dwControlKeyState & SHIFT_PRESSED), + !!(KEV.dwControlKeyState & ( + LEFT_CTRL_PRESSED | + RIGHT_CTRL_PRESSED)), + &vt100_len); + + /* If we were unable to map to a vt100 sequence, just ignore. */ + if (!vt100) { + continue; + } + + /* Prefix with \x033 when the alt key was held. */ + if (KEV.dwControlKeyState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) { + handle->tty.rd.last_key[0] = '\033'; + prefix_len = 1; + } else { + prefix_len = 0; + } + + /* Copy the vt100 sequence to the handle buffer. 
*/ + assert(prefix_len + vt100_len < sizeof handle->tty.rd.last_key); + memcpy(&handle->tty.rd.last_key[prefix_len], vt100, vt100_len); + + handle->tty.rd.last_key_len = (unsigned char) (prefix_len + vt100_len); + handle->tty.rd.last_key_offset = 0; + continue; + } + } else { + /* Copy any bytes left from the last keypress to the user buffer. */ + if (handle->tty.rd.last_key_offset < handle->tty.rd.last_key_len) { + /* Allocate a buffer if needed */ + if (buf_used == 0) { + buf = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 1024, &buf); + if (buf.base == NULL || buf.len == 0) { + handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf); + goto out; + } + assert(buf.base != NULL); + } + + buf.base[buf_used++] = handle->tty.rd.last_key[handle->tty.rd.last_key_offset++]; + + /* If the buffer is full, emit it */ + if ((size_t) buf_used == buf.len) { + handle->read_cb((uv_stream_t*) handle, buf_used, &buf); + buf = uv_null_buf_; + buf_used = 0; + } + + continue; + } + + /* Apply dwRepeat from the last input record. */ + if (--KEV.wRepeatCount > 0) { + handle->tty.rd.last_key_offset = 0; + continue; + } + + handle->tty.rd.last_key_len = 0; + continue; + } + } + + /* Send the buffer back to the user */ + if (buf_used > 0) { + handle->read_cb((uv_stream_t*) handle, buf_used, &buf); + } + + out: + /* Wait for more input events. */ + if ((handle->flags & UV_HANDLE_READING) && + !(handle->flags & UV_HANDLE_READ_PENDING)) { + uv_tty_queue_read(loop, handle); + } + + DECREASE_PENDING_REQ_COUNT(handle); + +#undef KEV +} + + + +void uv_process_tty_read_line_req(uv_loop_t* loop, uv_tty_t* handle, + uv_req_t* req) { + uv_buf_t buf; + + assert(handle->type == UV_TTY); + assert(handle->flags & UV_HANDLE_TTY_READABLE); + + buf = handle->tty.rd.read_line_buffer; + + handle->flags &= ~UV_HANDLE_READ_PENDING; + handle->tty.rd.read_line_buffer = uv_null_buf_; + + if (!REQ_SUCCESS(req)) { + /* Read was not successful */ + if (handle->flags & UV_HANDLE_READING) { + /* Real error */ + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(loop, handle); + handle->read_cb((uv_stream_t*) handle, + uv_translate_sys_error(GET_REQ_ERROR(req)), + &buf); + } else { + /* The read was cancelled, or whatever we don't care */ + handle->read_cb((uv_stream_t*) handle, 0, &buf); + } + + } else { + if (!(handle->flags & UV_HANDLE_CANCELLATION_PENDING)) { + /* Read successful */ + /* TODO: read unicode, convert to utf-8 */ + DWORD bytes = req->u.io.overlapped.InternalHigh; + handle->read_cb((uv_stream_t*) handle, bytes, &buf); + } else { + handle->flags &= ~UV_HANDLE_CANCELLATION_PENDING; + handle->read_cb((uv_stream_t*) handle, 0, &buf); + } + } + + /* Wait for more input events. */ + if ((handle->flags & UV_HANDLE_READING) && + !(handle->flags & UV_HANDLE_READ_PENDING)) { + uv_tty_queue_read(loop, handle); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle, + uv_req_t* req) { + assert(handle->type == UV_TTY); + assert(handle->flags & UV_HANDLE_TTY_READABLE); + + /* If the read_line_buffer member is zero, it must have been an raw read. */ + /* Otherwise it was a line-buffered read. */ + /* FIXME: This is quite obscure. Use a flag or something. 
*/ + if (handle->tty.rd.read_line_buffer.len == 0) { + uv_process_tty_read_raw_req(loop, handle, req); + } else { + uv_process_tty_read_line_req(loop, handle, req); + } +} + + +int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb, + uv_read_cb read_cb) { + uv_loop_t* loop = handle->loop; + + if (!(handle->flags & UV_HANDLE_TTY_READABLE)) { + return ERROR_INVALID_PARAMETER; + } + + handle->flags |= UV_HANDLE_READING; + INCREASE_ACTIVE_COUNT(loop, handle); + handle->read_cb = read_cb; + handle->alloc_cb = alloc_cb; + + /* If reading was stopped and then started again, there could still be a */ + /* read request pending. */ + if (handle->flags & UV_HANDLE_READ_PENDING) { + return 0; + } + + /* Maybe the user stopped reading half-way while processing key events. */ + /* Short-circuit if this could be the case. */ + if (handle->tty.rd.last_key_len > 0) { + SET_REQ_SUCCESS(&handle->read_req); + uv_insert_pending_req(handle->loop, (uv_req_t*) &handle->read_req); + return 0; + } + + uv_tty_queue_read(loop, handle); + + return 0; +} + + +int uv_tty_read_stop(uv_tty_t* handle) { + INPUT_RECORD record; + DWORD written, err; + + handle->flags &= ~UV_HANDLE_READING; + DECREASE_ACTIVE_COUNT(handle->loop, handle); + + if (!(handle->flags & UV_HANDLE_READ_PENDING)) + return 0; + + if (handle->flags & UV_HANDLE_TTY_RAW) { + /* Cancel raw read */ + /* Write some bullshit event to force the console wait to return. */ + memset(&record, 0, sizeof record); + if (!WriteConsoleInputW(handle->handle, &record, 1, &written)) { + return GetLastError(); + } + } else if (!(handle->flags & UV_HANDLE_CANCELLATION_PENDING)) { + /* Cancel line-buffered read if not already pending */ + err = uv__cancel_read_console(handle); + if (err) + return err; + + handle->flags |= UV_HANDLE_CANCELLATION_PENDING; + } + + return 0; +} + +static int uv__cancel_read_console(uv_tty_t* handle) { + HANDLE active_screen_buffer = INVALID_HANDLE_VALUE; + INPUT_RECORD record; + DWORD written; + DWORD err = 0; + LONG status; + + assert(!(handle->flags & UV_HANDLE_CANCELLATION_PENDING)); + + /* Hold the output lock during the cancellation, to ensure that further + writes don't interfere with the screen state. It will be the ReadConsole + thread's responsibility to release the lock. */ + uv_sem_wait(&uv_tty_output_lock); + status = InterlockedExchange(&uv__read_console_status, TRAP_REQUESTED); + if (status != IN_PROGRESS) { + /* Either we have managed to set a trap for the other thread before + ReadConsole is called, or ReadConsole has returned because the user + has pressed ENTER. In either case, there is nothing else to do. */ + uv_sem_post(&uv_tty_output_lock); + return 0; + } + + /* Save screen state before sending the VK_RETURN event */ + active_screen_buffer = CreateFileA("conout$", + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL); + + if (active_screen_buffer != INVALID_HANDLE_VALUE && + GetConsoleScreenBufferInfo(active_screen_buffer, + &uv__saved_screen_state)) { + InterlockedOr(&uv__restore_screen_state, 1); + } + + /* Write enter key event to force the console wait to return. 
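The fake VK_RETURN unblocks the ReadConsoleW call in uv_tty_line_read_thread; the screen state saved above lets that thread undo the visual effect of the extra Enter.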
*/ + record.EventType = KEY_EVENT; + record.Event.KeyEvent.bKeyDown = TRUE; + record.Event.KeyEvent.wRepeatCount = 1; + record.Event.KeyEvent.wVirtualKeyCode = VK_RETURN; + record.Event.KeyEvent.wVirtualScanCode = + MapVirtualKeyW(VK_RETURN, MAPVK_VK_TO_VSC); + record.Event.KeyEvent.uChar.UnicodeChar = L'\r'; + record.Event.KeyEvent.dwControlKeyState = 0; + if (!WriteConsoleInputW(handle->handle, &record, 1, &written)) + err = GetLastError(); + + if (active_screen_buffer != INVALID_HANDLE_VALUE) + CloseHandle(active_screen_buffer); + + return err; +} + + +static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) { + int old_virtual_width = uv_tty_virtual_width; + int old_virtual_height = uv_tty_virtual_height; + + uv_tty_virtual_width = info->dwSize.X; + uv_tty_virtual_height = info->srWindow.Bottom - info->srWindow.Top + 1; + + /* Recompute virtual window offset row. */ + if (uv_tty_virtual_offset == -1) { + uv_tty_virtual_offset = info->dwCursorPosition.Y; + } else if (uv_tty_virtual_offset < info->dwCursorPosition.Y - + uv_tty_virtual_height + 1) { + /* If suddenly find the cursor outside of the virtual window, it must */ + /* have somehow scrolled. Update the virtual window offset. */ + uv_tty_virtual_offset = info->dwCursorPosition.Y - + uv_tty_virtual_height + 1; + } + if (uv_tty_virtual_offset + uv_tty_virtual_height > info->dwSize.Y) { + uv_tty_virtual_offset = info->dwSize.Y - uv_tty_virtual_height; + } + if (uv_tty_virtual_offset < 0) { + uv_tty_virtual_offset = 0; + } + + /* If the virtual window size changed, emit a SIGWINCH signal. Don't emit */ + /* if this was the first time the virtual window size was computed. */ + if (old_virtual_width != -1 && old_virtual_height != -1 && + (uv_tty_virtual_width != old_virtual_width || + uv_tty_virtual_height != old_virtual_height)) { + uv__signal_dispatch(SIGWINCH); + } +} + + +static COORD uv_tty_make_real_coord(uv_tty_t* handle, + CONSOLE_SCREEN_BUFFER_INFO* info, int x, unsigned char x_relative, int y, + unsigned char y_relative) { + COORD result; + + uv_tty_update_virtual_window(info); + + /* Adjust y position */ + if (y_relative) { + y = info->dwCursorPosition.Y + y; + } else { + y = uv_tty_virtual_offset + y; + } + /* Clip y to virtual client rectangle */ + if (y < uv_tty_virtual_offset) { + y = uv_tty_virtual_offset; + } else if (y >= uv_tty_virtual_offset + uv_tty_virtual_height) { + y = uv_tty_virtual_offset + uv_tty_virtual_height - 1; + } + + /* Adjust x */ + if (x_relative) { + x = info->dwCursorPosition.X + x; + } + /* Clip x */ + if (x < 0) { + x = 0; + } else if (x >= uv_tty_virtual_width) { + x = uv_tty_virtual_width - 1; + } + + result.X = (unsigned short) x; + result.Y = (unsigned short) y; + return result; +} + + +static int uv_tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length, + DWORD* error) { + DWORD written; + + if (*error != ERROR_SUCCESS) { + return -1; + } + + if (!WriteConsoleW(handle->handle, + (void*) buffer, + length, + &written, + NULL)) { + *error = GetLastError(); + return -1; + } + + return 0; +} + + +static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative, + int y, unsigned char y_relative, DWORD* error) { + CONSOLE_SCREEN_BUFFER_INFO info; + COORD pos; + + if (*error != ERROR_SUCCESS) { + return -1; + } + + retry: + if (!GetConsoleScreenBufferInfo(handle->handle, &info)) { + *error = GetLastError(); + } + + pos = uv_tty_make_real_coord(handle, &info, x, x_relative, y, y_relative); + + if (!SetConsoleCursorPosition(handle->handle, pos)) { + if 
(GetLastError() == ERROR_INVALID_PARAMETER) { + /* The console may be resized - retry */ + goto retry; + } else { + *error = GetLastError(); + return -1; + } + } + + return 0; +} + + +static int uv_tty_reset(uv_tty_t* handle, DWORD* error) { + const COORD origin = {0, 0}; + const WORD char_attrs = uv_tty_default_text_attributes; + CONSOLE_SCREEN_BUFFER_INFO info; + DWORD count, written; + + if (*error != ERROR_SUCCESS) { + return -1; + } + + /* Reset original text attributes. */ + if (!SetConsoleTextAttribute(handle->handle, char_attrs)) { + *error = GetLastError(); + return -1; + } + + /* Move the cursor position to (0, 0). */ + if (!SetConsoleCursorPosition(handle->handle, origin)) { + *error = GetLastError(); + return -1; + } + + /* Clear the screen buffer. */ + retry: + if (!GetConsoleScreenBufferInfo(handle->handle, &info)) { + *error = GetLastError(); + return -1; + } + + count = info.dwSize.X * info.dwSize.Y; + + if (!(FillConsoleOutputCharacterW(handle->handle, + L'\x20', + count, + origin, + &written) && + FillConsoleOutputAttribute(handle->handle, + char_attrs, + written, + origin, + &written))) { + if (GetLastError() == ERROR_INVALID_PARAMETER) { + /* The console may be resized - retry */ + goto retry; + } else { + *error = GetLastError(); + return -1; + } + } + + /* Move the virtual window up to the top. */ + uv_tty_virtual_offset = 0; + uv_tty_update_virtual_window(&info); + + return 0; +} + + +static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen, + DWORD* error) { + CONSOLE_SCREEN_BUFFER_INFO info; + COORD start, end; + DWORD count, written; + + int x1, x2, y1, y2; + int x1r, x2r, y1r, y2r; + + if (*error != ERROR_SUCCESS) { + return -1; + } + + if (dir == 0) { + /* Clear from current position */ + x1 = 0; + x1r = 1; + } else { + /* Clear from column 0 */ + x1 = 0; + x1r = 0; + } + + if (dir == 1) { + /* Clear to current position */ + x2 = 0; + x2r = 1; + } else { + /* Clear to end of row. We pretend the console is 65536 characters wide, */ + /* uv_tty_make_real_coord will clip it to the actual console width. 
*/ + x2 = 0xffff; + x2r = 0; + } + + if (!entire_screen) { + /* Stay on our own row */ + y1 = y2 = 0; + y1r = y2r = 1; + } else { + /* Apply columns direction to row */ + y1 = x1; + y1r = x1r; + y2 = x2; + y2r = x2r; + } + + retry: + if (!GetConsoleScreenBufferInfo(handle->handle, &info)) { + *error = GetLastError(); + return -1; + } + + start = uv_tty_make_real_coord(handle, &info, x1, x1r, y1, y1r); + end = uv_tty_make_real_coord(handle, &info, x2, x2r, y2, y2r); + count = (end.Y * info.dwSize.X + end.X) - + (start.Y * info.dwSize.X + start.X) + 1; + + if (!(FillConsoleOutputCharacterW(handle->handle, + L'\x20', + count, + start, + &written) && + FillConsoleOutputAttribute(handle->handle, + info.wAttributes, + written, + start, + &written))) { + if (GetLastError() == ERROR_INVALID_PARAMETER) { + /* The console may be resized - retry */ + goto retry; + } else { + *error = GetLastError(); + return -1; + } + } + + return 0; +} + +#define FLIP_FGBG \ + do { \ + WORD fg = info.wAttributes & 0xF; \ + WORD bg = info.wAttributes & 0xF0; \ + info.wAttributes &= 0xFF00; \ + info.wAttributes |= fg << 4; \ + info.wAttributes |= bg >> 4; \ + } while (0) + +static int uv_tty_set_style(uv_tty_t* handle, DWORD* error) { + unsigned short argc = handle->tty.wr.ansi_csi_argc; + unsigned short* argv = handle->tty.wr.ansi_csi_argv; + int i; + CONSOLE_SCREEN_BUFFER_INFO info; + + char fg_color = -1, bg_color = -1; + char fg_bright = -1, bg_bright = -1; + char inverse = -1; + + if (argc == 0) { + /* Reset mode */ + fg_color = uv_tty_default_fg_color; + bg_color = uv_tty_default_bg_color; + fg_bright = uv_tty_default_fg_bright; + bg_bright = uv_tty_default_bg_bright; + inverse = uv_tty_default_inverse; + } + + for (i = 0; i < argc; i++) { + short arg = argv[i]; + + if (arg == 0) { + /* Reset mode */ + fg_color = uv_tty_default_fg_color; + bg_color = uv_tty_default_bg_color; + fg_bright = uv_tty_default_fg_bright; + bg_bright = uv_tty_default_bg_bright; + inverse = uv_tty_default_inverse; + + } else if (arg == 1) { + /* Foreground bright on */ + fg_bright = 1; + + } else if (arg == 2) { + /* Both bright off */ + fg_bright = 0; + bg_bright = 0; + + } else if (arg == 5) { + /* Background bright on */ + bg_bright = 1; + + } else if (arg == 7) { + /* Inverse: on */ + inverse = 1; + + } else if (arg == 21 || arg == 22) { + /* Foreground bright off */ + fg_bright = 0; + + } else if (arg == 25) { + /* Background bright off */ + bg_bright = 0; + + } else if (arg == 27) { + /* Inverse: off */ + inverse = 0; + + } else if (arg >= 30 && arg <= 37) { + /* Set foreground color */ + fg_color = arg - 30; + + } else if (arg == 39) { + /* Default text color */ + fg_color = uv_tty_default_fg_color; + fg_bright = uv_tty_default_fg_bright; + + } else if (arg >= 40 && arg <= 47) { + /* Set background color */ + bg_color = arg - 40; + + } else if (arg == 49) { + /* Default background color */ + bg_color = uv_tty_default_bg_color; + bg_bright = uv_tty_default_bg_bright; + + } else if (arg >= 90 && arg <= 97) { + /* Set bold foreground color */ + fg_bright = 1; + fg_color = arg - 90; + + } else if (arg >= 100 && arg <= 107) { + /* Set bold background color */ + bg_bright = 1; + bg_color = arg - 100; + + } + } + + if (fg_color == -1 && bg_color == -1 && fg_bright == -1 && + bg_bright == -1 && inverse == -1) { + /* Nothing changed */ + return 0; + } + + if (!GetConsoleScreenBufferInfo(handle->handle, &info)) { + *error = GetLastError(); + return -1; + } + + if ((info.wAttributes & COMMON_LVB_REVERSE_VIDEO) > 0) { + FLIP_FGBG; + } + + if 
(fg_color != -1) { + info.wAttributes &= ~(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE); + if (fg_color & 1) info.wAttributes |= FOREGROUND_RED; + if (fg_color & 2) info.wAttributes |= FOREGROUND_GREEN; + if (fg_color & 4) info.wAttributes |= FOREGROUND_BLUE; + } + + if (fg_bright != -1) { + if (fg_bright) { + info.wAttributes |= FOREGROUND_INTENSITY; + } else { + info.wAttributes &= ~FOREGROUND_INTENSITY; + } + } + + if (bg_color != -1) { + info.wAttributes &= ~(BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE); + if (bg_color & 1) info.wAttributes |= BACKGROUND_RED; + if (bg_color & 2) info.wAttributes |= BACKGROUND_GREEN; + if (bg_color & 4) info.wAttributes |= BACKGROUND_BLUE; + } + + if (bg_bright != -1) { + if (bg_bright) { + info.wAttributes |= BACKGROUND_INTENSITY; + } else { + info.wAttributes &= ~BACKGROUND_INTENSITY; + } + } + + if (inverse != -1) { + if (inverse) { + info.wAttributes |= COMMON_LVB_REVERSE_VIDEO; + } else { + info.wAttributes &= ~COMMON_LVB_REVERSE_VIDEO; + } + } + + if ((info.wAttributes & COMMON_LVB_REVERSE_VIDEO) > 0) { + FLIP_FGBG; + } + + if (!SetConsoleTextAttribute(handle->handle, info.wAttributes)) { + *error = GetLastError(); + return -1; + } + + return 0; +} + + +static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes, + DWORD* error) { + CONSOLE_SCREEN_BUFFER_INFO info; + + if (*error != ERROR_SUCCESS) { + return -1; + } + + if (!GetConsoleScreenBufferInfo(handle->handle, &info)) { + *error = GetLastError(); + return -1; + } + + uv_tty_update_virtual_window(&info); + + handle->tty.wr.saved_position.X = info.dwCursorPosition.X; + handle->tty.wr.saved_position.Y = info.dwCursorPosition.Y - uv_tty_virtual_offset; + handle->flags |= UV_HANDLE_TTY_SAVED_POSITION; + + if (save_attributes) { + handle->tty.wr.saved_attributes = info.wAttributes & + (FOREGROUND_INTENSITY | BACKGROUND_INTENSITY); + handle->flags |= UV_HANDLE_TTY_SAVED_ATTRIBUTES; + } + + return 0; +} + + +static int uv_tty_restore_state(uv_tty_t* handle, + unsigned char restore_attributes, DWORD* error) { + CONSOLE_SCREEN_BUFFER_INFO info; + WORD new_attributes; + + if (*error != ERROR_SUCCESS) { + return -1; + } + + if (handle->flags & UV_HANDLE_TTY_SAVED_POSITION) { + if (uv_tty_move_caret(handle, + handle->tty.wr.saved_position.X, + 0, + handle->tty.wr.saved_position.Y, + 0, + error) != 0) { + return -1; + } + } + + if (restore_attributes && + (handle->flags & UV_HANDLE_TTY_SAVED_ATTRIBUTES)) { + if (!GetConsoleScreenBufferInfo(handle->handle, &info)) { + *error = GetLastError(); + return -1; + } + + new_attributes = info.wAttributes; + new_attributes &= ~(FOREGROUND_INTENSITY | BACKGROUND_INTENSITY); + new_attributes |= handle->tty.wr.saved_attributes; + + if (!SetConsoleTextAttribute(handle->handle, new_attributes)) { + *error = GetLastError(); + return -1; + } + } + + return 0; +} + +static int uv_tty_set_cursor_visibility(uv_tty_t* handle, + BOOL visible, + DWORD* error) { + CONSOLE_CURSOR_INFO cursor_info; + + if (!GetConsoleCursorInfo(handle->handle, &cursor_info)) { + *error = GetLastError(); + return -1; + } + + cursor_info.bVisible = visible; + + if (!SetConsoleCursorInfo(handle->handle, &cursor_info)) { + *error = GetLastError(); + return -1; + } + + return 0; +} + +static int uv_tty_write_bufs(uv_tty_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + DWORD* error) { + /* We can only write 8k characters at a time. Windows can't handle */ + /* much more characters in a single console write anyway. 
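Hence the fixed 8192-WCHAR buffer below, which is flushed whenever it would overflow and before every cursor, clear or style operation so that escape sequences take effect in order.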
*/ + WCHAR utf16_buf[8192]; + DWORD utf16_buf_used = 0; + unsigned int i; + +#define FLUSH_TEXT() \ + do { \ + if (utf16_buf_used > 0) { \ + uv_tty_emit_text(handle, utf16_buf, utf16_buf_used, error); \ + utf16_buf_used = 0; \ + } \ + } while (0) + +#define ENSURE_BUFFER_SPACE(wchars_needed) \ + if (wchars_needed > ARRAY_SIZE(utf16_buf) - utf16_buf_used) { \ + FLUSH_TEXT(); \ + } + + /* Cache for fast access */ + unsigned char utf8_bytes_left = handle->tty.wr.utf8_bytes_left; + unsigned int utf8_codepoint = handle->tty.wr.utf8_codepoint; + unsigned char previous_eol = handle->tty.wr.previous_eol; + unsigned char ansi_parser_state = handle->tty.wr.ansi_parser_state; + + /* Store the error here. If we encounter an error, stop trying to do i/o */ + /* but keep parsing the buffer so we leave the parser in a consistent */ + /* state. */ + *error = ERROR_SUCCESS; + + uv_sem_wait(&uv_tty_output_lock); + + for (i = 0; i < nbufs; i++) { + uv_buf_t buf = bufs[i]; + unsigned int j; + + if (uv__vterm_state == UV_SUPPORTED) { + utf16_buf_used = MultiByteToWideChar(CP_UTF8, + 0, + buf.base, + buf.len, + NULL, + 0); + + if (utf16_buf_used == 0) { + *error = GetLastError(); + break; + } + + if (!MultiByteToWideChar(CP_UTF8, + 0, + buf.base, + buf.len, + utf16_buf, + utf16_buf_used)) { + *error = GetLastError(); + break; + } + + FLUSH_TEXT(); + continue; + } + + for (j = 0; j < buf.len; j++) { + unsigned char c = buf.base[j]; + + /* Run the character through the utf8 decoder We happily accept non */ + /* shortest form encodings and invalid code points - there's no real */ + /* harm that can be done. */ + if (utf8_bytes_left == 0) { + /* Read utf-8 start byte */ + DWORD first_zero_bit; + unsigned char not_c = ~c; +#ifdef _MSC_VER /* msvc */ + if (_BitScanReverse(&first_zero_bit, not_c)) { +#else /* assume gcc */ + if (c != 0) { + first_zero_bit = (sizeof(int) * 8) - 1 - __builtin_clz(not_c); +#endif + if (first_zero_bit == 7) { + /* Ascii - pass right through */ + utf8_codepoint = (unsigned int) c; + + } else if (first_zero_bit <= 5) { + /* Multibyte sequence */ + utf8_codepoint = (0xff >> (8 - first_zero_bit)) & c; + utf8_bytes_left = (char) (6 - first_zero_bit); + + } else { + /* Invalid continuation */ + utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER; + } + + } else { + /* 0xff -- invalid */ + utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER; + } + + } else if ((c & 0xc0) == 0x80) { + /* Valid continuation of utf-8 multibyte sequence */ + utf8_bytes_left--; + utf8_codepoint <<= 6; + utf8_codepoint |= ((unsigned int) c & 0x3f); + + } else { + /* Start byte where continuation was expected. */ + utf8_bytes_left = 0; + utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER; + /* Patch buf offset so this character will be parsed again as a */ + /* start byte. */ + j--; + } + + /* Maybe we need to parse more bytes to find a character. */ + if (utf8_bytes_left != 0) { + continue; + } + + /* Parse vt100/ansi escape codes */ + if (ansi_parser_state == ANSI_NORMAL) { + switch (utf8_codepoint) { + case '\033': + ansi_parser_state = ANSI_ESCAPE_SEEN; + continue; + + case 0233: + ansi_parser_state = ANSI_CSI; + handle->tty.wr.ansi_csi_argc = 0; + continue; + } + + } else if (ansi_parser_state == ANSI_ESCAPE_SEEN) { + switch (utf8_codepoint) { + case '[': + ansi_parser_state = ANSI_CSI; + handle->tty.wr.ansi_csi_argc = 0; + continue; + + case '^': + case '_': + case 'P': + case ']': + /* Not supported, but we'll have to parse until we see a stop */ + /* code, e.g. ESC \ or BEL. 
*/ + ansi_parser_state = ANSI_ST_CONTROL; + continue; + + case '\033': + /* Ignore double escape. */ + continue; + + case 'c': + /* Full console reset. */ + FLUSH_TEXT(); + uv_tty_reset(handle, error); + ansi_parser_state = ANSI_NORMAL; + continue; + + case '7': + /* Save the cursor position and text attributes. */ + FLUSH_TEXT(); + uv_tty_save_state(handle, 1, error); + ansi_parser_state = ANSI_NORMAL; + continue; + + case '8': + /* Restore the cursor position and text attributes */ + FLUSH_TEXT(); + uv_tty_restore_state(handle, 1, error); + ansi_parser_state = ANSI_NORMAL; + continue; + + default: + if (utf8_codepoint >= '@' && utf8_codepoint <= '_') { + /* Single-char control. */ + ansi_parser_state = ANSI_NORMAL; + continue; + } else { + /* Invalid - proceed as normal, */ + ansi_parser_state = ANSI_NORMAL; + } + } + + } else if (ansi_parser_state & ANSI_CSI) { + if (!(ansi_parser_state & ANSI_IGNORE)) { + if (utf8_codepoint >= '0' && utf8_codepoint <= '9') { + /* Parsing a numerical argument */ + + if (!(ansi_parser_state & ANSI_IN_ARG)) { + /* We were not currently parsing a number */ + + /* Check for too many arguments */ + if (handle->tty.wr.ansi_csi_argc >= ARRAY_SIZE(handle->tty.wr.ansi_csi_argv)) { + ansi_parser_state |= ANSI_IGNORE; + continue; + } + + ansi_parser_state |= ANSI_IN_ARG; + handle->tty.wr.ansi_csi_argc++; + handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1] = + (unsigned short) utf8_codepoint - '0'; + continue; + } else { + /* We were already parsing a number. Parse next digit. */ + uint32_t value = 10 * + handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1]; + + /* Check for overflow. */ + if (value > UINT16_MAX) { + ansi_parser_state |= ANSI_IGNORE; + continue; + } + + handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1] = + (unsigned short) value + (utf8_codepoint - '0'); + continue; + } + + } else if (utf8_codepoint == ';') { + /* Denotes the end of an argument. */ + if (ansi_parser_state & ANSI_IN_ARG) { + ansi_parser_state &= ~ANSI_IN_ARG; + continue; + + } else { + /* If ANSI_IN_ARG is not set, add another argument and */ + /* default it to 0. */ + /* Check for too many arguments */ + if (handle->tty.wr.ansi_csi_argc >= ARRAY_SIZE(handle->tty.wr.ansi_csi_argv)) { + ansi_parser_state |= ANSI_IGNORE; + continue; + } + + handle->tty.wr.ansi_csi_argc++; + handle->tty.wr.ansi_csi_argv[handle->tty.wr.ansi_csi_argc - 1] = 0; + continue; + } + + } else if (utf8_codepoint == '?' && !(ansi_parser_state & ANSI_IN_ARG) && + handle->tty.wr.ansi_csi_argc == 0) { + /* Ignores '?' if it is the first character after CSI[ */ + /* This is an extension character from the VT100 codeset */ + /* that is supported and used by most ANSI terminals today. */ + continue; + + } else if (utf8_codepoint >= '@' && utf8_codepoint <= '~' && + (handle->tty.wr.ansi_csi_argc > 0 || utf8_codepoint != '[')) { + int x, y, d; + + /* Command byte */ + switch (utf8_codepoint) { + case 'A': + /* cursor up */ + FLUSH_TEXT(); + y = -(handle->tty.wr.ansi_csi_argc ? handle->tty.wr.ansi_csi_argv[0] : 1); + uv_tty_move_caret(handle, 0, 1, y, 1, error); + break; + + case 'B': + /* cursor down */ + FLUSH_TEXT(); + y = handle->tty.wr.ansi_csi_argc ? handle->tty.wr.ansi_csi_argv[0] : 1; + uv_tty_move_caret(handle, 0, 1, y, 1, error); + break; + + case 'C': + /* cursor forward */ + FLUSH_TEXT(); + x = handle->tty.wr.ansi_csi_argc ? 
handle->tty.wr.ansi_csi_argv[0] : 1; + uv_tty_move_caret(handle, x, 1, 0, 1, error); + break; + + case 'D': + /* cursor back */ + FLUSH_TEXT(); + x = -(handle->tty.wr.ansi_csi_argc ? handle->tty.wr.ansi_csi_argv[0] : 1); + uv_tty_move_caret(handle, x, 1, 0, 1, error); + break; + + case 'E': + /* cursor next line */ + FLUSH_TEXT(); + y = handle->tty.wr.ansi_csi_argc ? handle->tty.wr.ansi_csi_argv[0] : 1; + uv_tty_move_caret(handle, 0, 0, y, 1, error); + break; + + case 'F': + /* cursor previous line */ + FLUSH_TEXT(); + y = -(handle->tty.wr.ansi_csi_argc ? handle->tty.wr.ansi_csi_argv[0] : 1); + uv_tty_move_caret(handle, 0, 0, y, 1, error); + break; + + case 'G': + /* cursor horizontal move absolute */ + FLUSH_TEXT(); + x = (handle->tty.wr.ansi_csi_argc >= 1 && handle->tty.wr.ansi_csi_argv[0]) + ? handle->tty.wr.ansi_csi_argv[0] - 1 : 0; + uv_tty_move_caret(handle, x, 0, 0, 1, error); + break; + + case 'H': + case 'f': + /* cursor move absolute */ + FLUSH_TEXT(); + y = (handle->tty.wr.ansi_csi_argc >= 1 && handle->tty.wr.ansi_csi_argv[0]) + ? handle->tty.wr.ansi_csi_argv[0] - 1 : 0; + x = (handle->tty.wr.ansi_csi_argc >= 2 && handle->tty.wr.ansi_csi_argv[1]) + ? handle->tty.wr.ansi_csi_argv[1] - 1 : 0; + uv_tty_move_caret(handle, x, 0, y, 0, error); + break; + + case 'J': + /* Erase screen */ + FLUSH_TEXT(); + d = handle->tty.wr.ansi_csi_argc ? handle->tty.wr.ansi_csi_argv[0] : 0; + if (d >= 0 && d <= 2) { + uv_tty_clear(handle, d, 1, error); + } + break; + + case 'K': + /* Erase line */ + FLUSH_TEXT(); + d = handle->tty.wr.ansi_csi_argc ? handle->tty.wr.ansi_csi_argv[0] : 0; + if (d >= 0 && d <= 2) { + uv_tty_clear(handle, d, 0, error); + } + break; + + case 'm': + /* Set style */ + FLUSH_TEXT(); + uv_tty_set_style(handle, error); + break; + + case 's': + /* Save the cursor position. */ + FLUSH_TEXT(); + uv_tty_save_state(handle, 0, error); + break; + + case 'u': + /* Restore the cursor position */ + FLUSH_TEXT(); + uv_tty_restore_state(handle, 0, error); + break; + + case 'l': + /* Hide the cursor */ + if (handle->tty.wr.ansi_csi_argc == 1 && + handle->tty.wr.ansi_csi_argv[0] == 25) { + FLUSH_TEXT(); + uv_tty_set_cursor_visibility(handle, 0, error); + } + break; + + case 'h': + /* Show the cursor */ + if (handle->tty.wr.ansi_csi_argc == 1 && + handle->tty.wr.ansi_csi_argv[0] == 25) { + FLUSH_TEXT(); + uv_tty_set_cursor_visibility(handle, 1, error); + } + break; + } + + /* Sequence ended - go back to normal state. */ + ansi_parser_state = ANSI_NORMAL; + continue; + + } else { + /* We don't support commands that use private mode characters or */ + /* intermediaries. Ignore the rest of the sequence. */ + ansi_parser_state |= ANSI_IGNORE; + continue; + } + } else { + /* We're ignoring this command. Stop only on command character. 
*/ + if (utf8_codepoint >= '@' && utf8_codepoint <= '~') { + ansi_parser_state = ANSI_NORMAL; + } + continue; + } + + } else if (ansi_parser_state & ANSI_ST_CONTROL) { + /* Unsupported control code */ + /* Ignore everything until we see BEL or ESC \ */ + if (ansi_parser_state & ANSI_IN_STRING) { + if (!(ansi_parser_state & ANSI_BACKSLASH_SEEN)) { + if (utf8_codepoint == '"') { + ansi_parser_state &= ~ANSI_IN_STRING; + } else if (utf8_codepoint == '\\') { + ansi_parser_state |= ANSI_BACKSLASH_SEEN; + } + } else { + ansi_parser_state &= ~ANSI_BACKSLASH_SEEN; + } + } else { + if (utf8_codepoint == '\007' || (utf8_codepoint == '\\' && + (ansi_parser_state & ANSI_ESCAPE_SEEN))) { + /* End of sequence */ + ansi_parser_state = ANSI_NORMAL; + } else if (utf8_codepoint == '\033') { + /* Escape character */ + ansi_parser_state |= ANSI_ESCAPE_SEEN; + } else if (utf8_codepoint == '"') { + /* String starting */ + ansi_parser_state |= ANSI_IN_STRING; + ansi_parser_state &= ~ANSI_ESCAPE_SEEN; + ansi_parser_state &= ~ANSI_BACKSLASH_SEEN; + } else { + ansi_parser_state &= ~ANSI_ESCAPE_SEEN; + } + } + continue; + } else { + /* Inconsistent state */ + abort(); + } + + /* We wouldn't mind emitting utf-16 surrogate pairs. Too bad, the */ + /* windows console doesn't really support UTF-16, so just emit the */ + /* replacement character. */ + if (utf8_codepoint > 0xffff) { + utf8_codepoint = UNICODE_REPLACEMENT_CHARACTER; + } + + if (utf8_codepoint == 0x0a || utf8_codepoint == 0x0d) { + /* EOL conversion - emit \r\n when we see \n. */ + + if (utf8_codepoint == 0x0a && previous_eol != 0x0d) { + /* \n was not preceded by \r; print \r\n. */ + ENSURE_BUFFER_SPACE(2); + utf16_buf[utf16_buf_used++] = L'\r'; + utf16_buf[utf16_buf_used++] = L'\n'; + } else if (utf8_codepoint == 0x0d && previous_eol == 0x0a) { + /* \n was followed by \r; do not print the \r, since */ + /* the source was either \r\n\r (so the second \r is */ + /* redundant) or was \n\r (so the \n was processed */ + /* by the last case and an \r automatically inserted). */ + } else { + /* \r without \n; print \r as-is. */ + ENSURE_BUFFER_SPACE(1); + utf16_buf[utf16_buf_used++] = (WCHAR) utf8_codepoint; + } + + previous_eol = (char) utf8_codepoint; + + } else if (utf8_codepoint <= 0xffff) { + /* Encode character into utf-16 buffer. */ + ENSURE_BUFFER_SPACE(1); + utf16_buf[utf16_buf_used++] = (WCHAR) utf8_codepoint; + previous_eol = 0; + } + } + } + + /* Flush remaining characters */ + FLUSH_TEXT(); + + /* Copy cached values back to struct. 
*/ + handle->tty.wr.utf8_bytes_left = utf8_bytes_left; + handle->tty.wr.utf8_codepoint = utf8_codepoint; + handle->tty.wr.previous_eol = previous_eol; + handle->tty.wr.ansi_parser_state = ansi_parser_state; + + uv_sem_post(&uv_tty_output_lock); + + if (*error == STATUS_SUCCESS) { + return 0; + } else { + return -1; + } + +#undef FLUSH_TEXT +} + + +int uv_tty_write(uv_loop_t* loop, + uv_write_t* req, + uv_tty_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + uv_write_cb cb) { + DWORD error; + + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_WRITE; + req->handle = (uv_stream_t*) handle; + req->cb = cb; + + handle->reqs_pending++; + handle->stream.conn.write_reqs_pending++; + REGISTER_HANDLE_REQ(loop, handle, req); + + req->u.io.queued_bytes = 0; + + if (!uv_tty_write_bufs(handle, bufs, nbufs, &error)) { + SET_REQ_SUCCESS(req); + } else { + SET_REQ_ERROR(req, error); + } + + uv_insert_pending_req(loop, (uv_req_t*) req); + + return 0; +} + + +int uv__tty_try_write(uv_tty_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs) { + DWORD error; + + if (handle->stream.conn.write_reqs_pending > 0) + return UV_EAGAIN; + + if (uv_tty_write_bufs(handle, bufs, nbufs, &error)) + return uv_translate_sys_error(error); + + return uv__count_bufs(bufs, nbufs); +} + + +void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle, + uv_write_t* req) { + int err; + + handle->write_queue_size -= req->u.io.queued_bytes; + UNREGISTER_HANDLE_REQ(loop, handle, req); + + if (req->cb) { + err = GET_REQ_ERROR(req); + req->cb(req, uv_translate_sys_error(err)); + } + + handle->stream.conn.write_reqs_pending--; + if (handle->stream.conn.shutdown_req != NULL && + handle->stream.conn.write_reqs_pending == 0) { + uv_want_endgame(loop, (uv_handle_t*)handle); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_tty_close(uv_tty_t* handle) { + assert(handle->u.fd == -1 || handle->u.fd > 2); + if (handle->u.fd == -1) + CloseHandle(handle->handle); + else + close(handle->u.fd); + + if (handle->flags & UV_HANDLE_READING) + uv_tty_read_stop(handle); + + handle->u.fd = -1; + handle->handle = INVALID_HANDLE_VALUE; + handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE); + uv__handle_closing(handle); + + if (handle->reqs_pending == 0) { + uv_want_endgame(handle->loop, (uv_handle_t*) handle); + } +} + + +void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle) { + if (!(handle->flags & UV_HANDLE_TTY_READABLE) && + handle->stream.conn.shutdown_req != NULL && + handle->stream.conn.write_reqs_pending == 0) { + UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req); + + /* TTY shutdown is really just a no-op */ + if (handle->stream.conn.shutdown_req->cb) { + if (handle->flags & UV__HANDLE_CLOSING) { + handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, UV_ECANCELED); + } else { + handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, 0); + } + } + + handle->stream.conn.shutdown_req = NULL; + + DECREASE_PENDING_REQ_COUNT(handle); + return; + } + + if (handle->flags & UV__HANDLE_CLOSING && + handle->reqs_pending == 0) { + /* The wait handle used for raw reading should be unregistered when the */ + /* wait callback runs. 
*/ + assert(!(handle->flags & UV_HANDLE_TTY_READABLE) || + handle->tty.rd.read_raw_wait == NULL); + + assert(!(handle->flags & UV_HANDLE_CLOSED)); + uv__handle_close(handle); + } +} + + +/* TODO: remove me */ +void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle, + uv_req_t* raw_req) { + abort(); +} + + +/* TODO: remove me */ +void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle, + uv_connect_t* req) { + abort(); +} + + +int uv_tty_reset_mode(void) { + /* Not necessary to do anything. */ + return 0; +} + +/* Determine whether or not this version of windows supports + * proper ANSI color codes. Should be supported as of windows + * 10 version 1511, build number 10.0.10586. + */ +static void uv__determine_vterm_state(HANDLE handle) { + DWORD dwMode = 0; + + if (!GetConsoleMode(handle, &dwMode)) { + uv__vterm_state = UV_UNSUPPORTED; + return; + } + + dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING; + if (!SetConsoleMode(handle, dwMode)) { + uv__vterm_state = UV_UNSUPPORTED; + return; + } + + uv__vterm_state = UV_SUPPORTED; +} diff --git a/deps/libuv/src/win/udp.c b/deps/libuv/src/win/udp.c new file mode 100644 index 00000000..9bf1453e --- /dev/null +++ b/deps/libuv/src/win/udp.c @@ -0,0 +1,928 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include + +#include "uv.h" +#include "internal.h" +#include "handle-inl.h" +#include "stream-inl.h" +#include "req-inl.h" + + +/* + * Threshold of active udp streams for which to preallocate udp read buffers. 
+ */ +const unsigned int uv_active_udp_streams_threshold = 0; + +/* A zero-size buffer for use by uv_udp_read */ +static char uv_zero_[] = ""; + +int uv_udp_getsockname(const uv_udp_t* handle, + struct sockaddr* name, + int* namelen) { + int result; + + if (handle->socket == INVALID_SOCKET) { + return UV_EINVAL; + } + + result = getsockname(handle->socket, name, namelen); + if (result != 0) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + +static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket, + int family) { + DWORD yes = 1; + WSAPROTOCOL_INFOW info; + int opt_len; + + if (handle->socket != INVALID_SOCKET) + return UV_EBUSY; + + /* Set the socket to nonblocking mode */ + if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) { + return WSAGetLastError(); + } + + /* Make the socket non-inheritable */ + if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) { + return GetLastError(); + } + + /* Associate it with the I/O completion port. */ + /* Use uv_handle_t pointer as completion key. */ + if (CreateIoCompletionPort((HANDLE)socket, + loop->iocp, + (ULONG_PTR)socket, + 0) == NULL) { + return GetLastError(); + } + + if (pSetFileCompletionNotificationModes) { + /* All known Windows that support SetFileCompletionNotificationModes */ + /* have a bug that makes it impossible to use this function in */ + /* conjunction with datagram sockets. We can work around that but only */ + /* if the user is using the default UDP driver (AFD) and has no other */ + /* LSPs stacked on top. Here we check whether that is the case. */ + opt_len = (int) sizeof info; + if (getsockopt(socket, + SOL_SOCKET, + SO_PROTOCOL_INFOW, + (char*) &info, + &opt_len) == SOCKET_ERROR) { + return GetLastError(); + } + + if (info.ProtocolChain.ChainLen == 1) { + if (pSetFileCompletionNotificationModes((HANDLE)socket, + FILE_SKIP_SET_EVENT_ON_HANDLE | + FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) { + handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP; + handle->func_wsarecv = uv_wsarecv_workaround; + handle->func_wsarecvfrom = uv_wsarecvfrom_workaround; + } else if (GetLastError() != ERROR_INVALID_FUNCTION) { + return GetLastError(); + } + } + } + + handle->socket = socket; + + if (family == AF_INET6) { + handle->flags |= UV_HANDLE_IPV6; + } else { + assert(!(handle->flags & UV_HANDLE_IPV6)); + } + + return 0; +} + + +int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) { + int domain; + + /* Use the lower 8 bits for the domain */ + domain = flags & 0xFF; + if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) + return UV_EINVAL; + + if (flags & ~0xFF) + return UV_EINVAL; + + uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP); + handle->socket = INVALID_SOCKET; + handle->reqs_pending = 0; + handle->activecnt = 0; + handle->func_wsarecv = WSARecv; + handle->func_wsarecvfrom = WSARecvFrom; + handle->send_queue_size = 0; + handle->send_queue_count = 0; + uv_req_init(loop, (uv_req_t*) &(handle->recv_req)); + handle->recv_req.type = UV_UDP_RECV; + handle->recv_req.data = handle; + + /* If anything fails beyond this point we need to remove the handle from + * the handle queue, since it was added by uv__handle_init. 
+ */ + + if (domain != AF_UNSPEC) { + SOCKET sock; + DWORD err; + + sock = socket(domain, SOCK_DGRAM, 0); + if (sock == INVALID_SOCKET) { + err = WSAGetLastError(); + QUEUE_REMOVE(&handle->handle_queue); + return uv_translate_sys_error(err); + } + + err = uv_udp_set_socket(handle->loop, handle, sock, domain); + if (err) { + closesocket(sock); + QUEUE_REMOVE(&handle->handle_queue); + return uv_translate_sys_error(err); + } + } + + return 0; +} + + +int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) { + return uv_udp_init_ex(loop, handle, AF_UNSPEC); +} + + +void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) { + uv_udp_recv_stop(handle); + closesocket(handle->socket); + handle->socket = INVALID_SOCKET; + + uv__handle_closing(handle); + + if (handle->reqs_pending == 0) { + uv_want_endgame(loop, (uv_handle_t*) handle); + } +} + + +void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) { + if (handle->flags & UV__HANDLE_CLOSING && + handle->reqs_pending == 0) { + assert(!(handle->flags & UV_HANDLE_CLOSED)); + uv__handle_close(handle); + } +} + + +static int uv_udp_maybe_bind(uv_udp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + int r; + int err; + DWORD no = 0; + + if (handle->flags & UV_HANDLE_BOUND) + return 0; + + if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) { + /* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */ + return ERROR_INVALID_PARAMETER; + } + + if (handle->socket == INVALID_SOCKET) { + SOCKET sock = socket(addr->sa_family, SOCK_DGRAM, 0); + if (sock == INVALID_SOCKET) { + return WSAGetLastError(); + } + + err = uv_udp_set_socket(handle->loop, handle, sock, addr->sa_family); + if (err) { + closesocket(sock); + return err; + } + } + + if (flags & UV_UDP_REUSEADDR) { + DWORD yes = 1; + /* Set SO_REUSEADDR on the socket. */ + if (setsockopt(handle->socket, + SOL_SOCKET, + SO_REUSEADDR, + (char*) &yes, + sizeof yes) == SOCKET_ERROR) { + err = WSAGetLastError(); + return err; + } + } + + if (addr->sa_family == AF_INET6) + handle->flags |= UV_HANDLE_IPV6; + + if (addr->sa_family == AF_INET6 && !(flags & UV_UDP_IPV6ONLY)) { + /* On windows IPV6ONLY is on by default. */ + /* If the user doesn't specify it libuv turns it off. */ + + /* TODO: how to handle errors? This may fail if there is no ipv4 stack */ + /* available, or when run on XP/2003 which have no support for dualstack */ + /* sockets. For now we're silently ignoring the error. */ + setsockopt(handle->socket, + IPPROTO_IPV6, + IPV6_V6ONLY, + (char*) &no, + sizeof no); + } + + r = bind(handle->socket, addr, addrlen); + if (r == SOCKET_ERROR) { + return WSAGetLastError(); + } + + handle->flags |= UV_HANDLE_BOUND; + + return 0; +} + + +static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) { + uv_req_t* req; + uv_buf_t buf; + DWORD bytes, flags; + int result; + + assert(handle->flags & UV_HANDLE_READING); + assert(!(handle->flags & UV_HANDLE_READ_PENDING)); + + req = &handle->recv_req; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + + /* + * Preallocate a read buffer if the number of active streams is below + * the threshold. 
+ */ + if (loop->active_udp_streams < uv_active_udp_streams_threshold) { + handle->flags &= ~UV_HANDLE_ZERO_READ; + + handle->recv_buffer = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->recv_buffer); + if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) { + handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0); + return; + } + assert(handle->recv_buffer.base != NULL); + + buf = handle->recv_buffer; + memset(&handle->recv_from, 0, sizeof handle->recv_from); + handle->recv_from_len = sizeof handle->recv_from; + flags = 0; + + result = handle->func_wsarecvfrom(handle->socket, + (WSABUF*) &buf, + 1, + &bytes, + &flags, + (struct sockaddr*) &handle->recv_from, + &handle->recv_from_len, + &req->u.io.overlapped, + NULL); + + if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { + /* Process the req without IOCP. */ + handle->flags |= UV_HANDLE_READ_PENDING; + req->u.io.overlapped.InternalHigh = bytes; + handle->reqs_pending++; + uv_insert_pending_req(loop, req); + } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { + /* The req will be processed with IOCP. */ + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; + } else { + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, WSAGetLastError()); + uv_insert_pending_req(loop, req); + handle->reqs_pending++; + } + + } else { + handle->flags |= UV_HANDLE_ZERO_READ; + + buf.base = (char*) uv_zero_; + buf.len = 0; + flags = MSG_PEEK; + + result = handle->func_wsarecv(handle->socket, + (WSABUF*) &buf, + 1, + &bytes, + &flags, + &req->u.io.overlapped, + NULL); + + if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { + /* Process the req without IOCP. */ + handle->flags |= UV_HANDLE_READ_PENDING; + req->u.io.overlapped.InternalHigh = bytes; + handle->reqs_pending++; + uv_insert_pending_req(loop, req); + } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { + /* The req will be processed with IOCP. */ + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; + } else { + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, WSAGetLastError()); + uv_insert_pending_req(loop, req); + handle->reqs_pending++; + } + } +} + + +int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, + uv_udp_recv_cb recv_cb) { + uv_loop_t* loop = handle->loop; + int err; + + if (handle->flags & UV_HANDLE_READING) { + return WSAEALREADY; + } + + err = uv_udp_maybe_bind(handle, + (const struct sockaddr*) &uv_addr_ip4_any_, + sizeof(uv_addr_ip4_any_), + 0); + if (err) + return err; + + handle->flags |= UV_HANDLE_READING; + INCREASE_ACTIVE_COUNT(loop, handle); + loop->active_udp_streams++; + + handle->recv_cb = recv_cb; + handle->alloc_cb = alloc_cb; + + /* If reading was stopped and then started again, there could still be a */ + /* recv request pending. 
*/ + if (!(handle->flags & UV_HANDLE_READ_PENDING)) + uv_udp_queue_recv(loop, handle); + + return 0; +} + + +int uv__udp_recv_stop(uv_udp_t* handle) { + if (handle->flags & UV_HANDLE_READING) { + handle->flags &= ~UV_HANDLE_READING; + handle->loop->active_udp_streams--; + DECREASE_ACTIVE_COUNT(loop, handle); + } + + return 0; +} + + +static int uv__send(uv_udp_send_t* req, + uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen, + uv_udp_send_cb cb) { + uv_loop_t* loop = handle->loop; + DWORD result, bytes; + + uv_req_init(loop, (uv_req_t*) req); + req->type = UV_UDP_SEND; + req->handle = handle; + req->cb = cb; + memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); + + result = WSASendTo(handle->socket, + (WSABUF*)bufs, + nbufs, + &bytes, + 0, + addr, + addrlen, + &req->u.io.overlapped, + NULL); + + if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { + /* Request completed immediately. */ + req->u.io.queued_bytes = 0; + handle->reqs_pending++; + handle->send_queue_size += req->u.io.queued_bytes; + handle->send_queue_count++; + REGISTER_HANDLE_REQ(loop, handle, req); + uv_insert_pending_req(loop, (uv_req_t*)req); + } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { + /* Request queued by the kernel. */ + req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs); + handle->reqs_pending++; + handle->send_queue_size += req->u.io.queued_bytes; + handle->send_queue_count++; + REGISTER_HANDLE_REQ(loop, handle, req); + } else { + /* Send failed due to an error. */ + return WSAGetLastError(); + } + + return 0; +} + + +void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, + uv_req_t* req) { + uv_buf_t buf; + int partial; + + assert(handle->type == UV_UDP); + + handle->flags &= ~UV_HANDLE_READ_PENDING; + + if (!REQ_SUCCESS(req)) { + DWORD err = GET_REQ_SOCK_ERROR(req); + if (err == WSAEMSGSIZE) { + /* Not a real error, it just indicates that the received packet */ + /* was bigger than the receive buffer. */ + } else if (err == WSAECONNRESET || err == WSAENETRESET) { + /* A previous sendto operation failed; ignore this error. If */ + /* zero-reading we need to call WSARecv/WSARecvFrom _without_ the */ + /* MSG_PEEK flag to clear out the error queue. For nonzero reads, */ + /* immediately queue a new receive. */ + if (!(handle->flags & UV_HANDLE_ZERO_READ)) { + goto done; + } + } else { + /* A real error occurred. Report the error to the user only if we're */ + /* currently reading. */ + if (handle->flags & UV_HANDLE_READING) { + uv_udp_recv_stop(handle); + buf = (handle->flags & UV_HANDLE_ZERO_READ) ? + uv_buf_init(NULL, 0) : handle->recv_buffer; + handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0); + } + goto done; + } + } + + if (!(handle->flags & UV_HANDLE_ZERO_READ)) { + /* Successful read */ + partial = !REQ_SUCCESS(req); + handle->recv_cb(handle, + req->u.io.overlapped.InternalHigh, + &handle->recv_buffer, + (const struct sockaddr*) &handle->recv_from, + partial ? UV_UDP_PARTIAL : 0); + } else if (handle->flags & UV_HANDLE_READING) { + DWORD bytes, err, flags; + struct sockaddr_storage from; + int from_len; + + /* Do a nonblocking receive */ + /* TODO: try to read multiple datagrams at once. FIONREAD maybe? 
*/ + buf = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, 65536, &buf); + if (buf.base == NULL || buf.len == 0) { + handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0); + goto done; + } + assert(buf.base != NULL); + + memset(&from, 0, sizeof from); + from_len = sizeof from; + + flags = 0; + + if (WSARecvFrom(handle->socket, + (WSABUF*)&buf, + 1, + &bytes, + &flags, + (struct sockaddr*) &from, + &from_len, + NULL, + NULL) != SOCKET_ERROR) { + + /* Message received */ + handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0); + } else { + err = WSAGetLastError(); + if (err == WSAEMSGSIZE) { + /* Message truncated */ + handle->recv_cb(handle, + bytes, + &buf, + (const struct sockaddr*) &from, + UV_UDP_PARTIAL); + } else if (err == WSAEWOULDBLOCK) { + /* Kernel buffer empty */ + handle->recv_cb(handle, 0, &buf, NULL, 0); + } else if (err == WSAECONNRESET || err == WSAENETRESET) { + /* WSAECONNRESET/WSANETRESET is ignored because this just indicates + * that a previous sendto operation failed. + */ + handle->recv_cb(handle, 0, &buf, NULL, 0); + } else { + /* Any other error that we want to report back to the user. */ + uv_udp_recv_stop(handle); + handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0); + } + } + } + +done: + /* Post another read if still reading and not closing. */ + if ((handle->flags & UV_HANDLE_READING) && + !(handle->flags & UV_HANDLE_READ_PENDING)) { + uv_udp_queue_recv(loop, handle); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle, + uv_udp_send_t* req) { + int err; + + assert(handle->type == UV_UDP); + + assert(handle->send_queue_size >= req->u.io.queued_bytes); + assert(handle->send_queue_count >= 1); + handle->send_queue_size -= req->u.io.queued_bytes; + handle->send_queue_count--; + + UNREGISTER_HANDLE_REQ(loop, handle, req); + + if (req->cb) { + err = 0; + if (!REQ_SUCCESS(req)) { + err = GET_REQ_SOCK_ERROR(req); + } + req->cb(req, uv_translate_sys_error(err)); + } + + DECREASE_PENDING_REQ_COUNT(handle); +} + + +static int uv__udp_set_membership4(uv_udp_t* handle, + const struct sockaddr_in* multicast_addr, + const char* interface_addr, + uv_membership membership) { + int err; + int optname; + struct ip_mreq mreq; + + if (handle->flags & UV_HANDLE_IPV6) + return UV_EINVAL; + + /* If the socket is unbound, bind to inaddr_any. 
*/ + err = uv_udp_maybe_bind(handle, + (const struct sockaddr*) &uv_addr_ip4_any_, + sizeof(uv_addr_ip4_any_), + UV_UDP_REUSEADDR); + if (err) + return uv_translate_sys_error(err); + + memset(&mreq, 0, sizeof mreq); + + if (interface_addr) { + err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr); + if (err) + return err; + } else { + mreq.imr_interface.s_addr = htonl(INADDR_ANY); + } + + mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr; + + switch (membership) { + case UV_JOIN_GROUP: + optname = IP_ADD_MEMBERSHIP; + break; + case UV_LEAVE_GROUP: + optname = IP_DROP_MEMBERSHIP; + break; + default: + return UV_EINVAL; + } + + if (setsockopt(handle->socket, + IPPROTO_IP, + optname, + (char*) &mreq, + sizeof mreq) == SOCKET_ERROR) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + +int uv__udp_set_membership6(uv_udp_t* handle, + const struct sockaddr_in6* multicast_addr, + const char* interface_addr, + uv_membership membership) { + int optname; + int err; + struct ipv6_mreq mreq; + struct sockaddr_in6 addr6; + + if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6)) + return UV_EINVAL; + + err = uv_udp_maybe_bind(handle, + (const struct sockaddr*) &uv_addr_ip6_any_, + sizeof(uv_addr_ip6_any_), + UV_UDP_REUSEADDR); + + if (err) + return uv_translate_sys_error(err); + + memset(&mreq, 0, sizeof(mreq)); + + if (interface_addr) { + if (uv_ip6_addr(interface_addr, 0, &addr6)) + return UV_EINVAL; + mreq.ipv6mr_interface = addr6.sin6_scope_id; + } else { + mreq.ipv6mr_interface = 0; + } + + mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr; + + switch (membership) { + case UV_JOIN_GROUP: + optname = IPV6_ADD_MEMBERSHIP; + break; + case UV_LEAVE_GROUP: + optname = IPV6_DROP_MEMBERSHIP; + break; + default: + return UV_EINVAL; + } + + if (setsockopt(handle->socket, + IPPROTO_IPV6, + optname, + (char*) &mreq, + sizeof mreq) == SOCKET_ERROR) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + +int uv_udp_set_membership(uv_udp_t* handle, + const char* multicast_addr, + const char* interface_addr, + uv_membership membership) { + struct sockaddr_in addr4; + struct sockaddr_in6 addr6; + + if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) + return uv__udp_set_membership4(handle, &addr4, interface_addr, membership); + else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) + return uv__udp_set_membership6(handle, &addr6, interface_addr, membership); + else + return UV_EINVAL; +} + + +int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) { + struct sockaddr_storage addr_st; + struct sockaddr_in* addr4; + struct sockaddr_in6* addr6; + + addr4 = (struct sockaddr_in*) &addr_st; + addr6 = (struct sockaddr_in6*) &addr_st; + + if (!interface_addr) { + memset(&addr_st, 0, sizeof addr_st); + if (handle->flags & UV_HANDLE_IPV6) { + addr_st.ss_family = AF_INET6; + addr6->sin6_scope_id = 0; + } else { + addr_st.ss_family = AF_INET; + addr4->sin_addr.s_addr = htonl(INADDR_ANY); + } + } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) { + /* nothing, address was parsed */ + } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) { + /* nothing, address was parsed */ + } else { + return UV_EINVAL; + } + + if (!(handle->flags & UV_HANDLE_BOUND)) + return UV_EBADF; + + if (addr_st.ss_family == AF_INET) { + if (setsockopt(handle->socket, + IPPROTO_IP, + IP_MULTICAST_IF, + (char*) &addr4->sin_addr, + sizeof(addr4->sin_addr)) == SOCKET_ERROR) { + return uv_translate_sys_error(WSAGetLastError()); + } + 
} else if (addr_st.ss_family == AF_INET6) { + if (setsockopt(handle->socket, + IPPROTO_IPV6, + IPV6_MULTICAST_IF, + (char*) &addr6->sin6_scope_id, + sizeof(addr6->sin6_scope_id)) == SOCKET_ERROR) { + return uv_translate_sys_error(WSAGetLastError()); + } + } else { + assert(0 && "unexpected address family"); + abort(); + } + + return 0; +} + + +int uv_udp_set_broadcast(uv_udp_t* handle, int value) { + BOOL optval = (BOOL) value; + + if (!(handle->flags & UV_HANDLE_BOUND)) + return UV_EBADF; + + if (setsockopt(handle->socket, + SOL_SOCKET, + SO_BROADCAST, + (char*) &optval, + sizeof optval)) { + return uv_translate_sys_error(WSAGetLastError()); + } + + return 0; +} + + +int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) { + WSAPROTOCOL_INFOW protocol_info; + int opt_len; + int err; + + /* Detect the address family of the socket. */ + opt_len = (int) sizeof protocol_info; + if (getsockopt(sock, + SOL_SOCKET, + SO_PROTOCOL_INFOW, + (char*) &protocol_info, + &opt_len) == SOCKET_ERROR) { + return uv_translate_sys_error(GetLastError()); + } + + err = uv_udp_set_socket(handle->loop, + handle, + sock, + protocol_info.iAddressFamily); + return uv_translate_sys_error(err); +} + + +#define SOCKOPT_SETTER(name, option4, option6, validate) \ + int uv_udp_set_##name(uv_udp_t* handle, int value) { \ + DWORD optval = (DWORD) value; \ + \ + if (!(validate(value))) { \ + return UV_EINVAL; \ + } \ + \ + if (!(handle->flags & UV_HANDLE_BOUND)) \ + return UV_EBADF; \ + \ + if (!(handle->flags & UV_HANDLE_IPV6)) { \ + /* Set IPv4 socket option */ \ + if (setsockopt(handle->socket, \ + IPPROTO_IP, \ + option4, \ + (char*) &optval, \ + sizeof optval)) { \ + return uv_translate_sys_error(WSAGetLastError()); \ + } \ + } else { \ + /* Set IPv6 socket option */ \ + if (setsockopt(handle->socket, \ + IPPROTO_IPV6, \ + option6, \ + (char*) &optval, \ + sizeof optval)) { \ + return uv_translate_sys_error(WSAGetLastError()); \ + } \ + } \ + return 0; \ + } + +#define VALIDATE_TTL(value) ((value) >= 1 && (value) <= 255) +#define VALIDATE_MULTICAST_TTL(value) ((value) >= -1 && (value) <= 255) +#define VALIDATE_MULTICAST_LOOP(value) (1) + +SOCKOPT_SETTER(ttl, + IP_TTL, + IPV6_HOPLIMIT, + VALIDATE_TTL) +SOCKOPT_SETTER(multicast_ttl, + IP_MULTICAST_TTL, + IPV6_MULTICAST_HOPS, + VALIDATE_MULTICAST_TTL) +SOCKOPT_SETTER(multicast_loop, + IP_MULTICAST_LOOP, + IPV6_MULTICAST_LOOP, + VALIDATE_MULTICAST_LOOP) + +#undef SOCKOPT_SETTER +#undef VALIDATE_TTL +#undef VALIDATE_MULTICAST_TTL +#undef VALIDATE_MULTICAST_LOOP + + +/* This function is an egress point, i.e. it returns libuv errors rather than + * system errors. + */ +int uv__udp_bind(uv_udp_t* handle, + const struct sockaddr* addr, + unsigned int addrlen, + unsigned int flags) { + int err; + + err = uv_udp_maybe_bind(handle, addr, addrlen, flags); + if (err) + return uv_translate_sys_error(err); + + return 0; +} + + +/* This function is an egress point, i.e. it returns libuv errors rather than + * system errors. 
+ */ +int uv__udp_send(uv_udp_send_t* req, + uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen, + uv_udp_send_cb send_cb) { + const struct sockaddr* bind_addr; + int err; + + if (!(handle->flags & UV_HANDLE_BOUND)) { + if (addrlen == sizeof(uv_addr_ip4_any_)) { + bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_; + } else if (addrlen == sizeof(uv_addr_ip6_any_)) { + bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_; + } else { + abort(); + } + err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0); + if (err) + return uv_translate_sys_error(err); + } + + err = uv__send(req, handle, bufs, nbufs, addr, addrlen, send_cb); + if (err) + return uv_translate_sys_error(err); + + return 0; +} + + +int uv__udp_try_send(uv_udp_t* handle, + const uv_buf_t bufs[], + unsigned int nbufs, + const struct sockaddr* addr, + unsigned int addrlen) { + return UV_ENOSYS; +} diff --git a/deps/libuv/src/win/util.c b/deps/libuv/src/win/util.c new file mode 100644 index 00000000..050058af --- /dev/null +++ b/deps/libuv/src/win/util.c @@ -0,0 +1,1389 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "uv.h" +#include "internal.h" + +#include +#include +#include +#include +#include +#include +#include + + +/* + * Max title length; the only thing MSDN tells us about the maximum length + * of the console title is that it is smaller than 64K. However in practice + * it is much smaller, and there is no way to figure out what the exact length + * of the title is or can be, at least not on XP. To make it even more + * annoying, GetConsoleTitle fails when the buffer to be read into is bigger + * than the actual maximum length. So we make a conservative guess here; + * just don't put the novel you're writing in the title, unless the plot + * survives truncation. + */ +#define MAX_TITLE_LENGTH 8192 + +/* The number of nanoseconds in one second. */ +#define UV__NANOSEC 1000000000 + +/* Max user name length, from iphlpapi.h */ +#ifndef UNLEN +# define UNLEN 256 +#endif + +/* Cached copy of the process title, plus a mutex guarding it. */ +static char *process_title; +static CRITICAL_SECTION process_title_lock; + +/* Cached copy of the process id, written once. */ +static DWORD current_pid = 0; + + +/* Interval (in seconds) of the high-resolution clock. 
*/ +static double hrtime_interval_ = 0; + + +/* + * One-time initialization code for functionality defined in util.c. + */ +void uv__util_init() { + LARGE_INTEGER perf_frequency; + + /* Initialize process title access mutex. */ + InitializeCriticalSection(&process_title_lock); + + /* Retrieve high-resolution timer frequency + * and precompute its reciprocal. + */ + if (QueryPerformanceFrequency(&perf_frequency)) { + hrtime_interval_ = 1.0 / perf_frequency.QuadPart; + } else { + hrtime_interval_= 0; + } +} + + +int uv_exepath(char* buffer, size_t* size_ptr) { + int utf8_len, utf16_buffer_len, utf16_len; + WCHAR* utf16_buffer; + int err; + + if (buffer == NULL || size_ptr == NULL || *size_ptr == 0) { + return UV_EINVAL; + } + + if (*size_ptr > 32768) { + /* Windows paths can never be longer than this. */ + utf16_buffer_len = 32768; + } else { + utf16_buffer_len = (int) *size_ptr; + } + + utf16_buffer = (WCHAR*) uv__malloc(sizeof(WCHAR) * utf16_buffer_len); + if (!utf16_buffer) { + return UV_ENOMEM; + } + + /* Get the path as UTF-16. */ + utf16_len = GetModuleFileNameW(NULL, utf16_buffer, utf16_buffer_len); + if (utf16_len <= 0) { + err = GetLastError(); + goto error; + } + + /* utf16_len contains the length, *not* including the terminating null. */ + utf16_buffer[utf16_len] = L'\0'; + + /* Convert to UTF-8 */ + utf8_len = WideCharToMultiByte(CP_UTF8, + 0, + utf16_buffer, + -1, + buffer, + (int) *size_ptr, + NULL, + NULL); + if (utf8_len == 0) { + err = GetLastError(); + goto error; + } + + uv__free(utf16_buffer); + + /* utf8_len *does* include the terminating null at this point, but the */ + /* returned size shouldn't. */ + *size_ptr = utf8_len - 1; + return 0; + + error: + uv__free(utf16_buffer); + return uv_translate_sys_error(err); +} + + +int uv_cwd(char* buffer, size_t* size) { + DWORD utf16_len; + WCHAR utf16_buffer[MAX_PATH]; + int r; + + if (buffer == NULL || size == NULL) { + return UV_EINVAL; + } + + utf16_len = GetCurrentDirectoryW(MAX_PATH, utf16_buffer); + if (utf16_len == 0) { + return uv_translate_sys_error(GetLastError()); + } else if (utf16_len > MAX_PATH) { + /* This should be impossible; however the CRT has a code path to deal */ + /* with this scenario, so I added a check anyway. */ + return UV_EIO; + } + + /* utf16_len contains the length, *not* including the terminating null. */ + utf16_buffer[utf16_len] = L'\0'; + + /* The returned directory should not have a trailing slash, unless it */ + /* points at a drive root, like c:\. Remove it if needed.*/ + if (utf16_buffer[utf16_len - 1] == L'\\' && + !(utf16_len == 3 && utf16_buffer[1] == L':')) { + utf16_len--; + utf16_buffer[utf16_len] = L'\0'; + } + + /* Check how much space we need */ + r = WideCharToMultiByte(CP_UTF8, + 0, + utf16_buffer, + -1, + NULL, + 0, + NULL, + NULL); + if (r == 0) { + return uv_translate_sys_error(GetLastError()); + } else if (r > (int) *size) { + *size = r; + return UV_ENOBUFS; + } + + /* Convert to UTF-8 */ + r = WideCharToMultiByte(CP_UTF8, + 0, + utf16_buffer, + -1, + buffer, + *size > INT_MAX ? 
INT_MAX : (int) *size, + NULL, + NULL); + if (r == 0) { + return uv_translate_sys_error(GetLastError()); + } + + *size = r - 1; + return 0; +} + + +int uv_chdir(const char* dir) { + WCHAR utf16_buffer[MAX_PATH]; + size_t utf16_len; + WCHAR drive_letter, env_var[4]; + + if (dir == NULL) { + return UV_EINVAL; + } + + if (MultiByteToWideChar(CP_UTF8, + 0, + dir, + -1, + utf16_buffer, + MAX_PATH) == 0) { + DWORD error = GetLastError(); + /* The maximum length of the current working directory is 260 chars, */ + /* including terminating null. If it doesn't fit, the path name must be */ + /* too long. */ + if (error == ERROR_INSUFFICIENT_BUFFER) { + return UV_ENAMETOOLONG; + } else { + return uv_translate_sys_error(error); + } + } + + if (!SetCurrentDirectoryW(utf16_buffer)) { + return uv_translate_sys_error(GetLastError()); + } + + /* Windows stores the drive-local path in an "hidden" environment variable, */ + /* which has the form "=C:=C:\Windows". SetCurrentDirectory does not */ + /* update this, so we'll have to do it. */ + utf16_len = GetCurrentDirectoryW(MAX_PATH, utf16_buffer); + if (utf16_len == 0) { + return uv_translate_sys_error(GetLastError()); + } else if (utf16_len > MAX_PATH) { + return UV_EIO; + } + + /* The returned directory should not have a trailing slash, unless it */ + /* points at a drive root, like c:\. Remove it if needed. */ + if (utf16_buffer[utf16_len - 1] == L'\\' && + !(utf16_len == 3 && utf16_buffer[1] == L':')) { + utf16_len--; + utf16_buffer[utf16_len] = L'\0'; + } + + if (utf16_len < 2 || utf16_buffer[1] != L':') { + /* Doesn't look like a drive letter could be there - probably an UNC */ + /* path. TODO: Need to handle win32 namespaces like \\?\C:\ ? */ + drive_letter = 0; + } else if (utf16_buffer[0] >= L'A' && utf16_buffer[0] <= L'Z') { + drive_letter = utf16_buffer[0]; + } else if (utf16_buffer[0] >= L'a' && utf16_buffer[0] <= L'z') { + /* Convert to uppercase. */ + drive_letter = utf16_buffer[0] - L'a' + L'A'; + } else { + /* Not valid. */ + drive_letter = 0; + } + + if (drive_letter != 0) { + /* Construct the environment variable name and set it. 
*/ + env_var[0] = L'='; + env_var[1] = drive_letter; + env_var[2] = L':'; + env_var[3] = L'\0'; + + if (!SetEnvironmentVariableW(env_var, utf16_buffer)) { + return uv_translate_sys_error(GetLastError()); + } + } + + return 0; +} + + +void uv_loadavg(double avg[3]) { + /* Can't be implemented */ + avg[0] = avg[1] = avg[2] = 0; +} + + +uint64_t uv_get_free_memory(void) { + MEMORYSTATUSEX memory_status; + memory_status.dwLength = sizeof(memory_status); + + if (!GlobalMemoryStatusEx(&memory_status)) { + return -1; + } + + return (uint64_t)memory_status.ullAvailPhys; +} + + +uint64_t uv_get_total_memory(void) { + MEMORYSTATUSEX memory_status; + memory_status.dwLength = sizeof(memory_status); + + if (!GlobalMemoryStatusEx(&memory_status)) { + return -1; + } + + return (uint64_t)memory_status.ullTotalPhys; +} + + +int uv_parent_pid() { + int parent_pid = -1; + HANDLE handle; + PROCESSENTRY32 pe; + DWORD current_pid = GetCurrentProcessId(); + + pe.dwSize = sizeof(PROCESSENTRY32); + handle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); + + if (Process32First(handle, &pe)) { + do { + if (pe.th32ProcessID == current_pid) { + parent_pid = pe.th32ParentProcessID; + break; + } + } while( Process32Next(handle, &pe)); + } + + CloseHandle(handle); + return parent_pid; +} + + +int uv_current_pid() { + if (current_pid == 0) { + current_pid = GetCurrentProcessId(); + } + return current_pid; +} + + +char** uv_setup_args(int argc, char** argv) { + return argv; +} + + +int uv_set_process_title(const char* title) { + int err; + int length; + WCHAR* title_w = NULL; + + uv__once_init(); + + /* Find out how big the buffer for the wide-char title must be */ + length = MultiByteToWideChar(CP_UTF8, 0, title, -1, NULL, 0); + if (!length) { + err = GetLastError(); + goto done; + } + + /* Convert to wide-char string */ + title_w = (WCHAR*)uv__malloc(sizeof(WCHAR) * length); + if (!title_w) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + length = MultiByteToWideChar(CP_UTF8, 0, title, -1, title_w, length); + if (!length) { + err = GetLastError(); + goto done; + } + + /* If the title must be truncated insert a \0 terminator there */ + if (length > MAX_TITLE_LENGTH) { + title_w[MAX_TITLE_LENGTH - 1] = L'\0'; + } + + if (!SetConsoleTitleW(title_w)) { + err = GetLastError(); + goto done; + } + + EnterCriticalSection(&process_title_lock); + uv__free(process_title); + process_title = uv__strdup(title); + LeaveCriticalSection(&process_title_lock); + + err = 0; + +done: + uv__free(title_w); + return uv_translate_sys_error(err); +} + + +static int uv__get_process_title() { + WCHAR title_w[MAX_TITLE_LENGTH]; + + if (!GetConsoleTitleW(title_w, sizeof(title_w) / sizeof(WCHAR))) { + return -1; + } + + if (uv__convert_utf16_to_utf8(title_w, -1, &process_title) != 0) + return -1; + + return 0; +} + + +int uv_get_process_title(char* buffer, size_t size) { + size_t len; + + if (buffer == NULL || size == 0) + return UV_EINVAL; + + uv__once_init(); + + EnterCriticalSection(&process_title_lock); + /* + * If the process_title was never read before nor explicitly set, + * we must query it with getConsoleTitleW + */ + if (!process_title && uv__get_process_title() == -1) { + LeaveCriticalSection(&process_title_lock); + return uv_translate_sys_error(GetLastError()); + } + + assert(process_title); + len = strlen(process_title) + 1; + + if (size < len) { + LeaveCriticalSection(&process_title_lock); + return UV_ENOBUFS; + } + + memcpy(buffer, process_title, len); + LeaveCriticalSection(&process_title_lock); + + return 0; +} + + 
+uint64_t uv_hrtime(void) { + uv__once_init(); + return uv__hrtime(UV__NANOSEC); +} + +uint64_t uv__hrtime(double scale) { + LARGE_INTEGER counter; + + /* If the performance interval is zero, there's no support. */ + if (hrtime_interval_ == 0) { + return 0; + } + + if (!QueryPerformanceCounter(&counter)) { + return 0; + } + + /* Because we have no guarantee about the order of magnitude of the + * performance counter interval, integer math could cause this computation + * to overflow. Therefore we resort to floating point math. + */ + return (uint64_t) ((double) counter.QuadPart * hrtime_interval_ * scale); +} + + +int uv_resident_set_memory(size_t* rss) { + HANDLE current_process; + PROCESS_MEMORY_COUNTERS pmc; + + current_process = GetCurrentProcess(); + + if (!GetProcessMemoryInfo(current_process, &pmc, sizeof(pmc))) { + return uv_translate_sys_error(GetLastError()); + } + + *rss = pmc.WorkingSetSize; + + return 0; +} + + +int uv_uptime(double* uptime) { + BYTE stack_buffer[4096]; + BYTE* malloced_buffer = NULL; + BYTE* buffer = (BYTE*) stack_buffer; + size_t buffer_size = sizeof(stack_buffer); + DWORD data_size; + + PERF_DATA_BLOCK* data_block; + PERF_OBJECT_TYPE* object_type; + PERF_COUNTER_DEFINITION* counter_definition; + + DWORD i; + + for (;;) { + LONG result; + + data_size = (DWORD) buffer_size; + result = RegQueryValueExW(HKEY_PERFORMANCE_DATA, + L"2", + NULL, + NULL, + buffer, + &data_size); + if (result == ERROR_SUCCESS) { + break; + } else if (result != ERROR_MORE_DATA) { + *uptime = 0; + return uv_translate_sys_error(result); + } + + buffer_size *= 2; + /* Don't let the buffer grow infinitely. */ + if (buffer_size > 1 << 20) { + goto internalError; + } + + uv__free(malloced_buffer); + + buffer = malloced_buffer = (BYTE*) uv__malloc(buffer_size); + if (malloced_buffer == NULL) { + *uptime = 0; + return UV_ENOMEM; + } + } + + if (data_size < sizeof(*data_block)) + goto internalError; + + data_block = (PERF_DATA_BLOCK*) buffer; + + if (wmemcmp(data_block->Signature, L"PERF", 4) != 0) + goto internalError; + + if (data_size < data_block->HeaderLength + sizeof(*object_type)) + goto internalError; + + object_type = (PERF_OBJECT_TYPE*) (buffer + data_block->HeaderLength); + + if (object_type->NumInstances != PERF_NO_INSTANCES) + goto internalError; + + counter_definition = (PERF_COUNTER_DEFINITION*) (buffer + + data_block->HeaderLength + object_type->HeaderLength); + for (i = 0; i < object_type->NumCounters; i++) { + if ((BYTE*) counter_definition + sizeof(*counter_definition) > + buffer + data_size) { + break; + } + + if (counter_definition->CounterNameTitleIndex == 674 && + counter_definition->CounterSize == sizeof(uint64_t)) { + if (counter_definition->CounterOffset + sizeof(uint64_t) > data_size || + !(counter_definition->CounterType & PERF_OBJECT_TIMER)) { + goto internalError; + } else { + BYTE* address = (BYTE*) object_type + object_type->DefinitionLength + + counter_definition->CounterOffset; + uint64_t value = *((uint64_t*) address); + *uptime = (double) (object_type->PerfTime.QuadPart - value) / + (double) object_type->PerfFreq.QuadPart; + uv__free(malloced_buffer); + return 0; + } + } + + counter_definition = (PERF_COUNTER_DEFINITION*) + ((BYTE*) counter_definition + counter_definition->ByteLength); + } + + /* If we get here, the uptime value was not found. 
*/ + uv__free(malloced_buffer); + *uptime = 0; + return UV_ENOSYS; + + internalError: + uv__free(malloced_buffer); + *uptime = 0; + return UV_EIO; +} + + +int uv_cpu_info(uv_cpu_info_t** cpu_infos_ptr, int* cpu_count_ptr) { + uv_cpu_info_t* cpu_infos; + SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION* sppi; + DWORD sppi_size; + SYSTEM_INFO system_info; + DWORD cpu_count, r, i; + NTSTATUS status; + ULONG result_size; + int err; + uv_cpu_info_t* cpu_info; + + cpu_infos = NULL; + cpu_count = 0; + sppi = NULL; + + uv__once_init(); + + GetSystemInfo(&system_info); + cpu_count = system_info.dwNumberOfProcessors; + + cpu_infos = uv__calloc(cpu_count, sizeof *cpu_infos); + if (cpu_infos == NULL) { + err = ERROR_OUTOFMEMORY; + goto error; + } + + sppi_size = cpu_count * sizeof(*sppi); + sppi = uv__malloc(sppi_size); + if (sppi == NULL) { + err = ERROR_OUTOFMEMORY; + goto error; + } + + status = pNtQuerySystemInformation(SystemProcessorPerformanceInformation, + sppi, + sppi_size, + &result_size); + if (!NT_SUCCESS(status)) { + err = pRtlNtStatusToDosError(status); + goto error; + } + + assert(result_size == sppi_size); + + for (i = 0; i < cpu_count; i++) { + WCHAR key_name[128]; + HKEY processor_key; + DWORD cpu_speed; + DWORD cpu_speed_size = sizeof(cpu_speed); + WCHAR cpu_brand[256]; + DWORD cpu_brand_size = sizeof(cpu_brand); + size_t len; + + len = _snwprintf(key_name, + ARRAY_SIZE(key_name), + L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\%d", + i); + + assert(len > 0 && len < ARRAY_SIZE(key_name)); + + r = RegOpenKeyExW(HKEY_LOCAL_MACHINE, + key_name, + 0, + KEY_QUERY_VALUE, + &processor_key); + if (r != ERROR_SUCCESS) { + err = GetLastError(); + goto error; + } + + if (RegQueryValueExW(processor_key, + L"~MHz", + NULL, + NULL, + (BYTE*) &cpu_speed, + &cpu_speed_size) != ERROR_SUCCESS) { + err = GetLastError(); + RegCloseKey(processor_key); + goto error; + } + + if (RegQueryValueExW(processor_key, + L"ProcessorNameString", + NULL, + NULL, + (BYTE*) &cpu_brand, + &cpu_brand_size) != ERROR_SUCCESS) { + err = GetLastError(); + RegCloseKey(processor_key); + goto error; + } + + RegCloseKey(processor_key); + + cpu_info = &cpu_infos[i]; + cpu_info->speed = cpu_speed; + cpu_info->cpu_times.user = sppi[i].UserTime.QuadPart / 10000; + cpu_info->cpu_times.sys = (sppi[i].KernelTime.QuadPart - + sppi[i].IdleTime.QuadPart) / 10000; + cpu_info->cpu_times.idle = sppi[i].IdleTime.QuadPart / 10000; + cpu_info->cpu_times.irq = sppi[i].InterruptTime.QuadPart / 10000; + cpu_info->cpu_times.nice = 0; + + uv__convert_utf16_to_utf8(cpu_brand, + cpu_brand_size / sizeof(WCHAR), + &(cpu_info->model)); + } + + uv__free(sppi); + + *cpu_count_ptr = cpu_count; + *cpu_infos_ptr = cpu_infos; + + return 0; + + error: + /* This is safe because the cpu_infos array is zeroed on allocation. */ + for (i = 0; i < cpu_count; i++) + uv__free(cpu_infos[i].model); + + uv__free(cpu_infos); + uv__free(sppi); + + return uv_translate_sys_error(err); +} + + +void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(cpu_infos[i].model); + } + + uv__free(cpu_infos); +} + + +static int is_windows_version_or_greater(DWORD os_major, + DWORD os_minor, + WORD service_pack_major, + WORD service_pack_minor) { + OSVERSIONINFOEX osvi; + DWORDLONG condition_mask = 0; + int op = VER_GREATER_EQUAL; + + /* Initialize the OSVERSIONINFOEX structure. 
*/ + ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); + osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); + osvi.dwMajorVersion = os_major; + osvi.dwMinorVersion = os_minor; + osvi.wServicePackMajor = service_pack_major; + osvi.wServicePackMinor = service_pack_minor; + + /* Initialize the condition mask. */ + VER_SET_CONDITION(condition_mask, VER_MAJORVERSION, op); + VER_SET_CONDITION(condition_mask, VER_MINORVERSION, op); + VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMAJOR, op); + VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMINOR, op); + + /* Perform the test. */ + return (int) VerifyVersionInfo( + &osvi, + VER_MAJORVERSION | VER_MINORVERSION | + VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR, + condition_mask); +} + + +static int address_prefix_match(int family, + struct sockaddr* address, + struct sockaddr* prefix_address, + int prefix_len) { + uint8_t* address_data; + uint8_t* prefix_address_data; + int i; + + assert(address->sa_family == family); + assert(prefix_address->sa_family == family); + + if (family == AF_INET6) { + address_data = (uint8_t*) &(((struct sockaddr_in6 *) address)->sin6_addr); + prefix_address_data = + (uint8_t*) &(((struct sockaddr_in6 *) prefix_address)->sin6_addr); + } else { + address_data = (uint8_t*) &(((struct sockaddr_in *) address)->sin_addr); + prefix_address_data = + (uint8_t*) &(((struct sockaddr_in *) prefix_address)->sin_addr); + } + + for (i = 0; i < prefix_len >> 3; i++) { + if (address_data[i] != prefix_address_data[i]) + return 0; + } + + if (prefix_len % 8) + return prefix_address_data[i] == + (address_data[i] & (0xff << (8 - prefix_len % 8))); + + return 1; +} + + +int uv_interface_addresses(uv_interface_address_t** addresses_ptr, + int* count_ptr) { + IP_ADAPTER_ADDRESSES* win_address_buf; + ULONG win_address_buf_size; + IP_ADAPTER_ADDRESSES* adapter; + + uv_interface_address_t* uv_address_buf; + char* name_buf; + size_t uv_address_buf_size; + uv_interface_address_t* uv_address; + + int count; + + int is_vista_or_greater; + ULONG flags; + + is_vista_or_greater = is_windows_version_or_greater(6, 0, 0, 0); + if (is_vista_or_greater) { + flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | + GAA_FLAG_SKIP_DNS_SERVER; + } else { + /* We need at least XP SP1. */ + if (!is_windows_version_or_greater(5, 1, 1, 0)) + return UV_ENOTSUP; + + flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | + GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_PREFIX; + } + + + /* Fetch the size of the adapters reported by windows, and then get the */ + /* list itself. */ + win_address_buf_size = 0; + win_address_buf = NULL; + + for (;;) { + ULONG r; + + /* If win_address_buf is 0, then GetAdaptersAddresses will fail with */ + /* ERROR_BUFFER_OVERFLOW, and the required buffer size will be stored in */ + /* win_address_buf_size. */ + r = GetAdaptersAddresses(AF_UNSPEC, + flags, + NULL, + win_address_buf, + &win_address_buf_size); + + if (r == ERROR_SUCCESS) + break; + + uv__free(win_address_buf); + + switch (r) { + case ERROR_BUFFER_OVERFLOW: + /* This happens when win_address_buf is NULL or too small to hold */ + /* all adapters. */ + win_address_buf = uv__malloc(win_address_buf_size); + if (win_address_buf == NULL) + return UV_ENOMEM; + + continue; + + case ERROR_NO_DATA: { + /* No adapters were found. 
*/ + uv_address_buf = uv__malloc(1); + if (uv_address_buf == NULL) + return UV_ENOMEM; + + *count_ptr = 0; + *addresses_ptr = uv_address_buf; + + return 0; + } + + case ERROR_ADDRESS_NOT_ASSOCIATED: + return UV_EAGAIN; + + case ERROR_INVALID_PARAMETER: + /* MSDN says: + * "This error is returned for any of the following conditions: the + * SizePointer parameter is NULL, the Address parameter is not + * AF_INET, AF_INET6, or AF_UNSPEC, or the address information for + * the parameters requested is greater than ULONG_MAX." + * Since the first two conditions are not met, it must be that the + * adapter data is too big. + */ + return UV_ENOBUFS; + + default: + /* Other (unspecified) errors can happen, but we don't have any */ + /* special meaning for them. */ + assert(r != ERROR_SUCCESS); + return uv_translate_sys_error(r); + } + } + + /* Count the number of enabled interfaces and compute how much space is */ + /* needed to store their info. */ + count = 0; + uv_address_buf_size = 0; + + for (adapter = win_address_buf; + adapter != NULL; + adapter = adapter->Next) { + IP_ADAPTER_UNICAST_ADDRESS* unicast_address; + int name_size; + + /* Interfaces that are not 'up' should not be reported. Also skip */ + /* interfaces that have no associated unicast address, as to avoid */ + /* allocating space for the name for this interface. */ + if (adapter->OperStatus != IfOperStatusUp || + adapter->FirstUnicastAddress == NULL) + continue; + + /* Compute the size of the interface name. */ + name_size = WideCharToMultiByte(CP_UTF8, + 0, + adapter->FriendlyName, + -1, + NULL, + 0, + NULL, + FALSE); + if (name_size <= 0) { + uv__free(win_address_buf); + return uv_translate_sys_error(GetLastError()); + } + uv_address_buf_size += name_size; + + /* Count the number of addresses associated with this interface, and */ + /* compute the size. */ + for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS*) + adapter->FirstUnicastAddress; + unicast_address != NULL; + unicast_address = unicast_address->Next) { + count++; + uv_address_buf_size += sizeof(uv_interface_address_t); + } + } + + /* Allocate space to store interface data plus adapter names. */ + uv_address_buf = uv__malloc(uv_address_buf_size); + if (uv_address_buf == NULL) { + uv__free(win_address_buf); + return UV_ENOMEM; + } + + /* Compute the start of the uv_interface_address_t array, and the place in */ + /* the buffer where the interface names will be stored. */ + uv_address = uv_address_buf; + name_buf = (char*) (uv_address_buf + count); + + /* Fill out the output buffer. */ + for (adapter = win_address_buf; + adapter != NULL; + adapter = adapter->Next) { + IP_ADAPTER_UNICAST_ADDRESS* unicast_address; + int name_size; + size_t max_name_size; + + if (adapter->OperStatus != IfOperStatusUp || + adapter->FirstUnicastAddress == NULL) + continue; + + /* Convert the interface name to UTF8. */ + max_name_size = (char*) uv_address_buf + uv_address_buf_size - name_buf; + if (max_name_size > (size_t) INT_MAX) + max_name_size = INT_MAX; + name_size = WideCharToMultiByte(CP_UTF8, + 0, + adapter->FriendlyName, + -1, + name_buf, + (int) max_name_size, + NULL, + FALSE); + if (name_size <= 0) { + uv__free(win_address_buf); + uv__free(uv_address_buf); + return uv_translate_sys_error(GetLastError()); + } + + /* Add an uv_interface_address_t element for every unicast address. 
*/ + for (unicast_address = (IP_ADAPTER_UNICAST_ADDRESS*) + adapter->FirstUnicastAddress; + unicast_address != NULL; + unicast_address = unicast_address->Next) { + struct sockaddr* sa; + ULONG prefix_len; + + sa = unicast_address->Address.lpSockaddr; + + /* XP has no OnLinkPrefixLength field. */ + if (is_vista_or_greater) { + prefix_len = + ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength; + } else { + /* Prior to Windows Vista the FirstPrefix pointed to the list with + * single prefix for each IP address assigned to the adapter. + * Order of FirstPrefix does not match order of FirstUnicastAddress, + * so we need to find corresponding prefix. + */ + IP_ADAPTER_PREFIX* prefix; + prefix_len = 0; + + for (prefix = adapter->FirstPrefix; prefix; prefix = prefix->Next) { + /* We want the longest matching prefix. */ + if (prefix->Address.lpSockaddr->sa_family != sa->sa_family || + prefix->PrefixLength <= prefix_len) + continue; + + if (address_prefix_match(sa->sa_family, sa, + prefix->Address.lpSockaddr, prefix->PrefixLength)) { + prefix_len = prefix->PrefixLength; + } + } + + /* If there is no matching prefix information, return a single-host + * subnet mask (e.g. 255.255.255.255 for IPv4). + */ + if (!prefix_len) + prefix_len = (sa->sa_family == AF_INET6) ? 128 : 32; + } + + memset(uv_address, 0, sizeof *uv_address); + + uv_address->name = name_buf; + + if (adapter->PhysicalAddressLength == sizeof(uv_address->phys_addr)) { + memcpy(uv_address->phys_addr, + adapter->PhysicalAddress, + sizeof(uv_address->phys_addr)); + } + + uv_address->is_internal = + (adapter->IfType == IF_TYPE_SOFTWARE_LOOPBACK); + + if (sa->sa_family == AF_INET6) { + uv_address->address.address6 = *((struct sockaddr_in6 *) sa); + + uv_address->netmask.netmask6.sin6_family = AF_INET6; + memset(uv_address->netmask.netmask6.sin6_addr.s6_addr, 0xff, prefix_len >> 3); + /* This check ensures that we don't write past the size of the data. */ + if (prefix_len % 8) { + uv_address->netmask.netmask6.sin6_addr.s6_addr[prefix_len >> 3] = + 0xff << (8 - prefix_len % 8); + } + + } else { + uv_address->address.address4 = *((struct sockaddr_in *) sa); + + uv_address->netmask.netmask4.sin_family = AF_INET; + uv_address->netmask.netmask4.sin_addr.s_addr = (prefix_len > 0) ? 
+ htonl(0xffffffff << (32 - prefix_len)) : 0; + } + + uv_address++; + } + + name_buf += name_size; + } + + uv__free(win_address_buf); + + *addresses_ptr = uv_address_buf; + *count_ptr = count; + + return 0; +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + uv__free(addresses); +} + + +int uv_getrusage(uv_rusage_t *uv_rusage) { + FILETIME createTime, exitTime, kernelTime, userTime; + SYSTEMTIME kernelSystemTime, userSystemTime; + PROCESS_MEMORY_COUNTERS memCounters; + IO_COUNTERS ioCounters; + int ret; + + ret = GetProcessTimes(GetCurrentProcess(), &createTime, &exitTime, &kernelTime, &userTime); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + ret = FileTimeToSystemTime(&kernelTime, &kernelSystemTime); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + ret = FileTimeToSystemTime(&userTime, &userSystemTime); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + ret = GetProcessMemoryInfo(GetCurrentProcess(), + &memCounters, + sizeof(memCounters)); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + ret = GetProcessIoCounters(GetCurrentProcess(), &ioCounters); + if (ret == 0) { + return uv_translate_sys_error(GetLastError()); + } + + memset(uv_rusage, 0, sizeof(*uv_rusage)); + + uv_rusage->ru_utime.tv_sec = userSystemTime.wHour * 3600 + + userSystemTime.wMinute * 60 + + userSystemTime.wSecond; + uv_rusage->ru_utime.tv_usec = userSystemTime.wMilliseconds * 1000; + + uv_rusage->ru_stime.tv_sec = kernelSystemTime.wHour * 3600 + + kernelSystemTime.wMinute * 60 + + kernelSystemTime.wSecond; + uv_rusage->ru_stime.tv_usec = kernelSystemTime.wMilliseconds * 1000; + + uv_rusage->ru_majflt = (uint64_t) memCounters.PageFaultCount; + uv_rusage->ru_maxrss = (uint64_t) memCounters.PeakWorkingSetSize / 1024; + + uv_rusage->ru_oublock = (uint64_t) ioCounters.WriteOperationCount; + uv_rusage->ru_inblock = (uint64_t) ioCounters.ReadOperationCount; + + return 0; +} + + +int uv_os_homedir(char* buffer, size_t* size) { + uv_passwd_t pwd; + wchar_t path[MAX_PATH]; + DWORD bufsize; + size_t len; + int r; + + if (buffer == NULL || size == NULL || *size == 0) + return UV_EINVAL; + + /* Check if the USERPROFILE environment variable is set first */ + len = GetEnvironmentVariableW(L"USERPROFILE", path, MAX_PATH); + + if (len == 0) { + r = GetLastError(); + + /* Don't return an error if USERPROFILE was not found */ + if (r != ERROR_ENVVAR_NOT_FOUND) + return uv_translate_sys_error(r); + } else if (len > MAX_PATH) { + /* This should not be possible */ + return UV_EIO; + } else { + /* Check how much space we need */ + bufsize = WideCharToMultiByte(CP_UTF8, 0, path, -1, NULL, 0, NULL, NULL); + + if (bufsize == 0) { + return uv_translate_sys_error(GetLastError()); + } else if (bufsize > *size) { + *size = bufsize; + return UV_ENOBUFS; + } + + /* Convert to UTF-8 */ + bufsize = WideCharToMultiByte(CP_UTF8, + 0, + path, + -1, + buffer, + *size, + NULL, + NULL); + + if (bufsize == 0) + return uv_translate_sys_error(GetLastError()); + + *size = bufsize - 1; + return 0; + } + + /* USERPROFILE is not set, so call uv__getpwuid_r() */ + r = uv__getpwuid_r(&pwd); + + if (r != 0) { + return r; + } + + len = strlen(pwd.homedir); + + if (len >= *size) { + *size = len + 1; + uv_os_free_passwd(&pwd); + return UV_ENOBUFS; + } + + memcpy(buffer, pwd.homedir, len + 1); + *size = len; + uv_os_free_passwd(&pwd); + + return 0; +} + + +int uv_os_tmpdir(char* buffer, size_t* size) { + wchar_t path[MAX_PATH + 
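/* A worked example of the netmask arithmetic used just above (the values here
 * are illustrative): a /20 IPv4 prefix becomes 0xffffffff << (32 - 20), i.e.
 * 0xfffff000 or 255.255.240.0, and the prefix_len > 0 guard avoids the
 * undefined 32-bit shift that a /0 prefix would otherwise produce. The code
 * above additionally applies htonl() to store the mask in network order. */
#include <assert.h>
#include <stdint.h>

static uint32_t netmask_from_prefix(unsigned prefix_len) {
  return prefix_len > 0 ? (uint32_t) (0xffffffffu << (32 - prefix_len)) : 0;
}

int main(void) {
  assert(netmask_from_prefix(20) == 0xfffff000u);   /* 255.255.240.0 */
  assert(netmask_from_prefix(32) == 0xffffffffu);   /* single host   */
  assert(netmask_from_prefix(0)  == 0);             /* match-all     */
  return 0;
}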
1]; + DWORD bufsize; + size_t len; + + if (buffer == NULL || size == NULL || *size == 0) + return UV_EINVAL; + + len = GetTempPathW(MAX_PATH + 1, path); + + if (len == 0) { + return uv_translate_sys_error(GetLastError()); + } else if (len > MAX_PATH + 1) { + /* This should not be possible */ + return UV_EIO; + } + + /* The returned directory should not have a trailing slash, unless it */ + /* points at a drive root, like c:\. Remove it if needed.*/ + if (path[len - 1] == L'\\' && + !(len == 3 && path[1] == L':')) { + len--; + path[len] = L'\0'; + } + + /* Check how much space we need */ + bufsize = WideCharToMultiByte(CP_UTF8, 0, path, -1, NULL, 0, NULL, NULL); + + if (bufsize == 0) { + return uv_translate_sys_error(GetLastError()); + } else if (bufsize > *size) { + *size = bufsize; + return UV_ENOBUFS; + } + + /* Convert to UTF-8 */ + bufsize = WideCharToMultiByte(CP_UTF8, + 0, + path, + -1, + buffer, + *size, + NULL, + NULL); + + if (bufsize == 0) + return uv_translate_sys_error(GetLastError()); + + *size = bufsize - 1; + return 0; +} + + +void uv_os_free_passwd(uv_passwd_t* pwd) { + if (pwd == NULL) + return; + + uv__free(pwd->username); + uv__free(pwd->homedir); + pwd->username = NULL; + pwd->homedir = NULL; +} + + +/* + * Converts a UTF-16 string into a UTF-8 one. The resulting string is + * null-terminated. + * + * If utf16 is null terminated, utf16len can be set to -1, otherwise it must + * be specified. + */ +int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8) { + DWORD bufsize; + + if (utf16 == NULL) + return UV_EINVAL; + + /* Check how much space we need */ + bufsize = WideCharToMultiByte(CP_UTF8, + 0, + utf16, + utf16len, + NULL, + 0, + NULL, + NULL); + + if (bufsize == 0) + return uv_translate_sys_error(GetLastError()); + + /* Allocate the destination buffer adding an extra byte for the terminating + * NULL. If utf16len is not -1 WideCharToMultiByte will not add it, so + * we do it ourselves always, just in case. 
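/* A usage sketch for the buffer/size protocol implemented by uv_os_homedir()
 * and uv_os_tmpdir() above: if the caller's buffer is too small the functions
 * return UV_ENOBUFS and write the required size (including the terminating
 * NUL) back through the size pointer, so the call can simply be retried with
 * a larger buffer. Error handling is abbreviated; this is not libuv code. */
#include <stdio.h>
#include <stdlib.h>
#include "uv.h"

static void print_tmpdir(void) {
  char small[8];
  size_t size = sizeof(small);
  int r = uv_os_tmpdir(small, &size);

  if (r == 0) {
    printf("tmpdir: %s\n", small);
  } else if (r == UV_ENOBUFS) {          /* size now holds the needed length */
    char* big = malloc(size);
    if (big != NULL && uv_os_tmpdir(big, &size) == 0)
      printf("tmpdir: %s\n", big);
    free(big);
  }
}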
*/ + *utf8 = uv__malloc(bufsize + 1); + + if (*utf8 == NULL) + return UV_ENOMEM; + + /* Convert to UTF-8 */ + bufsize = WideCharToMultiByte(CP_UTF8, + 0, + utf16, + utf16len, + *utf8, + bufsize, + NULL, + NULL); + + if (bufsize == 0) { + uv__free(*utf8); + *utf8 = NULL; + return uv_translate_sys_error(GetLastError()); + } + + (*utf8)[bufsize] = '\0'; + return 0; +} + + +int uv__getpwuid_r(uv_passwd_t* pwd) { + HANDLE token; + wchar_t username[UNLEN + 1]; + wchar_t path[MAX_PATH]; + DWORD bufsize; + int r; + + if (pwd == NULL) + return UV_EINVAL; + + /* Get the home directory using GetUserProfileDirectoryW() */ + if (OpenProcessToken(GetCurrentProcess(), TOKEN_READ, &token) == 0) + return uv_translate_sys_error(GetLastError()); + + bufsize = sizeof(path); + if (!GetUserProfileDirectoryW(token, path, &bufsize)) { + r = GetLastError(); + CloseHandle(token); + + /* This should not be possible */ + if (r == ERROR_INSUFFICIENT_BUFFER) + return UV_ENOMEM; + + return uv_translate_sys_error(r); + } + + CloseHandle(token); + + /* Get the username using GetUserNameW() */ + bufsize = sizeof(username); + if (!GetUserNameW(username, &bufsize)) { + r = GetLastError(); + + /* This should not be possible */ + if (r == ERROR_INSUFFICIENT_BUFFER) + return UV_ENOMEM; + + return uv_translate_sys_error(r); + } + + pwd->homedir = NULL; + r = uv__convert_utf16_to_utf8(path, -1, &pwd->homedir); + + if (r != 0) + return r; + + pwd->username = NULL; + r = uv__convert_utf16_to_utf8(username, -1, &pwd->username); + + if (r != 0) { + uv__free(pwd->homedir); + return r; + } + + pwd->shell = NULL; + pwd->uid = -1; + pwd->gid = -1; + + return 0; +} + + +int uv_os_get_passwd(uv_passwd_t* pwd) { + return uv__getpwuid_r(pwd); +} diff --git a/deps/libuv/src/win/winapi.c b/deps/libuv/src/win/winapi.c new file mode 100644 index 00000000..1fa179b5 --- /dev/null +++ b/deps/libuv/src/win/winapi.c @@ -0,0 +1,159 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
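/* A condensed sketch of the two-pass WideCharToMultiByte() pattern that
 * uv__convert_utf16_to_utf8() above relies on: the first call with a NULL
 * output buffer only measures the result, the second call performs the
 * conversion into a buffer of exactly that size (with one extra byte kept for
 * the terminator, mirroring the code above). Illustrative only. */
#include <windows.h>
#include <stdlib.h>

static char* utf16_to_utf8_sketch(const WCHAR* utf16) {
  /* Pass 1: measure. -1 means "utf16 is NUL-terminated, include the NUL". */
  int size = WideCharToMultiByte(CP_UTF8, 0, utf16, -1, NULL, 0, NULL, NULL);
  char* utf8;

  if (size <= 0)
    return NULL;

  utf8 = malloc((size_t) size + 1);
  if (utf8 == NULL)
    return NULL;

  /* Pass 2: convert into the buffer just allocated. */
  if (WideCharToMultiByte(CP_UTF8, 0, utf16, -1, utf8, size, NULL, NULL) == 0) {
    free(utf8);
    return NULL;
  }

  utf8[size] = '\0';   /* defensive terminator, as in the function above */
  return utf8;
}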
+ */ + +#include <assert.h> + +#include "uv.h" +#include "internal.h" + + +/* Ntdll function pointers */ +sRtlNtStatusToDosError pRtlNtStatusToDosError; +sNtDeviceIoControlFile pNtDeviceIoControlFile; +sNtQueryInformationFile pNtQueryInformationFile; +sNtSetInformationFile pNtSetInformationFile; +sNtQueryVolumeInformationFile pNtQueryVolumeInformationFile; +sNtQueryDirectoryFile pNtQueryDirectoryFile; +sNtQuerySystemInformation pNtQuerySystemInformation; + + +/* Kernel32 function pointers */ +sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx; +sSetFileCompletionNotificationModes pSetFileCompletionNotificationModes; +sCreateSymbolicLinkW pCreateSymbolicLinkW; +sCancelIoEx pCancelIoEx; +sInitializeConditionVariable pInitializeConditionVariable; +sSleepConditionVariableCS pSleepConditionVariableCS; +sSleepConditionVariableSRW pSleepConditionVariableSRW; +sWakeAllConditionVariable pWakeAllConditionVariable; +sWakeConditionVariable pWakeConditionVariable; +sCancelSynchronousIo pCancelSynchronousIo; +sGetFinalPathNameByHandleW pGetFinalPathNameByHandleW; + + +/* Powrprof.dll function pointer */ +sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification; + + +void uv_winapi_init() { + HMODULE ntdll_module; + HMODULE kernel32_module; + HMODULE powrprof_module; + + ntdll_module = GetModuleHandleA("ntdll.dll"); + if (ntdll_module == NULL) { + uv_fatal_error(GetLastError(), "GetModuleHandleA"); + } + + pRtlNtStatusToDosError = (sRtlNtStatusToDosError) GetProcAddress( + ntdll_module, + "RtlNtStatusToDosError"); + if (pRtlNtStatusToDosError == NULL) { + uv_fatal_error(GetLastError(), "GetProcAddress"); + } + + pNtDeviceIoControlFile = (sNtDeviceIoControlFile) GetProcAddress( + ntdll_module, + "NtDeviceIoControlFile"); + if (pNtDeviceIoControlFile == NULL) { + uv_fatal_error(GetLastError(), "GetProcAddress"); + } + + pNtQueryInformationFile = (sNtQueryInformationFile) GetProcAddress( + ntdll_module, + "NtQueryInformationFile"); + if (pNtQueryInformationFile == NULL) { + uv_fatal_error(GetLastError(), "GetProcAddress"); + } + + pNtSetInformationFile = (sNtSetInformationFile) GetProcAddress( + ntdll_module, + "NtSetInformationFile"); + if (pNtSetInformationFile == NULL) { + uv_fatal_error(GetLastError(), "GetProcAddress"); + } + + pNtQueryVolumeInformationFile = (sNtQueryVolumeInformationFile) + GetProcAddress(ntdll_module, "NtQueryVolumeInformationFile"); + if (pNtQueryVolumeInformationFile == NULL) { + uv_fatal_error(GetLastError(), "GetProcAddress"); + } + + pNtQueryDirectoryFile = (sNtQueryDirectoryFile) + GetProcAddress(ntdll_module, "NtQueryDirectoryFile"); + if (pNtQueryDirectoryFile == NULL) { + uv_fatal_error(GetLastError(), "GetProcAddress"); + } + + pNtQuerySystemInformation = (sNtQuerySystemInformation) GetProcAddress( + ntdll_module, + "NtQuerySystemInformation"); + if (pNtQuerySystemInformation == NULL) { + uv_fatal_error(GetLastError(), "GetProcAddress"); + } + + kernel32_module = GetModuleHandleA("kernel32.dll"); + if (kernel32_module == NULL) { + uv_fatal_error(GetLastError(), "GetModuleHandleA"); + } + + pGetQueuedCompletionStatusEx = (sGetQueuedCompletionStatusEx) GetProcAddress( + kernel32_module, + "GetQueuedCompletionStatusEx"); + + pSetFileCompletionNotificationModes = (sSetFileCompletionNotificationModes) + GetProcAddress(kernel32_module, "SetFileCompletionNotificationModes"); + + pCreateSymbolicLinkW = (sCreateSymbolicLinkW) + GetProcAddress(kernel32_module, "CreateSymbolicLinkW"); + + pCancelIoEx = (sCancelIoEx) + GetProcAddress(kernel32_module, 
"CancelIoEx"); + + pInitializeConditionVariable = (sInitializeConditionVariable) + GetProcAddress(kernel32_module, "InitializeConditionVariable"); + + pSleepConditionVariableCS = (sSleepConditionVariableCS) + GetProcAddress(kernel32_module, "SleepConditionVariableCS"); + + pSleepConditionVariableSRW = (sSleepConditionVariableSRW) + GetProcAddress(kernel32_module, "SleepConditionVariableSRW"); + + pWakeAllConditionVariable = (sWakeAllConditionVariable) + GetProcAddress(kernel32_module, "WakeAllConditionVariable"); + + pWakeConditionVariable = (sWakeConditionVariable) + GetProcAddress(kernel32_module, "WakeConditionVariable"); + + pCancelSynchronousIo = (sCancelSynchronousIo) + GetProcAddress(kernel32_module, "CancelSynchronousIo"); + + pGetFinalPathNameByHandleW = (sGetFinalPathNameByHandleW) + GetProcAddress(kernel32_module, "GetFinalPathNameByHandleW"); + + + powrprof_module = LoadLibraryA("powrprof.dll"); + if (powrprof_module != NULL) { + pPowerRegisterSuspendResumeNotification = (sPowerRegisterSuspendResumeNotification) + GetProcAddress(powrprof_module, "PowerRegisterSuspendResumeNotification"); + } + +} diff --git a/deps/libuv/src/win/winapi.h b/deps/libuv/src/win/winapi.h new file mode 100644 index 00000000..16d9365c --- /dev/null +++ b/deps/libuv/src/win/winapi.h @@ -0,0 +1,4748 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef UV_WIN_WINAPI_H_ +#define UV_WIN_WINAPI_H_ + +#include + + +/* + * Ntdll headers + */ +#ifndef STATUS_SEVERITY_SUCCESS +# define STATUS_SEVERITY_SUCCESS 0x0 +#endif + +#ifndef STATUS_SEVERITY_INFORMATIONAL +# define STATUS_SEVERITY_INFORMATIONAL 0x1 +#endif + +#ifndef STATUS_SEVERITY_WARNING +# define STATUS_SEVERITY_WARNING 0x2 +#endif + +#ifndef STATUS_SEVERITY_ERROR +# define STATUS_SEVERITY_ERROR 0x3 +#endif + +#ifndef FACILITY_NTWIN32 +# define FACILITY_NTWIN32 0x7 +#endif + +#ifndef NT_SUCCESS +# define NT_SUCCESS(status) (((NTSTATUS) (status)) >= 0) +#endif + +#ifndef NT_INFORMATION +# define NT_INFORMATION(status) ((((ULONG) (status)) >> 30) == 1) +#endif + +#ifndef NT_WARNING +# define NT_WARNING(status) ((((ULONG) (status)) >> 30) == 2) +#endif + +#ifndef NT_ERROR +# define NT_ERROR(status) ((((ULONG) (status)) >> 30) == 3) +#endif + +#ifndef STATUS_SUCCESS +# define STATUS_SUCCESS ((NTSTATUS) 0x00000000L) +#endif + +#ifndef STATUS_WAIT_0 +# define STATUS_WAIT_0 ((NTSTATUS) 0x00000000L) +#endif + +#ifndef STATUS_WAIT_1 +# define STATUS_WAIT_1 ((NTSTATUS) 0x00000001L) +#endif + +#ifndef STATUS_WAIT_2 +# define STATUS_WAIT_2 ((NTSTATUS) 0x00000002L) +#endif + +#ifndef STATUS_WAIT_3 +# define STATUS_WAIT_3 ((NTSTATUS) 0x00000003L) +#endif + +#ifndef STATUS_WAIT_63 +# define STATUS_WAIT_63 ((NTSTATUS) 0x0000003FL) +#endif + +#ifndef STATUS_ABANDONED +# define STATUS_ABANDONED ((NTSTATUS) 0x00000080L) +#endif + +#ifndef STATUS_ABANDONED_WAIT_0 +# define STATUS_ABANDONED_WAIT_0 ((NTSTATUS) 0x00000080L) +#endif + +#ifndef STATUS_ABANDONED_WAIT_63 +# define STATUS_ABANDONED_WAIT_63 ((NTSTATUS) 0x000000BFL) +#endif + +#ifndef STATUS_USER_APC +# define STATUS_USER_APC ((NTSTATUS) 0x000000C0L) +#endif + +#ifndef STATUS_KERNEL_APC +# define STATUS_KERNEL_APC ((NTSTATUS) 0x00000100L) +#endif + +#ifndef STATUS_ALERTED +# define STATUS_ALERTED ((NTSTATUS) 0x00000101L) +#endif + +#ifndef STATUS_TIMEOUT +# define STATUS_TIMEOUT ((NTSTATUS) 0x00000102L) +#endif + +#ifndef STATUS_PENDING +# define STATUS_PENDING ((NTSTATUS) 0x00000103L) +#endif + +#ifndef STATUS_REPARSE +# define STATUS_REPARSE ((NTSTATUS) 0x00000104L) +#endif + +#ifndef STATUS_MORE_ENTRIES +# define STATUS_MORE_ENTRIES ((NTSTATUS) 0x00000105L) +#endif + +#ifndef STATUS_NOT_ALL_ASSIGNED +# define STATUS_NOT_ALL_ASSIGNED ((NTSTATUS) 0x00000106L) +#endif + +#ifndef STATUS_SOME_NOT_MAPPED +# define STATUS_SOME_NOT_MAPPED ((NTSTATUS) 0x00000107L) +#endif + +#ifndef STATUS_OPLOCK_BREAK_IN_PROGRESS +# define STATUS_OPLOCK_BREAK_IN_PROGRESS ((NTSTATUS) 0x00000108L) +#endif + +#ifndef STATUS_VOLUME_MOUNTED +# define STATUS_VOLUME_MOUNTED ((NTSTATUS) 0x00000109L) +#endif + +#ifndef STATUS_RXACT_COMMITTED +# define STATUS_RXACT_COMMITTED ((NTSTATUS) 0x0000010AL) +#endif + +#ifndef STATUS_NOTIFY_CLEANUP +# define STATUS_NOTIFY_CLEANUP ((NTSTATUS) 0x0000010BL) +#endif + +#ifndef STATUS_NOTIFY_ENUM_DIR +# define STATUS_NOTIFY_ENUM_DIR ((NTSTATUS) 0x0000010CL) +#endif + +#ifndef STATUS_NO_QUOTAS_FOR_ACCOUNT +# define STATUS_NO_QUOTAS_FOR_ACCOUNT ((NTSTATUS) 0x0000010DL) +#endif + +#ifndef STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED +# define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED ((NTSTATUS) 0x0000010EL) +#endif + +#ifndef STATUS_PAGE_FAULT_TRANSITION +# define STATUS_PAGE_FAULT_TRANSITION ((NTSTATUS) 0x00000110L) +#endif + +#ifndef STATUS_PAGE_FAULT_DEMAND_ZERO +# define STATUS_PAGE_FAULT_DEMAND_ZERO ((NTSTATUS) 0x00000111L) +#endif + +#ifndef STATUS_PAGE_FAULT_COPY_ON_WRITE +# define STATUS_PAGE_FAULT_COPY_ON_WRITE ((NTSTATUS) 
0x00000112L) +#endif + +#ifndef STATUS_PAGE_FAULT_GUARD_PAGE +# define STATUS_PAGE_FAULT_GUARD_PAGE ((NTSTATUS) 0x00000113L) +#endif + +#ifndef STATUS_PAGE_FAULT_PAGING_FILE +# define STATUS_PAGE_FAULT_PAGING_FILE ((NTSTATUS) 0x00000114L) +#endif + +#ifndef STATUS_CACHE_PAGE_LOCKED +# define STATUS_CACHE_PAGE_LOCKED ((NTSTATUS) 0x00000115L) +#endif + +#ifndef STATUS_CRASH_DUMP +# define STATUS_CRASH_DUMP ((NTSTATUS) 0x00000116L) +#endif + +#ifndef STATUS_BUFFER_ALL_ZEROS +# define STATUS_BUFFER_ALL_ZEROS ((NTSTATUS) 0x00000117L) +#endif + +#ifndef STATUS_REPARSE_OBJECT +# define STATUS_REPARSE_OBJECT ((NTSTATUS) 0x00000118L) +#endif + +#ifndef STATUS_RESOURCE_REQUIREMENTS_CHANGED +# define STATUS_RESOURCE_REQUIREMENTS_CHANGED ((NTSTATUS) 0x00000119L) +#endif + +#ifndef STATUS_TRANSLATION_COMPLETE +# define STATUS_TRANSLATION_COMPLETE ((NTSTATUS) 0x00000120L) +#endif + +#ifndef STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY +# define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY ((NTSTATUS) 0x00000121L) +#endif + +#ifndef STATUS_NOTHING_TO_TERMINATE +# define STATUS_NOTHING_TO_TERMINATE ((NTSTATUS) 0x00000122L) +#endif + +#ifndef STATUS_PROCESS_NOT_IN_JOB +# define STATUS_PROCESS_NOT_IN_JOB ((NTSTATUS) 0x00000123L) +#endif + +#ifndef STATUS_PROCESS_IN_JOB +# define STATUS_PROCESS_IN_JOB ((NTSTATUS) 0x00000124L) +#endif + +#ifndef STATUS_VOLSNAP_HIBERNATE_READY +# define STATUS_VOLSNAP_HIBERNATE_READY ((NTSTATUS) 0x00000125L) +#endif + +#ifndef STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY +# define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY ((NTSTATUS) 0x00000126L) +#endif + +#ifndef STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED +# define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED ((NTSTATUS) 0x00000127L) +#endif + +#ifndef STATUS_INTERRUPT_STILL_CONNECTED +# define STATUS_INTERRUPT_STILL_CONNECTED ((NTSTATUS) 0x00000128L) +#endif + +#ifndef STATUS_PROCESS_CLONED +# define STATUS_PROCESS_CLONED ((NTSTATUS) 0x00000129L) +#endif + +#ifndef STATUS_FILE_LOCKED_WITH_ONLY_READERS +# define STATUS_FILE_LOCKED_WITH_ONLY_READERS ((NTSTATUS) 0x0000012AL) +#endif + +#ifndef STATUS_FILE_LOCKED_WITH_WRITERS +# define STATUS_FILE_LOCKED_WITH_WRITERS ((NTSTATUS) 0x0000012BL) +#endif + +#ifndef STATUS_RESOURCEMANAGER_READ_ONLY +# define STATUS_RESOURCEMANAGER_READ_ONLY ((NTSTATUS) 0x00000202L) +#endif + +#ifndef STATUS_RING_PREVIOUSLY_EMPTY +# define STATUS_RING_PREVIOUSLY_EMPTY ((NTSTATUS) 0x00000210L) +#endif + +#ifndef STATUS_RING_PREVIOUSLY_FULL +# define STATUS_RING_PREVIOUSLY_FULL ((NTSTATUS) 0x00000211L) +#endif + +#ifndef STATUS_RING_PREVIOUSLY_ABOVE_QUOTA +# define STATUS_RING_PREVIOUSLY_ABOVE_QUOTA ((NTSTATUS) 0x00000212L) +#endif + +#ifndef STATUS_RING_NEWLY_EMPTY +# define STATUS_RING_NEWLY_EMPTY ((NTSTATUS) 0x00000213L) +#endif + +#ifndef STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT +# define STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT ((NTSTATUS) 0x00000214L) +#endif + +#ifndef STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE +# define STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE ((NTSTATUS) 0x00000215L) +#endif + +#ifndef STATUS_OPLOCK_HANDLE_CLOSED +# define STATUS_OPLOCK_HANDLE_CLOSED ((NTSTATUS) 0x00000216L) +#endif + +#ifndef STATUS_WAIT_FOR_OPLOCK +# define STATUS_WAIT_FOR_OPLOCK ((NTSTATUS) 0x00000367L) +#endif + +#ifndef STATUS_OBJECT_NAME_EXISTS +# define STATUS_OBJECT_NAME_EXISTS ((NTSTATUS) 0x40000000L) +#endif + +#ifndef STATUS_THREAD_WAS_SUSPENDED +# define STATUS_THREAD_WAS_SUSPENDED ((NTSTATUS) 0x40000001L) +#endif + +#ifndef STATUS_WORKING_SET_LIMIT_RANGE +# define STATUS_WORKING_SET_LIMIT_RANGE ((NTSTATUS) 0x40000002L) +#endif + 
+#ifndef STATUS_IMAGE_NOT_AT_BASE +# define STATUS_IMAGE_NOT_AT_BASE ((NTSTATUS) 0x40000003L) +#endif + +#ifndef STATUS_RXACT_STATE_CREATED +# define STATUS_RXACT_STATE_CREATED ((NTSTATUS) 0x40000004L) +#endif + +#ifndef STATUS_SEGMENT_NOTIFICATION +# define STATUS_SEGMENT_NOTIFICATION ((NTSTATUS) 0x40000005L) +#endif + +#ifndef STATUS_LOCAL_USER_SESSION_KEY +# define STATUS_LOCAL_USER_SESSION_KEY ((NTSTATUS) 0x40000006L) +#endif + +#ifndef STATUS_BAD_CURRENT_DIRECTORY +# define STATUS_BAD_CURRENT_DIRECTORY ((NTSTATUS) 0x40000007L) +#endif + +#ifndef STATUS_SERIAL_MORE_WRITES +# define STATUS_SERIAL_MORE_WRITES ((NTSTATUS) 0x40000008L) +#endif + +#ifndef STATUS_REGISTRY_RECOVERED +# define STATUS_REGISTRY_RECOVERED ((NTSTATUS) 0x40000009L) +#endif + +#ifndef STATUS_FT_READ_RECOVERY_FROM_BACKUP +# define STATUS_FT_READ_RECOVERY_FROM_BACKUP ((NTSTATUS) 0x4000000AL) +#endif + +#ifndef STATUS_FT_WRITE_RECOVERY +# define STATUS_FT_WRITE_RECOVERY ((NTSTATUS) 0x4000000BL) +#endif + +#ifndef STATUS_SERIAL_COUNTER_TIMEOUT +# define STATUS_SERIAL_COUNTER_TIMEOUT ((NTSTATUS) 0x4000000CL) +#endif + +#ifndef STATUS_NULL_LM_PASSWORD +# define STATUS_NULL_LM_PASSWORD ((NTSTATUS) 0x4000000DL) +#endif + +#ifndef STATUS_IMAGE_MACHINE_TYPE_MISMATCH +# define STATUS_IMAGE_MACHINE_TYPE_MISMATCH ((NTSTATUS) 0x4000000EL) +#endif + +#ifndef STATUS_RECEIVE_PARTIAL +# define STATUS_RECEIVE_PARTIAL ((NTSTATUS) 0x4000000FL) +#endif + +#ifndef STATUS_RECEIVE_EXPEDITED +# define STATUS_RECEIVE_EXPEDITED ((NTSTATUS) 0x40000010L) +#endif + +#ifndef STATUS_RECEIVE_PARTIAL_EXPEDITED +# define STATUS_RECEIVE_PARTIAL_EXPEDITED ((NTSTATUS) 0x40000011L) +#endif + +#ifndef STATUS_EVENT_DONE +# define STATUS_EVENT_DONE ((NTSTATUS) 0x40000012L) +#endif + +#ifndef STATUS_EVENT_PENDING +# define STATUS_EVENT_PENDING ((NTSTATUS) 0x40000013L) +#endif + +#ifndef STATUS_CHECKING_FILE_SYSTEM +# define STATUS_CHECKING_FILE_SYSTEM ((NTSTATUS) 0x40000014L) +#endif + +#ifndef STATUS_FATAL_APP_EXIT +# define STATUS_FATAL_APP_EXIT ((NTSTATUS) 0x40000015L) +#endif + +#ifndef STATUS_PREDEFINED_HANDLE +# define STATUS_PREDEFINED_HANDLE ((NTSTATUS) 0x40000016L) +#endif + +#ifndef STATUS_WAS_UNLOCKED +# define STATUS_WAS_UNLOCKED ((NTSTATUS) 0x40000017L) +#endif + +#ifndef STATUS_SERVICE_NOTIFICATION +# define STATUS_SERVICE_NOTIFICATION ((NTSTATUS) 0x40000018L) +#endif + +#ifndef STATUS_WAS_LOCKED +# define STATUS_WAS_LOCKED ((NTSTATUS) 0x40000019L) +#endif + +#ifndef STATUS_LOG_HARD_ERROR +# define STATUS_LOG_HARD_ERROR ((NTSTATUS) 0x4000001AL) +#endif + +#ifndef STATUS_ALREADY_WIN32 +# define STATUS_ALREADY_WIN32 ((NTSTATUS) 0x4000001BL) +#endif + +#ifndef STATUS_WX86_UNSIMULATE +# define STATUS_WX86_UNSIMULATE ((NTSTATUS) 0x4000001CL) +#endif + +#ifndef STATUS_WX86_CONTINUE +# define STATUS_WX86_CONTINUE ((NTSTATUS) 0x4000001DL) +#endif + +#ifndef STATUS_WX86_SINGLE_STEP +# define STATUS_WX86_SINGLE_STEP ((NTSTATUS) 0x4000001EL) +#endif + +#ifndef STATUS_WX86_BREAKPOINT +# define STATUS_WX86_BREAKPOINT ((NTSTATUS) 0x4000001FL) +#endif + +#ifndef STATUS_WX86_EXCEPTION_CONTINUE +# define STATUS_WX86_EXCEPTION_CONTINUE ((NTSTATUS) 0x40000020L) +#endif + +#ifndef STATUS_WX86_EXCEPTION_LASTCHANCE +# define STATUS_WX86_EXCEPTION_LASTCHANCE ((NTSTATUS) 0x40000021L) +#endif + +#ifndef STATUS_WX86_EXCEPTION_CHAIN +# define STATUS_WX86_EXCEPTION_CHAIN ((NTSTATUS) 0x40000022L) +#endif + +#ifndef STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE +# define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE ((NTSTATUS) 0x40000023L) +#endif + +#ifndef STATUS_NO_YIELD_PERFORMED +# 
define STATUS_NO_YIELD_PERFORMED ((NTSTATUS) 0x40000024L) +#endif + +#ifndef STATUS_TIMER_RESUME_IGNORED +# define STATUS_TIMER_RESUME_IGNORED ((NTSTATUS) 0x40000025L) +#endif + +#ifndef STATUS_ARBITRATION_UNHANDLED +# define STATUS_ARBITRATION_UNHANDLED ((NTSTATUS) 0x40000026L) +#endif + +#ifndef STATUS_CARDBUS_NOT_SUPPORTED +# define STATUS_CARDBUS_NOT_SUPPORTED ((NTSTATUS) 0x40000027L) +#endif + +#ifndef STATUS_WX86_CREATEWX86TIB +# define STATUS_WX86_CREATEWX86TIB ((NTSTATUS) 0x40000028L) +#endif + +#ifndef STATUS_MP_PROCESSOR_MISMATCH +# define STATUS_MP_PROCESSOR_MISMATCH ((NTSTATUS) 0x40000029L) +#endif + +#ifndef STATUS_HIBERNATED +# define STATUS_HIBERNATED ((NTSTATUS) 0x4000002AL) +#endif + +#ifndef STATUS_RESUME_HIBERNATION +# define STATUS_RESUME_HIBERNATION ((NTSTATUS) 0x4000002BL) +#endif + +#ifndef STATUS_FIRMWARE_UPDATED +# define STATUS_FIRMWARE_UPDATED ((NTSTATUS) 0x4000002CL) +#endif + +#ifndef STATUS_DRIVERS_LEAKING_LOCKED_PAGES +# define STATUS_DRIVERS_LEAKING_LOCKED_PAGES ((NTSTATUS) 0x4000002DL) +#endif + +#ifndef STATUS_MESSAGE_RETRIEVED +# define STATUS_MESSAGE_RETRIEVED ((NTSTATUS) 0x4000002EL) +#endif + +#ifndef STATUS_SYSTEM_POWERSTATE_TRANSITION +# define STATUS_SYSTEM_POWERSTATE_TRANSITION ((NTSTATUS) 0x4000002FL) +#endif + +#ifndef STATUS_ALPC_CHECK_COMPLETION_LIST +# define STATUS_ALPC_CHECK_COMPLETION_LIST ((NTSTATUS) 0x40000030L) +#endif + +#ifndef STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION +# define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION ((NTSTATUS) 0x40000031L) +#endif + +#ifndef STATUS_ACCESS_AUDIT_BY_POLICY +# define STATUS_ACCESS_AUDIT_BY_POLICY ((NTSTATUS) 0x40000032L) +#endif + +#ifndef STATUS_ABANDON_HIBERFILE +# define STATUS_ABANDON_HIBERFILE ((NTSTATUS) 0x40000033L) +#endif + +#ifndef STATUS_BIZRULES_NOT_ENABLED +# define STATUS_BIZRULES_NOT_ENABLED ((NTSTATUS) 0x40000034L) +#endif + +#ifndef STATUS_GUARD_PAGE_VIOLATION +# define STATUS_GUARD_PAGE_VIOLATION ((NTSTATUS) 0x80000001L) +#endif + +#ifndef STATUS_DATATYPE_MISALIGNMENT +# define STATUS_DATATYPE_MISALIGNMENT ((NTSTATUS) 0x80000002L) +#endif + +#ifndef STATUS_BREAKPOINT +# define STATUS_BREAKPOINT ((NTSTATUS) 0x80000003L) +#endif + +#ifndef STATUS_SINGLE_STEP +# define STATUS_SINGLE_STEP ((NTSTATUS) 0x80000004L) +#endif + +#ifndef STATUS_BUFFER_OVERFLOW +# define STATUS_BUFFER_OVERFLOW ((NTSTATUS) 0x80000005L) +#endif + +#ifndef STATUS_NO_MORE_FILES +# define STATUS_NO_MORE_FILES ((NTSTATUS) 0x80000006L) +#endif + +#ifndef STATUS_WAKE_SYSTEM_DEBUGGER +# define STATUS_WAKE_SYSTEM_DEBUGGER ((NTSTATUS) 0x80000007L) +#endif + +#ifndef STATUS_HANDLES_CLOSED +# define STATUS_HANDLES_CLOSED ((NTSTATUS) 0x8000000AL) +#endif + +#ifndef STATUS_NO_INHERITANCE +# define STATUS_NO_INHERITANCE ((NTSTATUS) 0x8000000BL) +#endif + +#ifndef STATUS_GUID_SUBSTITUTION_MADE +# define STATUS_GUID_SUBSTITUTION_MADE ((NTSTATUS) 0x8000000CL) +#endif + +#ifndef STATUS_PARTIAL_COPY +# define STATUS_PARTIAL_COPY ((NTSTATUS) 0x8000000DL) +#endif + +#ifndef STATUS_DEVICE_PAPER_EMPTY +# define STATUS_DEVICE_PAPER_EMPTY ((NTSTATUS) 0x8000000EL) +#endif + +#ifndef STATUS_DEVICE_POWERED_OFF +# define STATUS_DEVICE_POWERED_OFF ((NTSTATUS) 0x8000000FL) +#endif + +#ifndef STATUS_DEVICE_OFF_LINE +# define STATUS_DEVICE_OFF_LINE ((NTSTATUS) 0x80000010L) +#endif + +#ifndef STATUS_DEVICE_BUSY +# define STATUS_DEVICE_BUSY ((NTSTATUS) 0x80000011L) +#endif + +#ifndef STATUS_NO_MORE_EAS +# define STATUS_NO_MORE_EAS ((NTSTATUS) 0x80000012L) +#endif + +#ifndef STATUS_INVALID_EA_NAME +# define STATUS_INVALID_EA_NAME ((NTSTATUS) 
0x80000013L) +#endif + +#ifndef STATUS_EA_LIST_INCONSISTENT +# define STATUS_EA_LIST_INCONSISTENT ((NTSTATUS) 0x80000014L) +#endif + +#ifndef STATUS_INVALID_EA_FLAG +# define STATUS_INVALID_EA_FLAG ((NTSTATUS) 0x80000015L) +#endif + +#ifndef STATUS_VERIFY_REQUIRED +# define STATUS_VERIFY_REQUIRED ((NTSTATUS) 0x80000016L) +#endif + +#ifndef STATUS_EXTRANEOUS_INFORMATION +# define STATUS_EXTRANEOUS_INFORMATION ((NTSTATUS) 0x80000017L) +#endif + +#ifndef STATUS_RXACT_COMMIT_NECESSARY +# define STATUS_RXACT_COMMIT_NECESSARY ((NTSTATUS) 0x80000018L) +#endif + +#ifndef STATUS_NO_MORE_ENTRIES +# define STATUS_NO_MORE_ENTRIES ((NTSTATUS) 0x8000001AL) +#endif + +#ifndef STATUS_FILEMARK_DETECTED +# define STATUS_FILEMARK_DETECTED ((NTSTATUS) 0x8000001BL) +#endif + +#ifndef STATUS_MEDIA_CHANGED +# define STATUS_MEDIA_CHANGED ((NTSTATUS) 0x8000001CL) +#endif + +#ifndef STATUS_BUS_RESET +# define STATUS_BUS_RESET ((NTSTATUS) 0x8000001DL) +#endif + +#ifndef STATUS_END_OF_MEDIA +# define STATUS_END_OF_MEDIA ((NTSTATUS) 0x8000001EL) +#endif + +#ifndef STATUS_BEGINNING_OF_MEDIA +# define STATUS_BEGINNING_OF_MEDIA ((NTSTATUS) 0x8000001FL) +#endif + +#ifndef STATUS_MEDIA_CHECK +# define STATUS_MEDIA_CHECK ((NTSTATUS) 0x80000020L) +#endif + +#ifndef STATUS_SETMARK_DETECTED +# define STATUS_SETMARK_DETECTED ((NTSTATUS) 0x80000021L) +#endif + +#ifndef STATUS_NO_DATA_DETECTED +# define STATUS_NO_DATA_DETECTED ((NTSTATUS) 0x80000022L) +#endif + +#ifndef STATUS_REDIRECTOR_HAS_OPEN_HANDLES +# define STATUS_REDIRECTOR_HAS_OPEN_HANDLES ((NTSTATUS) 0x80000023L) +#endif + +#ifndef STATUS_SERVER_HAS_OPEN_HANDLES +# define STATUS_SERVER_HAS_OPEN_HANDLES ((NTSTATUS) 0x80000024L) +#endif + +#ifndef STATUS_ALREADY_DISCONNECTED +# define STATUS_ALREADY_DISCONNECTED ((NTSTATUS) 0x80000025L) +#endif + +#ifndef STATUS_LONGJUMP +# define STATUS_LONGJUMP ((NTSTATUS) 0x80000026L) +#endif + +#ifndef STATUS_CLEANER_CARTRIDGE_INSTALLED +# define STATUS_CLEANER_CARTRIDGE_INSTALLED ((NTSTATUS) 0x80000027L) +#endif + +#ifndef STATUS_PLUGPLAY_QUERY_VETOED +# define STATUS_PLUGPLAY_QUERY_VETOED ((NTSTATUS) 0x80000028L) +#endif + +#ifndef STATUS_UNWIND_CONSOLIDATE +# define STATUS_UNWIND_CONSOLIDATE ((NTSTATUS) 0x80000029L) +#endif + +#ifndef STATUS_REGISTRY_HIVE_RECOVERED +# define STATUS_REGISTRY_HIVE_RECOVERED ((NTSTATUS) 0x8000002AL) +#endif + +#ifndef STATUS_DLL_MIGHT_BE_INSECURE +# define STATUS_DLL_MIGHT_BE_INSECURE ((NTSTATUS) 0x8000002BL) +#endif + +#ifndef STATUS_DLL_MIGHT_BE_INCOMPATIBLE +# define STATUS_DLL_MIGHT_BE_INCOMPATIBLE ((NTSTATUS) 0x8000002CL) +#endif + +#ifndef STATUS_STOPPED_ON_SYMLINK +# define STATUS_STOPPED_ON_SYMLINK ((NTSTATUS) 0x8000002DL) +#endif + +#ifndef STATUS_CANNOT_GRANT_REQUESTED_OPLOCK +# define STATUS_CANNOT_GRANT_REQUESTED_OPLOCK ((NTSTATUS) 0x8000002EL) +#endif + +#ifndef STATUS_NO_ACE_CONDITION +# define STATUS_NO_ACE_CONDITION ((NTSTATUS) 0x8000002FL) +#endif + +#ifndef STATUS_UNSUCCESSFUL +# define STATUS_UNSUCCESSFUL ((NTSTATUS) 0xC0000001L) +#endif + +#ifndef STATUS_NOT_IMPLEMENTED +# define STATUS_NOT_IMPLEMENTED ((NTSTATUS) 0xC0000002L) +#endif + +#ifndef STATUS_INVALID_INFO_CLASS +# define STATUS_INVALID_INFO_CLASS ((NTSTATUS) 0xC0000003L) +#endif + +#ifndef STATUS_INFO_LENGTH_MISMATCH +# define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS) 0xC0000004L) +#endif + +#ifndef STATUS_ACCESS_VIOLATION +# define STATUS_ACCESS_VIOLATION ((NTSTATUS) 0xC0000005L) +#endif + +#ifndef STATUS_IN_PAGE_ERROR +# define STATUS_IN_PAGE_ERROR ((NTSTATUS) 0xC0000006L) +#endif + +#ifndef STATUS_PAGEFILE_QUOTA +# 
define STATUS_PAGEFILE_QUOTA ((NTSTATUS) 0xC0000007L) +#endif + +#ifndef STATUS_INVALID_HANDLE +# define STATUS_INVALID_HANDLE ((NTSTATUS) 0xC0000008L) +#endif + +#ifndef STATUS_BAD_INITIAL_STACK +# define STATUS_BAD_INITIAL_STACK ((NTSTATUS) 0xC0000009L) +#endif + +#ifndef STATUS_BAD_INITIAL_PC +# define STATUS_BAD_INITIAL_PC ((NTSTATUS) 0xC000000AL) +#endif + +#ifndef STATUS_INVALID_CID +# define STATUS_INVALID_CID ((NTSTATUS) 0xC000000BL) +#endif + +#ifndef STATUS_TIMER_NOT_CANCELED +# define STATUS_TIMER_NOT_CANCELED ((NTSTATUS) 0xC000000CL) +#endif + +#ifndef STATUS_INVALID_PARAMETER +# define STATUS_INVALID_PARAMETER ((NTSTATUS) 0xC000000DL) +#endif + +#ifndef STATUS_NO_SUCH_DEVICE +# define STATUS_NO_SUCH_DEVICE ((NTSTATUS) 0xC000000EL) +#endif + +#ifndef STATUS_NO_SUCH_FILE +# define STATUS_NO_SUCH_FILE ((NTSTATUS) 0xC000000FL) +#endif + +#ifndef STATUS_INVALID_DEVICE_REQUEST +# define STATUS_INVALID_DEVICE_REQUEST ((NTSTATUS) 0xC0000010L) +#endif + +#ifndef STATUS_END_OF_FILE +# define STATUS_END_OF_FILE ((NTSTATUS) 0xC0000011L) +#endif + +#ifndef STATUS_WRONG_VOLUME +# define STATUS_WRONG_VOLUME ((NTSTATUS) 0xC0000012L) +#endif + +#ifndef STATUS_NO_MEDIA_IN_DEVICE +# define STATUS_NO_MEDIA_IN_DEVICE ((NTSTATUS) 0xC0000013L) +#endif + +#ifndef STATUS_UNRECOGNIZED_MEDIA +# define STATUS_UNRECOGNIZED_MEDIA ((NTSTATUS) 0xC0000014L) +#endif + +#ifndef STATUS_NONEXISTENT_SECTOR +# define STATUS_NONEXISTENT_SECTOR ((NTSTATUS) 0xC0000015L) +#endif + +#ifndef STATUS_MORE_PROCESSING_REQUIRED +# define STATUS_MORE_PROCESSING_REQUIRED ((NTSTATUS) 0xC0000016L) +#endif + +#ifndef STATUS_NO_MEMORY +# define STATUS_NO_MEMORY ((NTSTATUS) 0xC0000017L) +#endif + +#ifndef STATUS_CONFLICTING_ADDRESSES +# define STATUS_CONFLICTING_ADDRESSES ((NTSTATUS) 0xC0000018L) +#endif + +#ifndef STATUS_NOT_MAPPED_VIEW +# define STATUS_NOT_MAPPED_VIEW ((NTSTATUS) 0xC0000019L) +#endif + +#ifndef STATUS_UNABLE_TO_FREE_VM +# define STATUS_UNABLE_TO_FREE_VM ((NTSTATUS) 0xC000001AL) +#endif + +#ifndef STATUS_UNABLE_TO_DELETE_SECTION +# define STATUS_UNABLE_TO_DELETE_SECTION ((NTSTATUS) 0xC000001BL) +#endif + +#ifndef STATUS_INVALID_SYSTEM_SERVICE +# define STATUS_INVALID_SYSTEM_SERVICE ((NTSTATUS) 0xC000001CL) +#endif + +#ifndef STATUS_ILLEGAL_INSTRUCTION +# define STATUS_ILLEGAL_INSTRUCTION ((NTSTATUS) 0xC000001DL) +#endif + +#ifndef STATUS_INVALID_LOCK_SEQUENCE +# define STATUS_INVALID_LOCK_SEQUENCE ((NTSTATUS) 0xC000001EL) +#endif + +#ifndef STATUS_INVALID_VIEW_SIZE +# define STATUS_INVALID_VIEW_SIZE ((NTSTATUS) 0xC000001FL) +#endif + +#ifndef STATUS_INVALID_FILE_FOR_SECTION +# define STATUS_INVALID_FILE_FOR_SECTION ((NTSTATUS) 0xC0000020L) +#endif + +#ifndef STATUS_ALREADY_COMMITTED +# define STATUS_ALREADY_COMMITTED ((NTSTATUS) 0xC0000021L) +#endif + +#ifndef STATUS_ACCESS_DENIED +# define STATUS_ACCESS_DENIED ((NTSTATUS) 0xC0000022L) +#endif + +#ifndef STATUS_BUFFER_TOO_SMALL +# define STATUS_BUFFER_TOO_SMALL ((NTSTATUS) 0xC0000023L) +#endif + +#ifndef STATUS_OBJECT_TYPE_MISMATCH +# define STATUS_OBJECT_TYPE_MISMATCH ((NTSTATUS) 0xC0000024L) +#endif + +#ifndef STATUS_NONCONTINUABLE_EXCEPTION +# define STATUS_NONCONTINUABLE_EXCEPTION ((NTSTATUS) 0xC0000025L) +#endif + +#ifndef STATUS_INVALID_DISPOSITION +# define STATUS_INVALID_DISPOSITION ((NTSTATUS) 0xC0000026L) +#endif + +#ifndef STATUS_UNWIND +# define STATUS_UNWIND ((NTSTATUS) 0xC0000027L) +#endif + +#ifndef STATUS_BAD_STACK +# define STATUS_BAD_STACK ((NTSTATUS) 0xC0000028L) +#endif + +#ifndef STATUS_INVALID_UNWIND_TARGET +# define 
STATUS_INVALID_UNWIND_TARGET ((NTSTATUS) 0xC0000029L) +#endif + +#ifndef STATUS_NOT_LOCKED +# define STATUS_NOT_LOCKED ((NTSTATUS) 0xC000002AL) +#endif + +#ifndef STATUS_PARITY_ERROR +# define STATUS_PARITY_ERROR ((NTSTATUS) 0xC000002BL) +#endif + +#ifndef STATUS_UNABLE_TO_DECOMMIT_VM +# define STATUS_UNABLE_TO_DECOMMIT_VM ((NTSTATUS) 0xC000002CL) +#endif + +#ifndef STATUS_NOT_COMMITTED +# define STATUS_NOT_COMMITTED ((NTSTATUS) 0xC000002DL) +#endif + +#ifndef STATUS_INVALID_PORT_ATTRIBUTES +# define STATUS_INVALID_PORT_ATTRIBUTES ((NTSTATUS) 0xC000002EL) +#endif + +#ifndef STATUS_PORT_MESSAGE_TOO_LONG +# define STATUS_PORT_MESSAGE_TOO_LONG ((NTSTATUS) 0xC000002FL) +#endif + +#ifndef STATUS_INVALID_PARAMETER_MIX +# define STATUS_INVALID_PARAMETER_MIX ((NTSTATUS) 0xC0000030L) +#endif + +#ifndef STATUS_INVALID_QUOTA_LOWER +# define STATUS_INVALID_QUOTA_LOWER ((NTSTATUS) 0xC0000031L) +#endif + +#ifndef STATUS_DISK_CORRUPT_ERROR +# define STATUS_DISK_CORRUPT_ERROR ((NTSTATUS) 0xC0000032L) +#endif + +#ifndef STATUS_OBJECT_NAME_INVALID +# define STATUS_OBJECT_NAME_INVALID ((NTSTATUS) 0xC0000033L) +#endif + +#ifndef STATUS_OBJECT_NAME_NOT_FOUND +# define STATUS_OBJECT_NAME_NOT_FOUND ((NTSTATUS) 0xC0000034L) +#endif + +#ifndef STATUS_OBJECT_NAME_COLLISION +# define STATUS_OBJECT_NAME_COLLISION ((NTSTATUS) 0xC0000035L) +#endif + +#ifndef STATUS_PORT_DISCONNECTED +# define STATUS_PORT_DISCONNECTED ((NTSTATUS) 0xC0000037L) +#endif + +#ifndef STATUS_DEVICE_ALREADY_ATTACHED +# define STATUS_DEVICE_ALREADY_ATTACHED ((NTSTATUS) 0xC0000038L) +#endif + +#ifndef STATUS_OBJECT_PATH_INVALID +# define STATUS_OBJECT_PATH_INVALID ((NTSTATUS) 0xC0000039L) +#endif + +#ifndef STATUS_OBJECT_PATH_NOT_FOUND +# define STATUS_OBJECT_PATH_NOT_FOUND ((NTSTATUS) 0xC000003AL) +#endif + +#ifndef STATUS_OBJECT_PATH_SYNTAX_BAD +# define STATUS_OBJECT_PATH_SYNTAX_BAD ((NTSTATUS) 0xC000003BL) +#endif + +#ifndef STATUS_DATA_OVERRUN +# define STATUS_DATA_OVERRUN ((NTSTATUS) 0xC000003CL) +#endif + +#ifndef STATUS_DATA_LATE_ERROR +# define STATUS_DATA_LATE_ERROR ((NTSTATUS) 0xC000003DL) +#endif + +#ifndef STATUS_DATA_ERROR +# define STATUS_DATA_ERROR ((NTSTATUS) 0xC000003EL) +#endif + +#ifndef STATUS_CRC_ERROR +# define STATUS_CRC_ERROR ((NTSTATUS) 0xC000003FL) +#endif + +#ifndef STATUS_SECTION_TOO_BIG +# define STATUS_SECTION_TOO_BIG ((NTSTATUS) 0xC0000040L) +#endif + +#ifndef STATUS_PORT_CONNECTION_REFUSED +# define STATUS_PORT_CONNECTION_REFUSED ((NTSTATUS) 0xC0000041L) +#endif + +#ifndef STATUS_INVALID_PORT_HANDLE +# define STATUS_INVALID_PORT_HANDLE ((NTSTATUS) 0xC0000042L) +#endif + +#ifndef STATUS_SHARING_VIOLATION +# define STATUS_SHARING_VIOLATION ((NTSTATUS) 0xC0000043L) +#endif + +#ifndef STATUS_QUOTA_EXCEEDED +# define STATUS_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000044L) +#endif + +#ifndef STATUS_INVALID_PAGE_PROTECTION +# define STATUS_INVALID_PAGE_PROTECTION ((NTSTATUS) 0xC0000045L) +#endif + +#ifndef STATUS_MUTANT_NOT_OWNED +# define STATUS_MUTANT_NOT_OWNED ((NTSTATUS) 0xC0000046L) +#endif + +#ifndef STATUS_SEMAPHORE_LIMIT_EXCEEDED +# define STATUS_SEMAPHORE_LIMIT_EXCEEDED ((NTSTATUS) 0xC0000047L) +#endif + +#ifndef STATUS_PORT_ALREADY_SET +# define STATUS_PORT_ALREADY_SET ((NTSTATUS) 0xC0000048L) +#endif + +#ifndef STATUS_SECTION_NOT_IMAGE +# define STATUS_SECTION_NOT_IMAGE ((NTSTATUS) 0xC0000049L) +#endif + +#ifndef STATUS_SUSPEND_COUNT_EXCEEDED +# define STATUS_SUSPEND_COUNT_EXCEEDED ((NTSTATUS) 0xC000004AL) +#endif + +#ifndef STATUS_THREAD_IS_TERMINATING +# define STATUS_THREAD_IS_TERMINATING ((NTSTATUS) 0xC000004BL) 
+#endif + +#ifndef STATUS_BAD_WORKING_SET_LIMIT +# define STATUS_BAD_WORKING_SET_LIMIT ((NTSTATUS) 0xC000004CL) +#endif + +#ifndef STATUS_INCOMPATIBLE_FILE_MAP +# define STATUS_INCOMPATIBLE_FILE_MAP ((NTSTATUS) 0xC000004DL) +#endif + +#ifndef STATUS_SECTION_PROTECTION +# define STATUS_SECTION_PROTECTION ((NTSTATUS) 0xC000004EL) +#endif + +#ifndef STATUS_EAS_NOT_SUPPORTED +# define STATUS_EAS_NOT_SUPPORTED ((NTSTATUS) 0xC000004FL) +#endif + +#ifndef STATUS_EA_TOO_LARGE +# define STATUS_EA_TOO_LARGE ((NTSTATUS) 0xC0000050L) +#endif + +#ifndef STATUS_NONEXISTENT_EA_ENTRY +# define STATUS_NONEXISTENT_EA_ENTRY ((NTSTATUS) 0xC0000051L) +#endif + +#ifndef STATUS_NO_EAS_ON_FILE +# define STATUS_NO_EAS_ON_FILE ((NTSTATUS) 0xC0000052L) +#endif + +#ifndef STATUS_EA_CORRUPT_ERROR +# define STATUS_EA_CORRUPT_ERROR ((NTSTATUS) 0xC0000053L) +#endif + +#ifndef STATUS_FILE_LOCK_CONFLICT +# define STATUS_FILE_LOCK_CONFLICT ((NTSTATUS) 0xC0000054L) +#endif + +#ifndef STATUS_LOCK_NOT_GRANTED +# define STATUS_LOCK_NOT_GRANTED ((NTSTATUS) 0xC0000055L) +#endif + +#ifndef STATUS_DELETE_PENDING +# define STATUS_DELETE_PENDING ((NTSTATUS) 0xC0000056L) +#endif + +#ifndef STATUS_CTL_FILE_NOT_SUPPORTED +# define STATUS_CTL_FILE_NOT_SUPPORTED ((NTSTATUS) 0xC0000057L) +#endif + +#ifndef STATUS_UNKNOWN_REVISION +# define STATUS_UNKNOWN_REVISION ((NTSTATUS) 0xC0000058L) +#endif + +#ifndef STATUS_REVISION_MISMATCH +# define STATUS_REVISION_MISMATCH ((NTSTATUS) 0xC0000059L) +#endif + +#ifndef STATUS_INVALID_OWNER +# define STATUS_INVALID_OWNER ((NTSTATUS) 0xC000005AL) +#endif + +#ifndef STATUS_INVALID_PRIMARY_GROUP +# define STATUS_INVALID_PRIMARY_GROUP ((NTSTATUS) 0xC000005BL) +#endif + +#ifndef STATUS_NO_IMPERSONATION_TOKEN +# define STATUS_NO_IMPERSONATION_TOKEN ((NTSTATUS) 0xC000005CL) +#endif + +#ifndef STATUS_CANT_DISABLE_MANDATORY +# define STATUS_CANT_DISABLE_MANDATORY ((NTSTATUS) 0xC000005DL) +#endif + +#ifndef STATUS_NO_LOGON_SERVERS +# define STATUS_NO_LOGON_SERVERS ((NTSTATUS) 0xC000005EL) +#endif + +#ifndef STATUS_NO_SUCH_LOGON_SESSION +# define STATUS_NO_SUCH_LOGON_SESSION ((NTSTATUS) 0xC000005FL) +#endif + +#ifndef STATUS_NO_SUCH_PRIVILEGE +# define STATUS_NO_SUCH_PRIVILEGE ((NTSTATUS) 0xC0000060L) +#endif + +#ifndef STATUS_PRIVILEGE_NOT_HELD +# define STATUS_PRIVILEGE_NOT_HELD ((NTSTATUS) 0xC0000061L) +#endif + +#ifndef STATUS_INVALID_ACCOUNT_NAME +# define STATUS_INVALID_ACCOUNT_NAME ((NTSTATUS) 0xC0000062L) +#endif + +#ifndef STATUS_USER_EXISTS +# define STATUS_USER_EXISTS ((NTSTATUS) 0xC0000063L) +#endif + +#ifndef STATUS_NO_SUCH_USER +# define STATUS_NO_SUCH_USER ((NTSTATUS) 0xC0000064L) +#endif + +#ifndef STATUS_GROUP_EXISTS +# define STATUS_GROUP_EXISTS ((NTSTATUS) 0xC0000065L) +#endif + +#ifndef STATUS_NO_SUCH_GROUP +# define STATUS_NO_SUCH_GROUP ((NTSTATUS) 0xC0000066L) +#endif + +#ifndef STATUS_MEMBER_IN_GROUP +# define STATUS_MEMBER_IN_GROUP ((NTSTATUS) 0xC0000067L) +#endif + +#ifndef STATUS_MEMBER_NOT_IN_GROUP +# define STATUS_MEMBER_NOT_IN_GROUP ((NTSTATUS) 0xC0000068L) +#endif + +#ifndef STATUS_LAST_ADMIN +# define STATUS_LAST_ADMIN ((NTSTATUS) 0xC0000069L) +#endif + +#ifndef STATUS_WRONG_PASSWORD +# define STATUS_WRONG_PASSWORD ((NTSTATUS) 0xC000006AL) +#endif + +#ifndef STATUS_ILL_FORMED_PASSWORD +# define STATUS_ILL_FORMED_PASSWORD ((NTSTATUS) 0xC000006BL) +#endif + +#ifndef STATUS_PASSWORD_RESTRICTION +# define STATUS_PASSWORD_RESTRICTION ((NTSTATUS) 0xC000006CL) +#endif + +#ifndef STATUS_LOGON_FAILURE +# define STATUS_LOGON_FAILURE ((NTSTATUS) 0xC000006DL) +#endif + +#ifndef 
STATUS_ACCOUNT_RESTRICTION +# define STATUS_ACCOUNT_RESTRICTION ((NTSTATUS) 0xC000006EL) +#endif + +#ifndef STATUS_INVALID_LOGON_HOURS +# define STATUS_INVALID_LOGON_HOURS ((NTSTATUS) 0xC000006FL) +#endif + +#ifndef STATUS_INVALID_WORKSTATION +# define STATUS_INVALID_WORKSTATION ((NTSTATUS) 0xC0000070L) +#endif + +#ifndef STATUS_PASSWORD_EXPIRED +# define STATUS_PASSWORD_EXPIRED ((NTSTATUS) 0xC0000071L) +#endif + +#ifndef STATUS_ACCOUNT_DISABLED +# define STATUS_ACCOUNT_DISABLED ((NTSTATUS) 0xC0000072L) +#endif + +#ifndef STATUS_NONE_MAPPED +# define STATUS_NONE_MAPPED ((NTSTATUS) 0xC0000073L) +#endif + +#ifndef STATUS_TOO_MANY_LUIDS_REQUESTED +# define STATUS_TOO_MANY_LUIDS_REQUESTED ((NTSTATUS) 0xC0000074L) +#endif + +#ifndef STATUS_LUIDS_EXHAUSTED +# define STATUS_LUIDS_EXHAUSTED ((NTSTATUS) 0xC0000075L) +#endif + +#ifndef STATUS_INVALID_SUB_AUTHORITY +# define STATUS_INVALID_SUB_AUTHORITY ((NTSTATUS) 0xC0000076L) +#endif + +#ifndef STATUS_INVALID_ACL +# define STATUS_INVALID_ACL ((NTSTATUS) 0xC0000077L) +#endif + +#ifndef STATUS_INVALID_SID +# define STATUS_INVALID_SID ((NTSTATUS) 0xC0000078L) +#endif + +#ifndef STATUS_INVALID_SECURITY_DESCR +# define STATUS_INVALID_SECURITY_DESCR ((NTSTATUS) 0xC0000079L) +#endif + +#ifndef STATUS_PROCEDURE_NOT_FOUND +# define STATUS_PROCEDURE_NOT_FOUND ((NTSTATUS) 0xC000007AL) +#endif + +#ifndef STATUS_INVALID_IMAGE_FORMAT +# define STATUS_INVALID_IMAGE_FORMAT ((NTSTATUS) 0xC000007BL) +#endif + +#ifndef STATUS_NO_TOKEN +# define STATUS_NO_TOKEN ((NTSTATUS) 0xC000007CL) +#endif + +#ifndef STATUS_BAD_INHERITANCE_ACL +# define STATUS_BAD_INHERITANCE_ACL ((NTSTATUS) 0xC000007DL) +#endif + +#ifndef STATUS_RANGE_NOT_LOCKED +# define STATUS_RANGE_NOT_LOCKED ((NTSTATUS) 0xC000007EL) +#endif + +#ifndef STATUS_DISK_FULL +# define STATUS_DISK_FULL ((NTSTATUS) 0xC000007FL) +#endif + +#ifndef STATUS_SERVER_DISABLED +# define STATUS_SERVER_DISABLED ((NTSTATUS) 0xC0000080L) +#endif + +#ifndef STATUS_SERVER_NOT_DISABLED +# define STATUS_SERVER_NOT_DISABLED ((NTSTATUS) 0xC0000081L) +#endif + +#ifndef STATUS_TOO_MANY_GUIDS_REQUESTED +# define STATUS_TOO_MANY_GUIDS_REQUESTED ((NTSTATUS) 0xC0000082L) +#endif + +#ifndef STATUS_GUIDS_EXHAUSTED +# define STATUS_GUIDS_EXHAUSTED ((NTSTATUS) 0xC0000083L) +#endif + +#ifndef STATUS_INVALID_ID_AUTHORITY +# define STATUS_INVALID_ID_AUTHORITY ((NTSTATUS) 0xC0000084L) +#endif + +#ifndef STATUS_AGENTS_EXHAUSTED +# define STATUS_AGENTS_EXHAUSTED ((NTSTATUS) 0xC0000085L) +#endif + +#ifndef STATUS_INVALID_VOLUME_LABEL +# define STATUS_INVALID_VOLUME_LABEL ((NTSTATUS) 0xC0000086L) +#endif + +#ifndef STATUS_SECTION_NOT_EXTENDED +# define STATUS_SECTION_NOT_EXTENDED ((NTSTATUS) 0xC0000087L) +#endif + +#ifndef STATUS_NOT_MAPPED_DATA +# define STATUS_NOT_MAPPED_DATA ((NTSTATUS) 0xC0000088L) +#endif + +#ifndef STATUS_RESOURCE_DATA_NOT_FOUND +# define STATUS_RESOURCE_DATA_NOT_FOUND ((NTSTATUS) 0xC0000089L) +#endif + +#ifndef STATUS_RESOURCE_TYPE_NOT_FOUND +# define STATUS_RESOURCE_TYPE_NOT_FOUND ((NTSTATUS) 0xC000008AL) +#endif + +#ifndef STATUS_RESOURCE_NAME_NOT_FOUND +# define STATUS_RESOURCE_NAME_NOT_FOUND ((NTSTATUS) 0xC000008BL) +#endif + +#ifndef STATUS_ARRAY_BOUNDS_EXCEEDED +# define STATUS_ARRAY_BOUNDS_EXCEEDED ((NTSTATUS) 0xC000008CL) +#endif + +#ifndef STATUS_FLOAT_DENORMAL_OPERAND +# define STATUS_FLOAT_DENORMAL_OPERAND ((NTSTATUS) 0xC000008DL) +#endif + +#ifndef STATUS_FLOAT_DIVIDE_BY_ZERO +# define STATUS_FLOAT_DIVIDE_BY_ZERO ((NTSTATUS) 0xC000008EL) +#endif + +#ifndef STATUS_FLOAT_INEXACT_RESULT +# define 
STATUS_FLOAT_INEXACT_RESULT ((NTSTATUS) 0xC000008FL) +#endif + +#ifndef STATUS_FLOAT_INVALID_OPERATION +# define STATUS_FLOAT_INVALID_OPERATION ((NTSTATUS) 0xC0000090L) +#endif + +#ifndef STATUS_FLOAT_OVERFLOW +# define STATUS_FLOAT_OVERFLOW ((NTSTATUS) 0xC0000091L) +#endif + +#ifndef STATUS_FLOAT_STACK_CHECK +# define STATUS_FLOAT_STACK_CHECK ((NTSTATUS) 0xC0000092L) +#endif + +#ifndef STATUS_FLOAT_UNDERFLOW +# define STATUS_FLOAT_UNDERFLOW ((NTSTATUS) 0xC0000093L) +#endif + +#ifndef STATUS_INTEGER_DIVIDE_BY_ZERO +# define STATUS_INTEGER_DIVIDE_BY_ZERO ((NTSTATUS) 0xC0000094L) +#endif + +#ifndef STATUS_INTEGER_OVERFLOW +# define STATUS_INTEGER_OVERFLOW ((NTSTATUS) 0xC0000095L) +#endif + +#ifndef STATUS_PRIVILEGED_INSTRUCTION +# define STATUS_PRIVILEGED_INSTRUCTION ((NTSTATUS) 0xC0000096L) +#endif + +#ifndef STATUS_TOO_MANY_PAGING_FILES +# define STATUS_TOO_MANY_PAGING_FILES ((NTSTATUS) 0xC0000097L) +#endif + +#ifndef STATUS_FILE_INVALID +# define STATUS_FILE_INVALID ((NTSTATUS) 0xC0000098L) +#endif + +#ifndef STATUS_ALLOTTED_SPACE_EXCEEDED +# define STATUS_ALLOTTED_SPACE_EXCEEDED ((NTSTATUS) 0xC0000099L) +#endif + +#ifndef STATUS_INSUFFICIENT_RESOURCES +# define STATUS_INSUFFICIENT_RESOURCES ((NTSTATUS) 0xC000009AL) +#endif + +#ifndef STATUS_DFS_EXIT_PATH_FOUND +# define STATUS_DFS_EXIT_PATH_FOUND ((NTSTATUS) 0xC000009BL) +#endif + +#ifndef STATUS_DEVICE_DATA_ERROR +# define STATUS_DEVICE_DATA_ERROR ((NTSTATUS) 0xC000009CL) +#endif + +#ifndef STATUS_DEVICE_NOT_CONNECTED +# define STATUS_DEVICE_NOT_CONNECTED ((NTSTATUS) 0xC000009DL) +#endif + +#ifndef STATUS_DEVICE_POWER_FAILURE +# define STATUS_DEVICE_POWER_FAILURE ((NTSTATUS) 0xC000009EL) +#endif + +#ifndef STATUS_FREE_VM_NOT_AT_BASE +# define STATUS_FREE_VM_NOT_AT_BASE ((NTSTATUS) 0xC000009FL) +#endif + +#ifndef STATUS_MEMORY_NOT_ALLOCATED +# define STATUS_MEMORY_NOT_ALLOCATED ((NTSTATUS) 0xC00000A0L) +#endif + +#ifndef STATUS_WORKING_SET_QUOTA +# define STATUS_WORKING_SET_QUOTA ((NTSTATUS) 0xC00000A1L) +#endif + +#ifndef STATUS_MEDIA_WRITE_PROTECTED +# define STATUS_MEDIA_WRITE_PROTECTED ((NTSTATUS) 0xC00000A2L) +#endif + +#ifndef STATUS_DEVICE_NOT_READY +# define STATUS_DEVICE_NOT_READY ((NTSTATUS) 0xC00000A3L) +#endif + +#ifndef STATUS_INVALID_GROUP_ATTRIBUTES +# define STATUS_INVALID_GROUP_ATTRIBUTES ((NTSTATUS) 0xC00000A4L) +#endif + +#ifndef STATUS_BAD_IMPERSONATION_LEVEL +# define STATUS_BAD_IMPERSONATION_LEVEL ((NTSTATUS) 0xC00000A5L) +#endif + +#ifndef STATUS_CANT_OPEN_ANONYMOUS +# define STATUS_CANT_OPEN_ANONYMOUS ((NTSTATUS) 0xC00000A6L) +#endif + +#ifndef STATUS_BAD_VALIDATION_CLASS +# define STATUS_BAD_VALIDATION_CLASS ((NTSTATUS) 0xC00000A7L) +#endif + +#ifndef STATUS_BAD_TOKEN_TYPE +# define STATUS_BAD_TOKEN_TYPE ((NTSTATUS) 0xC00000A8L) +#endif + +#ifndef STATUS_BAD_MASTER_BOOT_RECORD +# define STATUS_BAD_MASTER_BOOT_RECORD ((NTSTATUS) 0xC00000A9L) +#endif + +#ifndef STATUS_INSTRUCTION_MISALIGNMENT +# define STATUS_INSTRUCTION_MISALIGNMENT ((NTSTATUS) 0xC00000AAL) +#endif + +#ifndef STATUS_INSTANCE_NOT_AVAILABLE +# define STATUS_INSTANCE_NOT_AVAILABLE ((NTSTATUS) 0xC00000ABL) +#endif + +#ifndef STATUS_PIPE_NOT_AVAILABLE +# define STATUS_PIPE_NOT_AVAILABLE ((NTSTATUS) 0xC00000ACL) +#endif + +#ifndef STATUS_INVALID_PIPE_STATE +# define STATUS_INVALID_PIPE_STATE ((NTSTATUS) 0xC00000ADL) +#endif + +#ifndef STATUS_PIPE_BUSY +# define STATUS_PIPE_BUSY ((NTSTATUS) 0xC00000AEL) +#endif + +#ifndef STATUS_ILLEGAL_FUNCTION +# define STATUS_ILLEGAL_FUNCTION ((NTSTATUS) 0xC00000AFL) +#endif + +#ifndef STATUS_PIPE_DISCONNECTED +# 
define STATUS_PIPE_DISCONNECTED ((NTSTATUS) 0xC00000B0L) +#endif + +#ifndef STATUS_PIPE_CLOSING +# define STATUS_PIPE_CLOSING ((NTSTATUS) 0xC00000B1L) +#endif + +#ifndef STATUS_PIPE_CONNECTED +# define STATUS_PIPE_CONNECTED ((NTSTATUS) 0xC00000B2L) +#endif + +#ifndef STATUS_PIPE_LISTENING +# define STATUS_PIPE_LISTENING ((NTSTATUS) 0xC00000B3L) +#endif + +#ifndef STATUS_INVALID_READ_MODE +# define STATUS_INVALID_READ_MODE ((NTSTATUS) 0xC00000B4L) +#endif + +#ifndef STATUS_IO_TIMEOUT +# define STATUS_IO_TIMEOUT ((NTSTATUS) 0xC00000B5L) +#endif + +#ifndef STATUS_FILE_FORCED_CLOSED +# define STATUS_FILE_FORCED_CLOSED ((NTSTATUS) 0xC00000B6L) +#endif + +#ifndef STATUS_PROFILING_NOT_STARTED +# define STATUS_PROFILING_NOT_STARTED ((NTSTATUS) 0xC00000B7L) +#endif + +#ifndef STATUS_PROFILING_NOT_STOPPED +# define STATUS_PROFILING_NOT_STOPPED ((NTSTATUS) 0xC00000B8L) +#endif + +#ifndef STATUS_COULD_NOT_INTERPRET +# define STATUS_COULD_NOT_INTERPRET ((NTSTATUS) 0xC00000B9L) +#endif + +#ifndef STATUS_FILE_IS_A_DIRECTORY +# define STATUS_FILE_IS_A_DIRECTORY ((NTSTATUS) 0xC00000BAL) +#endif + +#ifndef STATUS_NOT_SUPPORTED +# define STATUS_NOT_SUPPORTED ((NTSTATUS) 0xC00000BBL) +#endif + +#ifndef STATUS_REMOTE_NOT_LISTENING +# define STATUS_REMOTE_NOT_LISTENING ((NTSTATUS) 0xC00000BCL) +#endif + +#ifndef STATUS_DUPLICATE_NAME +# define STATUS_DUPLICATE_NAME ((NTSTATUS) 0xC00000BDL) +#endif + +#ifndef STATUS_BAD_NETWORK_PATH +# define STATUS_BAD_NETWORK_PATH ((NTSTATUS) 0xC00000BEL) +#endif + +#ifndef STATUS_NETWORK_BUSY +# define STATUS_NETWORK_BUSY ((NTSTATUS) 0xC00000BFL) +#endif + +#ifndef STATUS_DEVICE_DOES_NOT_EXIST +# define STATUS_DEVICE_DOES_NOT_EXIST ((NTSTATUS) 0xC00000C0L) +#endif + +#ifndef STATUS_TOO_MANY_COMMANDS +# define STATUS_TOO_MANY_COMMANDS ((NTSTATUS) 0xC00000C1L) +#endif + +#ifndef STATUS_ADAPTER_HARDWARE_ERROR +# define STATUS_ADAPTER_HARDWARE_ERROR ((NTSTATUS) 0xC00000C2L) +#endif + +#ifndef STATUS_INVALID_NETWORK_RESPONSE +# define STATUS_INVALID_NETWORK_RESPONSE ((NTSTATUS) 0xC00000C3L) +#endif + +#ifndef STATUS_UNEXPECTED_NETWORK_ERROR +# define STATUS_UNEXPECTED_NETWORK_ERROR ((NTSTATUS) 0xC00000C4L) +#endif + +#ifndef STATUS_BAD_REMOTE_ADAPTER +# define STATUS_BAD_REMOTE_ADAPTER ((NTSTATUS) 0xC00000C5L) +#endif + +#ifndef STATUS_PRINT_QUEUE_FULL +# define STATUS_PRINT_QUEUE_FULL ((NTSTATUS) 0xC00000C6L) +#endif + +#ifndef STATUS_NO_SPOOL_SPACE +# define STATUS_NO_SPOOL_SPACE ((NTSTATUS) 0xC00000C7L) +#endif + +#ifndef STATUS_PRINT_CANCELLED +# define STATUS_PRINT_CANCELLED ((NTSTATUS) 0xC00000C8L) +#endif + +#ifndef STATUS_NETWORK_NAME_DELETED +# define STATUS_NETWORK_NAME_DELETED ((NTSTATUS) 0xC00000C9L) +#endif + +#ifndef STATUS_NETWORK_ACCESS_DENIED +# define STATUS_NETWORK_ACCESS_DENIED ((NTSTATUS) 0xC00000CAL) +#endif + +#ifndef STATUS_BAD_DEVICE_TYPE +# define STATUS_BAD_DEVICE_TYPE ((NTSTATUS) 0xC00000CBL) +#endif + +#ifndef STATUS_BAD_NETWORK_NAME +# define STATUS_BAD_NETWORK_NAME ((NTSTATUS) 0xC00000CCL) +#endif + +#ifndef STATUS_TOO_MANY_NAMES +# define STATUS_TOO_MANY_NAMES ((NTSTATUS) 0xC00000CDL) +#endif + +#ifndef STATUS_TOO_MANY_SESSIONS +# define STATUS_TOO_MANY_SESSIONS ((NTSTATUS) 0xC00000CEL) +#endif + +#ifndef STATUS_SHARING_PAUSED +# define STATUS_SHARING_PAUSED ((NTSTATUS) 0xC00000CFL) +#endif + +#ifndef STATUS_REQUEST_NOT_ACCEPTED +# define STATUS_REQUEST_NOT_ACCEPTED ((NTSTATUS) 0xC00000D0L) +#endif + +#ifndef STATUS_REDIRECTOR_PAUSED +# define STATUS_REDIRECTOR_PAUSED ((NTSTATUS) 0xC00000D1L) +#endif + +#ifndef STATUS_NET_WRITE_FAULT +# define 
STATUS_NET_WRITE_FAULT ((NTSTATUS) 0xC00000D2L) +#endif + +#ifndef STATUS_PROFILING_AT_LIMIT +# define STATUS_PROFILING_AT_LIMIT ((NTSTATUS) 0xC00000D3L) +#endif + +#ifndef STATUS_NOT_SAME_DEVICE +# define STATUS_NOT_SAME_DEVICE ((NTSTATUS) 0xC00000D4L) +#endif + +#ifndef STATUS_FILE_RENAMED +# define STATUS_FILE_RENAMED ((NTSTATUS) 0xC00000D5L) +#endif + +#ifndef STATUS_VIRTUAL_CIRCUIT_CLOSED +# define STATUS_VIRTUAL_CIRCUIT_CLOSED ((NTSTATUS) 0xC00000D6L) +#endif + +#ifndef STATUS_NO_SECURITY_ON_OBJECT +# define STATUS_NO_SECURITY_ON_OBJECT ((NTSTATUS) 0xC00000D7L) +#endif + +#ifndef STATUS_CANT_WAIT +# define STATUS_CANT_WAIT ((NTSTATUS) 0xC00000D8L) +#endif + +#ifndef STATUS_PIPE_EMPTY +# define STATUS_PIPE_EMPTY ((NTSTATUS) 0xC00000D9L) +#endif + +#ifndef STATUS_CANT_ACCESS_DOMAIN_INFO +# define STATUS_CANT_ACCESS_DOMAIN_INFO ((NTSTATUS) 0xC00000DAL) +#endif + +#ifndef STATUS_CANT_TERMINATE_SELF +# define STATUS_CANT_TERMINATE_SELF ((NTSTATUS) 0xC00000DBL) +#endif + +#ifndef STATUS_INVALID_SERVER_STATE +# define STATUS_INVALID_SERVER_STATE ((NTSTATUS) 0xC00000DCL) +#endif + +#ifndef STATUS_INVALID_DOMAIN_STATE +# define STATUS_INVALID_DOMAIN_STATE ((NTSTATUS) 0xC00000DDL) +#endif + +#ifndef STATUS_INVALID_DOMAIN_ROLE +# define STATUS_INVALID_DOMAIN_ROLE ((NTSTATUS) 0xC00000DEL) +#endif + +#ifndef STATUS_NO_SUCH_DOMAIN +# define STATUS_NO_SUCH_DOMAIN ((NTSTATUS) 0xC00000DFL) +#endif + +#ifndef STATUS_DOMAIN_EXISTS +# define STATUS_DOMAIN_EXISTS ((NTSTATUS) 0xC00000E0L) +#endif + +#ifndef STATUS_DOMAIN_LIMIT_EXCEEDED +# define STATUS_DOMAIN_LIMIT_EXCEEDED ((NTSTATUS) 0xC00000E1L) +#endif + +#ifndef STATUS_OPLOCK_NOT_GRANTED +# define STATUS_OPLOCK_NOT_GRANTED ((NTSTATUS) 0xC00000E2L) +#endif + +#ifndef STATUS_INVALID_OPLOCK_PROTOCOL +# define STATUS_INVALID_OPLOCK_PROTOCOL ((NTSTATUS) 0xC00000E3L) +#endif + +#ifndef STATUS_INTERNAL_DB_CORRUPTION +# define STATUS_INTERNAL_DB_CORRUPTION ((NTSTATUS) 0xC00000E4L) +#endif + +#ifndef STATUS_INTERNAL_ERROR +# define STATUS_INTERNAL_ERROR ((NTSTATUS) 0xC00000E5L) +#endif + +#ifndef STATUS_GENERIC_NOT_MAPPED +# define STATUS_GENERIC_NOT_MAPPED ((NTSTATUS) 0xC00000E6L) +#endif + +#ifndef STATUS_BAD_DESCRIPTOR_FORMAT +# define STATUS_BAD_DESCRIPTOR_FORMAT ((NTSTATUS) 0xC00000E7L) +#endif + +#ifndef STATUS_INVALID_USER_BUFFER +# define STATUS_INVALID_USER_BUFFER ((NTSTATUS) 0xC00000E8L) +#endif + +#ifndef STATUS_UNEXPECTED_IO_ERROR +# define STATUS_UNEXPECTED_IO_ERROR ((NTSTATUS) 0xC00000E9L) +#endif + +#ifndef STATUS_UNEXPECTED_MM_CREATE_ERR +# define STATUS_UNEXPECTED_MM_CREATE_ERR ((NTSTATUS) 0xC00000EAL) +#endif + +#ifndef STATUS_UNEXPECTED_MM_MAP_ERROR +# define STATUS_UNEXPECTED_MM_MAP_ERROR ((NTSTATUS) 0xC00000EBL) +#endif + +#ifndef STATUS_UNEXPECTED_MM_EXTEND_ERR +# define STATUS_UNEXPECTED_MM_EXTEND_ERR ((NTSTATUS) 0xC00000ECL) +#endif + +#ifndef STATUS_NOT_LOGON_PROCESS +# define STATUS_NOT_LOGON_PROCESS ((NTSTATUS) 0xC00000EDL) +#endif + +#ifndef STATUS_LOGON_SESSION_EXISTS +# define STATUS_LOGON_SESSION_EXISTS ((NTSTATUS) 0xC00000EEL) +#endif + +#ifndef STATUS_INVALID_PARAMETER_1 +# define STATUS_INVALID_PARAMETER_1 ((NTSTATUS) 0xC00000EFL) +#endif + +#ifndef STATUS_INVALID_PARAMETER_2 +# define STATUS_INVALID_PARAMETER_2 ((NTSTATUS) 0xC00000F0L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_3 +# define STATUS_INVALID_PARAMETER_3 ((NTSTATUS) 0xC00000F1L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_4 +# define STATUS_INVALID_PARAMETER_4 ((NTSTATUS) 0xC00000F2L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_5 +# define 
STATUS_INVALID_PARAMETER_5 ((NTSTATUS) 0xC00000F3L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_6 +# define STATUS_INVALID_PARAMETER_6 ((NTSTATUS) 0xC00000F4L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_7 +# define STATUS_INVALID_PARAMETER_7 ((NTSTATUS) 0xC00000F5L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_8 +# define STATUS_INVALID_PARAMETER_8 ((NTSTATUS) 0xC00000F6L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_9 +# define STATUS_INVALID_PARAMETER_9 ((NTSTATUS) 0xC00000F7L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_10 +# define STATUS_INVALID_PARAMETER_10 ((NTSTATUS) 0xC00000F8L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_11 +# define STATUS_INVALID_PARAMETER_11 ((NTSTATUS) 0xC00000F9L) +#endif + +#ifndef STATUS_INVALID_PARAMETER_12 +# define STATUS_INVALID_PARAMETER_12 ((NTSTATUS) 0xC00000FAL) +#endif + +#ifndef STATUS_REDIRECTOR_NOT_STARTED +# define STATUS_REDIRECTOR_NOT_STARTED ((NTSTATUS) 0xC00000FBL) +#endif + +#ifndef STATUS_REDIRECTOR_STARTED +# define STATUS_REDIRECTOR_STARTED ((NTSTATUS) 0xC00000FCL) +#endif + +#ifndef STATUS_STACK_OVERFLOW +# define STATUS_STACK_OVERFLOW ((NTSTATUS) 0xC00000FDL) +#endif + +#ifndef STATUS_NO_SUCH_PACKAGE +# define STATUS_NO_SUCH_PACKAGE ((NTSTATUS) 0xC00000FEL) +#endif + +#ifndef STATUS_BAD_FUNCTION_TABLE +# define STATUS_BAD_FUNCTION_TABLE ((NTSTATUS) 0xC00000FFL) +#endif + +#ifndef STATUS_VARIABLE_NOT_FOUND +# define STATUS_VARIABLE_NOT_FOUND ((NTSTATUS) 0xC0000100L) +#endif + +#ifndef STATUS_DIRECTORY_NOT_EMPTY +# define STATUS_DIRECTORY_NOT_EMPTY ((NTSTATUS) 0xC0000101L) +#endif + +#ifndef STATUS_FILE_CORRUPT_ERROR +# define STATUS_FILE_CORRUPT_ERROR ((NTSTATUS) 0xC0000102L) +#endif + +#ifndef STATUS_NOT_A_DIRECTORY +# define STATUS_NOT_A_DIRECTORY ((NTSTATUS) 0xC0000103L) +#endif + +#ifndef STATUS_BAD_LOGON_SESSION_STATE +# define STATUS_BAD_LOGON_SESSION_STATE ((NTSTATUS) 0xC0000104L) +#endif + +#ifndef STATUS_LOGON_SESSION_COLLISION +# define STATUS_LOGON_SESSION_COLLISION ((NTSTATUS) 0xC0000105L) +#endif + +#ifndef STATUS_NAME_TOO_LONG +# define STATUS_NAME_TOO_LONG ((NTSTATUS) 0xC0000106L) +#endif + +#ifndef STATUS_FILES_OPEN +# define STATUS_FILES_OPEN ((NTSTATUS) 0xC0000107L) +#endif + +#ifndef STATUS_CONNECTION_IN_USE +# define STATUS_CONNECTION_IN_USE ((NTSTATUS) 0xC0000108L) +#endif + +#ifndef STATUS_MESSAGE_NOT_FOUND +# define STATUS_MESSAGE_NOT_FOUND ((NTSTATUS) 0xC0000109L) +#endif + +#ifndef STATUS_PROCESS_IS_TERMINATING +# define STATUS_PROCESS_IS_TERMINATING ((NTSTATUS) 0xC000010AL) +#endif + +#ifndef STATUS_INVALID_LOGON_TYPE +# define STATUS_INVALID_LOGON_TYPE ((NTSTATUS) 0xC000010BL) +#endif + +#ifndef STATUS_NO_GUID_TRANSLATION +# define STATUS_NO_GUID_TRANSLATION ((NTSTATUS) 0xC000010CL) +#endif + +#ifndef STATUS_CANNOT_IMPERSONATE +# define STATUS_CANNOT_IMPERSONATE ((NTSTATUS) 0xC000010DL) +#endif + +#ifndef STATUS_IMAGE_ALREADY_LOADED +# define STATUS_IMAGE_ALREADY_LOADED ((NTSTATUS) 0xC000010EL) +#endif + +#ifndef STATUS_ABIOS_NOT_PRESENT +# define STATUS_ABIOS_NOT_PRESENT ((NTSTATUS) 0xC000010FL) +#endif + +#ifndef STATUS_ABIOS_LID_NOT_EXIST +# define STATUS_ABIOS_LID_NOT_EXIST ((NTSTATUS) 0xC0000110L) +#endif + +#ifndef STATUS_ABIOS_LID_ALREADY_OWNED +# define STATUS_ABIOS_LID_ALREADY_OWNED ((NTSTATUS) 0xC0000111L) +#endif + +#ifndef STATUS_ABIOS_NOT_LID_OWNER +# define STATUS_ABIOS_NOT_LID_OWNER ((NTSTATUS) 0xC0000112L) +#endif + +#ifndef STATUS_ABIOS_INVALID_COMMAND +# define STATUS_ABIOS_INVALID_COMMAND ((NTSTATUS) 0xC0000113L) +#endif + +#ifndef STATUS_ABIOS_INVALID_LID +# define STATUS_ABIOS_INVALID_LID 
((NTSTATUS) 0xC0000114L) +#endif + +#ifndef STATUS_ABIOS_SELECTOR_NOT_AVAILABLE +# define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE ((NTSTATUS) 0xC0000115L) +#endif + +#ifndef STATUS_ABIOS_INVALID_SELECTOR +# define STATUS_ABIOS_INVALID_SELECTOR ((NTSTATUS) 0xC0000116L) +#endif + +#ifndef STATUS_NO_LDT +# define STATUS_NO_LDT ((NTSTATUS) 0xC0000117L) +#endif + +#ifndef STATUS_INVALID_LDT_SIZE +# define STATUS_INVALID_LDT_SIZE ((NTSTATUS) 0xC0000118L) +#endif + +#ifndef STATUS_INVALID_LDT_OFFSET +# define STATUS_INVALID_LDT_OFFSET ((NTSTATUS) 0xC0000119L) +#endif + +#ifndef STATUS_INVALID_LDT_DESCRIPTOR +# define STATUS_INVALID_LDT_DESCRIPTOR ((NTSTATUS) 0xC000011AL) +#endif + +#ifndef STATUS_INVALID_IMAGE_NE_FORMAT +# define STATUS_INVALID_IMAGE_NE_FORMAT ((NTSTATUS) 0xC000011BL) +#endif + +#ifndef STATUS_RXACT_INVALID_STATE +# define STATUS_RXACT_INVALID_STATE ((NTSTATUS) 0xC000011CL) +#endif + +#ifndef STATUS_RXACT_COMMIT_FAILURE +# define STATUS_RXACT_COMMIT_FAILURE ((NTSTATUS) 0xC000011DL) +#endif + +#ifndef STATUS_MAPPED_FILE_SIZE_ZERO +# define STATUS_MAPPED_FILE_SIZE_ZERO ((NTSTATUS) 0xC000011EL) +#endif + +#ifndef STATUS_TOO_MANY_OPENED_FILES +# define STATUS_TOO_MANY_OPENED_FILES ((NTSTATUS) 0xC000011FL) +#endif + +#ifndef STATUS_CANCELLED +# define STATUS_CANCELLED ((NTSTATUS) 0xC0000120L) +#endif + +#ifndef STATUS_CANNOT_DELETE +# define STATUS_CANNOT_DELETE ((NTSTATUS) 0xC0000121L) +#endif + +#ifndef STATUS_INVALID_COMPUTER_NAME +# define STATUS_INVALID_COMPUTER_NAME ((NTSTATUS) 0xC0000122L) +#endif + +#ifndef STATUS_FILE_DELETED +# define STATUS_FILE_DELETED ((NTSTATUS) 0xC0000123L) +#endif + +#ifndef STATUS_SPECIAL_ACCOUNT +# define STATUS_SPECIAL_ACCOUNT ((NTSTATUS) 0xC0000124L) +#endif + +#ifndef STATUS_SPECIAL_GROUP +# define STATUS_SPECIAL_GROUP ((NTSTATUS) 0xC0000125L) +#endif + +#ifndef STATUS_SPECIAL_USER +# define STATUS_SPECIAL_USER ((NTSTATUS) 0xC0000126L) +#endif + +#ifndef STATUS_MEMBERS_PRIMARY_GROUP +# define STATUS_MEMBERS_PRIMARY_GROUP ((NTSTATUS) 0xC0000127L) +#endif + +#ifndef STATUS_FILE_CLOSED +# define STATUS_FILE_CLOSED ((NTSTATUS) 0xC0000128L) +#endif + +#ifndef STATUS_TOO_MANY_THREADS +# define STATUS_TOO_MANY_THREADS ((NTSTATUS) 0xC0000129L) +#endif + +#ifndef STATUS_THREAD_NOT_IN_PROCESS +# define STATUS_THREAD_NOT_IN_PROCESS ((NTSTATUS) 0xC000012AL) +#endif + +#ifndef STATUS_TOKEN_ALREADY_IN_USE +# define STATUS_TOKEN_ALREADY_IN_USE ((NTSTATUS) 0xC000012BL) +#endif + +#ifndef STATUS_PAGEFILE_QUOTA_EXCEEDED +# define STATUS_PAGEFILE_QUOTA_EXCEEDED ((NTSTATUS) 0xC000012CL) +#endif + +#ifndef STATUS_COMMITMENT_LIMIT +# define STATUS_COMMITMENT_LIMIT ((NTSTATUS) 0xC000012DL) +#endif + +#ifndef STATUS_INVALID_IMAGE_LE_FORMAT +# define STATUS_INVALID_IMAGE_LE_FORMAT ((NTSTATUS) 0xC000012EL) +#endif + +#ifndef STATUS_INVALID_IMAGE_NOT_MZ +# define STATUS_INVALID_IMAGE_NOT_MZ ((NTSTATUS) 0xC000012FL) +#endif + +#ifndef STATUS_INVALID_IMAGE_PROTECT +# define STATUS_INVALID_IMAGE_PROTECT ((NTSTATUS) 0xC0000130L) +#endif + +#ifndef STATUS_INVALID_IMAGE_WIN_16 +# define STATUS_INVALID_IMAGE_WIN_16 ((NTSTATUS) 0xC0000131L) +#endif + +#ifndef STATUS_LOGON_SERVER_CONFLICT +# define STATUS_LOGON_SERVER_CONFLICT ((NTSTATUS) 0xC0000132L) +#endif + +#ifndef STATUS_TIME_DIFFERENCE_AT_DC +# define STATUS_TIME_DIFFERENCE_AT_DC ((NTSTATUS) 0xC0000133L) +#endif + +#ifndef STATUS_SYNCHRONIZATION_REQUIRED +# define STATUS_SYNCHRONIZATION_REQUIRED ((NTSTATUS) 0xC0000134L) +#endif + +#ifndef STATUS_DLL_NOT_FOUND +# define STATUS_DLL_NOT_FOUND ((NTSTATUS) 0xC0000135L) +#endif + 
+#ifndef STATUS_OPEN_FAILED +# define STATUS_OPEN_FAILED ((NTSTATUS) 0xC0000136L) +#endif + +#ifndef STATUS_IO_PRIVILEGE_FAILED +# define STATUS_IO_PRIVILEGE_FAILED ((NTSTATUS) 0xC0000137L) +#endif + +#ifndef STATUS_ORDINAL_NOT_FOUND +# define STATUS_ORDINAL_NOT_FOUND ((NTSTATUS) 0xC0000138L) +#endif + +#ifndef STATUS_ENTRYPOINT_NOT_FOUND +# define STATUS_ENTRYPOINT_NOT_FOUND ((NTSTATUS) 0xC0000139L) +#endif + +#ifndef STATUS_CONTROL_C_EXIT +# define STATUS_CONTROL_C_EXIT ((NTSTATUS) 0xC000013AL) +#endif + +#ifndef STATUS_LOCAL_DISCONNECT +# define STATUS_LOCAL_DISCONNECT ((NTSTATUS) 0xC000013BL) +#endif + +#ifndef STATUS_REMOTE_DISCONNECT +# define STATUS_REMOTE_DISCONNECT ((NTSTATUS) 0xC000013CL) +#endif + +#ifndef STATUS_REMOTE_RESOURCES +# define STATUS_REMOTE_RESOURCES ((NTSTATUS) 0xC000013DL) +#endif + +#ifndef STATUS_LINK_FAILED +# define STATUS_LINK_FAILED ((NTSTATUS) 0xC000013EL) +#endif + +#ifndef STATUS_LINK_TIMEOUT +# define STATUS_LINK_TIMEOUT ((NTSTATUS) 0xC000013FL) +#endif + +#ifndef STATUS_INVALID_CONNECTION +# define STATUS_INVALID_CONNECTION ((NTSTATUS) 0xC0000140L) +#endif + +#ifndef STATUS_INVALID_ADDRESS +# define STATUS_INVALID_ADDRESS ((NTSTATUS) 0xC0000141L) +#endif + +#ifndef STATUS_DLL_INIT_FAILED +# define STATUS_DLL_INIT_FAILED ((NTSTATUS) 0xC0000142L) +#endif + +#ifndef STATUS_MISSING_SYSTEMFILE +# define STATUS_MISSING_SYSTEMFILE ((NTSTATUS) 0xC0000143L) +#endif + +#ifndef STATUS_UNHANDLED_EXCEPTION +# define STATUS_UNHANDLED_EXCEPTION ((NTSTATUS) 0xC0000144L) +#endif + +#ifndef STATUS_APP_INIT_FAILURE +# define STATUS_APP_INIT_FAILURE ((NTSTATUS) 0xC0000145L) +#endif + +#ifndef STATUS_PAGEFILE_CREATE_FAILED +# define STATUS_PAGEFILE_CREATE_FAILED ((NTSTATUS) 0xC0000146L) +#endif + +#ifndef STATUS_NO_PAGEFILE +# define STATUS_NO_PAGEFILE ((NTSTATUS) 0xC0000147L) +#endif + +#ifndef STATUS_INVALID_LEVEL +# define STATUS_INVALID_LEVEL ((NTSTATUS) 0xC0000148L) +#endif + +#ifndef STATUS_WRONG_PASSWORD_CORE +# define STATUS_WRONG_PASSWORD_CORE ((NTSTATUS) 0xC0000149L) +#endif + +#ifndef STATUS_ILLEGAL_FLOAT_CONTEXT +# define STATUS_ILLEGAL_FLOAT_CONTEXT ((NTSTATUS) 0xC000014AL) +#endif + +#ifndef STATUS_PIPE_BROKEN +# define STATUS_PIPE_BROKEN ((NTSTATUS) 0xC000014BL) +#endif + +#ifndef STATUS_REGISTRY_CORRUPT +# define STATUS_REGISTRY_CORRUPT ((NTSTATUS) 0xC000014CL) +#endif + +#ifndef STATUS_REGISTRY_IO_FAILED +# define STATUS_REGISTRY_IO_FAILED ((NTSTATUS) 0xC000014DL) +#endif + +#ifndef STATUS_NO_EVENT_PAIR +# define STATUS_NO_EVENT_PAIR ((NTSTATUS) 0xC000014EL) +#endif + +#ifndef STATUS_UNRECOGNIZED_VOLUME +# define STATUS_UNRECOGNIZED_VOLUME ((NTSTATUS) 0xC000014FL) +#endif + +#ifndef STATUS_SERIAL_NO_DEVICE_INITED +# define STATUS_SERIAL_NO_DEVICE_INITED ((NTSTATUS) 0xC0000150L) +#endif + +#ifndef STATUS_NO_SUCH_ALIAS +# define STATUS_NO_SUCH_ALIAS ((NTSTATUS) 0xC0000151L) +#endif + +#ifndef STATUS_MEMBER_NOT_IN_ALIAS +# define STATUS_MEMBER_NOT_IN_ALIAS ((NTSTATUS) 0xC0000152L) +#endif + +#ifndef STATUS_MEMBER_IN_ALIAS +# define STATUS_MEMBER_IN_ALIAS ((NTSTATUS) 0xC0000153L) +#endif + +#ifndef STATUS_ALIAS_EXISTS +# define STATUS_ALIAS_EXISTS ((NTSTATUS) 0xC0000154L) +#endif + +#ifndef STATUS_LOGON_NOT_GRANTED +# define STATUS_LOGON_NOT_GRANTED ((NTSTATUS) 0xC0000155L) +#endif + +#ifndef STATUS_TOO_MANY_SECRETS +# define STATUS_TOO_MANY_SECRETS ((NTSTATUS) 0xC0000156L) +#endif + +#ifndef STATUS_SECRET_TOO_LONG +# define STATUS_SECRET_TOO_LONG ((NTSTATUS) 0xC0000157L) +#endif + +#ifndef STATUS_INTERNAL_DB_ERROR +# define STATUS_INTERNAL_DB_ERROR ((NTSTATUS) 
0xC0000158L) +#endif + +#ifndef STATUS_FULLSCREEN_MODE +# define STATUS_FULLSCREEN_MODE ((NTSTATUS) 0xC0000159L) +#endif + +#ifndef STATUS_TOO_MANY_CONTEXT_IDS +# define STATUS_TOO_MANY_CONTEXT_IDS ((NTSTATUS) 0xC000015AL) +#endif + +#ifndef STATUS_LOGON_TYPE_NOT_GRANTED +# define STATUS_LOGON_TYPE_NOT_GRANTED ((NTSTATUS) 0xC000015BL) +#endif + +#ifndef STATUS_NOT_REGISTRY_FILE +# define STATUS_NOT_REGISTRY_FILE ((NTSTATUS) 0xC000015CL) +#endif + +#ifndef STATUS_NT_CROSS_ENCRYPTION_REQUIRED +# define STATUS_NT_CROSS_ENCRYPTION_REQUIRED ((NTSTATUS) 0xC000015DL) +#endif + +#ifndef STATUS_DOMAIN_CTRLR_CONFIG_ERROR +# define STATUS_DOMAIN_CTRLR_CONFIG_ERROR ((NTSTATUS) 0xC000015EL) +#endif + +#ifndef STATUS_FT_MISSING_MEMBER +# define STATUS_FT_MISSING_MEMBER ((NTSTATUS) 0xC000015FL) +#endif + +#ifndef STATUS_ILL_FORMED_SERVICE_ENTRY +# define STATUS_ILL_FORMED_SERVICE_ENTRY ((NTSTATUS) 0xC0000160L) +#endif + +#ifndef STATUS_ILLEGAL_CHARACTER +# define STATUS_ILLEGAL_CHARACTER ((NTSTATUS) 0xC0000161L) +#endif + +#ifndef STATUS_UNMAPPABLE_CHARACTER +# define STATUS_UNMAPPABLE_CHARACTER ((NTSTATUS) 0xC0000162L) +#endif + +#ifndef STATUS_UNDEFINED_CHARACTER +# define STATUS_UNDEFINED_CHARACTER ((NTSTATUS) 0xC0000163L) +#endif + +#ifndef STATUS_FLOPPY_VOLUME +# define STATUS_FLOPPY_VOLUME ((NTSTATUS) 0xC0000164L) +#endif + +#ifndef STATUS_FLOPPY_ID_MARK_NOT_FOUND +# define STATUS_FLOPPY_ID_MARK_NOT_FOUND ((NTSTATUS) 0xC0000165L) +#endif + +#ifndef STATUS_FLOPPY_WRONG_CYLINDER +# define STATUS_FLOPPY_WRONG_CYLINDER ((NTSTATUS) 0xC0000166L) +#endif + +#ifndef STATUS_FLOPPY_UNKNOWN_ERROR +# define STATUS_FLOPPY_UNKNOWN_ERROR ((NTSTATUS) 0xC0000167L) +#endif + +#ifndef STATUS_FLOPPY_BAD_REGISTERS +# define STATUS_FLOPPY_BAD_REGISTERS ((NTSTATUS) 0xC0000168L) +#endif + +#ifndef STATUS_DISK_RECALIBRATE_FAILED +# define STATUS_DISK_RECALIBRATE_FAILED ((NTSTATUS) 0xC0000169L) +#endif + +#ifndef STATUS_DISK_OPERATION_FAILED +# define STATUS_DISK_OPERATION_FAILED ((NTSTATUS) 0xC000016AL) +#endif + +#ifndef STATUS_DISK_RESET_FAILED +# define STATUS_DISK_RESET_FAILED ((NTSTATUS) 0xC000016BL) +#endif + +#ifndef STATUS_SHARED_IRQ_BUSY +# define STATUS_SHARED_IRQ_BUSY ((NTSTATUS) 0xC000016CL) +#endif + +#ifndef STATUS_FT_ORPHANING +# define STATUS_FT_ORPHANING ((NTSTATUS) 0xC000016DL) +#endif + +#ifndef STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT +# define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT ((NTSTATUS) 0xC000016EL) +#endif + +#ifndef STATUS_PARTITION_FAILURE +# define STATUS_PARTITION_FAILURE ((NTSTATUS) 0xC0000172L) +#endif + +#ifndef STATUS_INVALID_BLOCK_LENGTH +# define STATUS_INVALID_BLOCK_LENGTH ((NTSTATUS) 0xC0000173L) +#endif + +#ifndef STATUS_DEVICE_NOT_PARTITIONED +# define STATUS_DEVICE_NOT_PARTITIONED ((NTSTATUS) 0xC0000174L) +#endif + +#ifndef STATUS_UNABLE_TO_LOCK_MEDIA +# define STATUS_UNABLE_TO_LOCK_MEDIA ((NTSTATUS) 0xC0000175L) +#endif + +#ifndef STATUS_UNABLE_TO_UNLOAD_MEDIA +# define STATUS_UNABLE_TO_UNLOAD_MEDIA ((NTSTATUS) 0xC0000176L) +#endif + +#ifndef STATUS_EOM_OVERFLOW +# define STATUS_EOM_OVERFLOW ((NTSTATUS) 0xC0000177L) +#endif + +#ifndef STATUS_NO_MEDIA +# define STATUS_NO_MEDIA ((NTSTATUS) 0xC0000178L) +#endif + +#ifndef STATUS_NO_SUCH_MEMBER +# define STATUS_NO_SUCH_MEMBER ((NTSTATUS) 0xC000017AL) +#endif + +#ifndef STATUS_INVALID_MEMBER +# define STATUS_INVALID_MEMBER ((NTSTATUS) 0xC000017BL) +#endif + +#ifndef STATUS_KEY_DELETED +# define STATUS_KEY_DELETED ((NTSTATUS) 0xC000017CL) +#endif + +#ifndef STATUS_NO_LOG_SPACE +# define STATUS_NO_LOG_SPACE ((NTSTATUS) 0xC000017DL) 
+#endif + +#ifndef STATUS_TOO_MANY_SIDS +# define STATUS_TOO_MANY_SIDS ((NTSTATUS) 0xC000017EL) +#endif + +#ifndef STATUS_LM_CROSS_ENCRYPTION_REQUIRED +# define STATUS_LM_CROSS_ENCRYPTION_REQUIRED ((NTSTATUS) 0xC000017FL) +#endif + +#ifndef STATUS_KEY_HAS_CHILDREN +# define STATUS_KEY_HAS_CHILDREN ((NTSTATUS) 0xC0000180L) +#endif + +#ifndef STATUS_CHILD_MUST_BE_VOLATILE +# define STATUS_CHILD_MUST_BE_VOLATILE ((NTSTATUS) 0xC0000181L) +#endif + +#ifndef STATUS_DEVICE_CONFIGURATION_ERROR +# define STATUS_DEVICE_CONFIGURATION_ERROR ((NTSTATUS) 0xC0000182L) +#endif + +#ifndef STATUS_DRIVER_INTERNAL_ERROR +# define STATUS_DRIVER_INTERNAL_ERROR ((NTSTATUS) 0xC0000183L) +#endif + +#ifndef STATUS_INVALID_DEVICE_STATE +# define STATUS_INVALID_DEVICE_STATE ((NTSTATUS) 0xC0000184L) +#endif + +#ifndef STATUS_IO_DEVICE_ERROR +# define STATUS_IO_DEVICE_ERROR ((NTSTATUS) 0xC0000185L) +#endif + +#ifndef STATUS_DEVICE_PROTOCOL_ERROR +# define STATUS_DEVICE_PROTOCOL_ERROR ((NTSTATUS) 0xC0000186L) +#endif + +#ifndef STATUS_BACKUP_CONTROLLER +# define STATUS_BACKUP_CONTROLLER ((NTSTATUS) 0xC0000187L) +#endif + +#ifndef STATUS_LOG_FILE_FULL +# define STATUS_LOG_FILE_FULL ((NTSTATUS) 0xC0000188L) +#endif + +#ifndef STATUS_TOO_LATE +# define STATUS_TOO_LATE ((NTSTATUS) 0xC0000189L) +#endif + +#ifndef STATUS_NO_TRUST_LSA_SECRET +# define STATUS_NO_TRUST_LSA_SECRET ((NTSTATUS) 0xC000018AL) +#endif + +#ifndef STATUS_NO_TRUST_SAM_ACCOUNT +# define STATUS_NO_TRUST_SAM_ACCOUNT ((NTSTATUS) 0xC000018BL) +#endif + +#ifndef STATUS_TRUSTED_DOMAIN_FAILURE +# define STATUS_TRUSTED_DOMAIN_FAILURE ((NTSTATUS) 0xC000018CL) +#endif + +#ifndef STATUS_TRUSTED_RELATIONSHIP_FAILURE +# define STATUS_TRUSTED_RELATIONSHIP_FAILURE ((NTSTATUS) 0xC000018DL) +#endif + +#ifndef STATUS_EVENTLOG_FILE_CORRUPT +# define STATUS_EVENTLOG_FILE_CORRUPT ((NTSTATUS) 0xC000018EL) +#endif + +#ifndef STATUS_EVENTLOG_CANT_START +# define STATUS_EVENTLOG_CANT_START ((NTSTATUS) 0xC000018FL) +#endif + +#ifndef STATUS_TRUST_FAILURE +# define STATUS_TRUST_FAILURE ((NTSTATUS) 0xC0000190L) +#endif + +#ifndef STATUS_MUTANT_LIMIT_EXCEEDED +# define STATUS_MUTANT_LIMIT_EXCEEDED ((NTSTATUS) 0xC0000191L) +#endif + +#ifndef STATUS_NETLOGON_NOT_STARTED +# define STATUS_NETLOGON_NOT_STARTED ((NTSTATUS) 0xC0000192L) +#endif + +#ifndef STATUS_ACCOUNT_EXPIRED +# define STATUS_ACCOUNT_EXPIRED ((NTSTATUS) 0xC0000193L) +#endif + +#ifndef STATUS_POSSIBLE_DEADLOCK +# define STATUS_POSSIBLE_DEADLOCK ((NTSTATUS) 0xC0000194L) +#endif + +#ifndef STATUS_NETWORK_CREDENTIAL_CONFLICT +# define STATUS_NETWORK_CREDENTIAL_CONFLICT ((NTSTATUS) 0xC0000195L) +#endif + +#ifndef STATUS_REMOTE_SESSION_LIMIT +# define STATUS_REMOTE_SESSION_LIMIT ((NTSTATUS) 0xC0000196L) +#endif + +#ifndef STATUS_EVENTLOG_FILE_CHANGED +# define STATUS_EVENTLOG_FILE_CHANGED ((NTSTATUS) 0xC0000197L) +#endif + +#ifndef STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT +# define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT ((NTSTATUS) 0xC0000198L) +#endif + +#ifndef STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT +# define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT ((NTSTATUS) 0xC0000199L) +#endif + +#ifndef STATUS_NOLOGON_SERVER_TRUST_ACCOUNT +# define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT ((NTSTATUS) 0xC000019AL) +#endif + +#ifndef STATUS_DOMAIN_TRUST_INCONSISTENT +# define STATUS_DOMAIN_TRUST_INCONSISTENT ((NTSTATUS) 0xC000019BL) +#endif + +#ifndef STATUS_FS_DRIVER_REQUIRED +# define STATUS_FS_DRIVER_REQUIRED ((NTSTATUS) 0xC000019CL) +#endif + +#ifndef STATUS_IMAGE_ALREADY_LOADED_AS_DLL +# define STATUS_IMAGE_ALREADY_LOADED_AS_DLL 
((NTSTATUS) 0xC000019DL) +#endif + +#ifndef STATUS_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING +# define STATUS_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING ((NTSTATUS) 0xC000019EL) +#endif + +#ifndef STATUS_SHORT_NAMES_NOT_ENABLED_ON_VOLUME +# define STATUS_SHORT_NAMES_NOT_ENABLED_ON_VOLUME ((NTSTATUS) 0xC000019FL) +#endif + +#ifndef STATUS_SECURITY_STREAM_IS_INCONSISTENT +# define STATUS_SECURITY_STREAM_IS_INCONSISTENT ((NTSTATUS) 0xC00001A0L) +#endif + +#ifndef STATUS_INVALID_LOCK_RANGE +# define STATUS_INVALID_LOCK_RANGE ((NTSTATUS) 0xC00001A1L) +#endif + +#ifndef STATUS_INVALID_ACE_CONDITION +# define STATUS_INVALID_ACE_CONDITION ((NTSTATUS) 0xC00001A2L) +#endif + +#ifndef STATUS_IMAGE_SUBSYSTEM_NOT_PRESENT +# define STATUS_IMAGE_SUBSYSTEM_NOT_PRESENT ((NTSTATUS) 0xC00001A3L) +#endif + +#ifndef STATUS_NOTIFICATION_GUID_ALREADY_DEFINED +# define STATUS_NOTIFICATION_GUID_ALREADY_DEFINED ((NTSTATUS) 0xC00001A4L) +#endif + +#ifndef STATUS_NETWORK_OPEN_RESTRICTION +# define STATUS_NETWORK_OPEN_RESTRICTION ((NTSTATUS) 0xC0000201L) +#endif + +#ifndef STATUS_NO_USER_SESSION_KEY +# define STATUS_NO_USER_SESSION_KEY ((NTSTATUS) 0xC0000202L) +#endif + +#ifndef STATUS_USER_SESSION_DELETED +# define STATUS_USER_SESSION_DELETED ((NTSTATUS) 0xC0000203L) +#endif + +#ifndef STATUS_RESOURCE_LANG_NOT_FOUND +# define STATUS_RESOURCE_LANG_NOT_FOUND ((NTSTATUS) 0xC0000204L) +#endif + +#ifndef STATUS_INSUFF_SERVER_RESOURCES +# define STATUS_INSUFF_SERVER_RESOURCES ((NTSTATUS) 0xC0000205L) +#endif + +#ifndef STATUS_INVALID_BUFFER_SIZE +# define STATUS_INVALID_BUFFER_SIZE ((NTSTATUS) 0xC0000206L) +#endif + +#ifndef STATUS_INVALID_ADDRESS_COMPONENT +# define STATUS_INVALID_ADDRESS_COMPONENT ((NTSTATUS) 0xC0000207L) +#endif + +#ifndef STATUS_INVALID_ADDRESS_WILDCARD +# define STATUS_INVALID_ADDRESS_WILDCARD ((NTSTATUS) 0xC0000208L) +#endif + +#ifndef STATUS_TOO_MANY_ADDRESSES +# define STATUS_TOO_MANY_ADDRESSES ((NTSTATUS) 0xC0000209L) +#endif + +#ifndef STATUS_ADDRESS_ALREADY_EXISTS +# define STATUS_ADDRESS_ALREADY_EXISTS ((NTSTATUS) 0xC000020AL) +#endif + +#ifndef STATUS_ADDRESS_CLOSED +# define STATUS_ADDRESS_CLOSED ((NTSTATUS) 0xC000020BL) +#endif + +#ifndef STATUS_CONNECTION_DISCONNECTED +# define STATUS_CONNECTION_DISCONNECTED ((NTSTATUS) 0xC000020CL) +#endif + +#ifndef STATUS_CONNECTION_RESET +# define STATUS_CONNECTION_RESET ((NTSTATUS) 0xC000020DL) +#endif + +#ifndef STATUS_TOO_MANY_NODES +# define STATUS_TOO_MANY_NODES ((NTSTATUS) 0xC000020EL) +#endif + +#ifndef STATUS_TRANSACTION_ABORTED +# define STATUS_TRANSACTION_ABORTED ((NTSTATUS) 0xC000020FL) +#endif + +#ifndef STATUS_TRANSACTION_TIMED_OUT +# define STATUS_TRANSACTION_TIMED_OUT ((NTSTATUS) 0xC0000210L) +#endif + +#ifndef STATUS_TRANSACTION_NO_RELEASE +# define STATUS_TRANSACTION_NO_RELEASE ((NTSTATUS) 0xC0000211L) +#endif + +#ifndef STATUS_TRANSACTION_NO_MATCH +# define STATUS_TRANSACTION_NO_MATCH ((NTSTATUS) 0xC0000212L) +#endif + +#ifndef STATUS_TRANSACTION_RESPONDED +# define STATUS_TRANSACTION_RESPONDED ((NTSTATUS) 0xC0000213L) +#endif + +#ifndef STATUS_TRANSACTION_INVALID_ID +# define STATUS_TRANSACTION_INVALID_ID ((NTSTATUS) 0xC0000214L) +#endif + +#ifndef STATUS_TRANSACTION_INVALID_TYPE +# define STATUS_TRANSACTION_INVALID_TYPE ((NTSTATUS) 0xC0000215L) +#endif + +#ifndef STATUS_NOT_SERVER_SESSION +# define STATUS_NOT_SERVER_SESSION ((NTSTATUS) 0xC0000216L) +#endif + +#ifndef STATUS_NOT_CLIENT_SESSION +# define STATUS_NOT_CLIENT_SESSION ((NTSTATUS) 0xC0000217L) +#endif + +#ifndef STATUS_CANNOT_LOAD_REGISTRY_FILE +# define 
STATUS_CANNOT_LOAD_REGISTRY_FILE ((NTSTATUS) 0xC0000218L) +#endif + +#ifndef STATUS_DEBUG_ATTACH_FAILED +# define STATUS_DEBUG_ATTACH_FAILED ((NTSTATUS) 0xC0000219L) +#endif + +#ifndef STATUS_SYSTEM_PROCESS_TERMINATED +# define STATUS_SYSTEM_PROCESS_TERMINATED ((NTSTATUS) 0xC000021AL) +#endif + +#ifndef STATUS_DATA_NOT_ACCEPTED +# define STATUS_DATA_NOT_ACCEPTED ((NTSTATUS) 0xC000021BL) +#endif + +#ifndef STATUS_NO_BROWSER_SERVERS_FOUND +# define STATUS_NO_BROWSER_SERVERS_FOUND ((NTSTATUS) 0xC000021CL) +#endif + +#ifndef STATUS_VDM_HARD_ERROR +# define STATUS_VDM_HARD_ERROR ((NTSTATUS) 0xC000021DL) +#endif + +#ifndef STATUS_DRIVER_CANCEL_TIMEOUT +# define STATUS_DRIVER_CANCEL_TIMEOUT ((NTSTATUS) 0xC000021EL) +#endif + +#ifndef STATUS_REPLY_MESSAGE_MISMATCH +# define STATUS_REPLY_MESSAGE_MISMATCH ((NTSTATUS) 0xC000021FL) +#endif + +#ifndef STATUS_MAPPED_ALIGNMENT +# define STATUS_MAPPED_ALIGNMENT ((NTSTATUS) 0xC0000220L) +#endif + +#ifndef STATUS_IMAGE_CHECKSUM_MISMATCH +# define STATUS_IMAGE_CHECKSUM_MISMATCH ((NTSTATUS) 0xC0000221L) +#endif + +#ifndef STATUS_LOST_WRITEBEHIND_DATA +# define STATUS_LOST_WRITEBEHIND_DATA ((NTSTATUS) 0xC0000222L) +#endif + +#ifndef STATUS_CLIENT_SERVER_PARAMETERS_INVALID +# define STATUS_CLIENT_SERVER_PARAMETERS_INVALID ((NTSTATUS) 0xC0000223L) +#endif + +#ifndef STATUS_PASSWORD_MUST_CHANGE +# define STATUS_PASSWORD_MUST_CHANGE ((NTSTATUS) 0xC0000224L) +#endif + +#ifndef STATUS_NOT_FOUND +# define STATUS_NOT_FOUND ((NTSTATUS) 0xC0000225L) +#endif + +#ifndef STATUS_NOT_TINY_STREAM +# define STATUS_NOT_TINY_STREAM ((NTSTATUS) 0xC0000226L) +#endif + +#ifndef STATUS_RECOVERY_FAILURE +# define STATUS_RECOVERY_FAILURE ((NTSTATUS) 0xC0000227L) +#endif + +#ifndef STATUS_STACK_OVERFLOW_READ +# define STATUS_STACK_OVERFLOW_READ ((NTSTATUS) 0xC0000228L) +#endif + +#ifndef STATUS_FAIL_CHECK +# define STATUS_FAIL_CHECK ((NTSTATUS) 0xC0000229L) +#endif + +#ifndef STATUS_DUPLICATE_OBJECTID +# define STATUS_DUPLICATE_OBJECTID ((NTSTATUS) 0xC000022AL) +#endif + +#ifndef STATUS_OBJECTID_EXISTS +# define STATUS_OBJECTID_EXISTS ((NTSTATUS) 0xC000022BL) +#endif + +#ifndef STATUS_CONVERT_TO_LARGE +# define STATUS_CONVERT_TO_LARGE ((NTSTATUS) 0xC000022CL) +#endif + +#ifndef STATUS_RETRY +# define STATUS_RETRY ((NTSTATUS) 0xC000022DL) +#endif + +#ifndef STATUS_FOUND_OUT_OF_SCOPE +# define STATUS_FOUND_OUT_OF_SCOPE ((NTSTATUS) 0xC000022EL) +#endif + +#ifndef STATUS_ALLOCATE_BUCKET +# define STATUS_ALLOCATE_BUCKET ((NTSTATUS) 0xC000022FL) +#endif + +#ifndef STATUS_PROPSET_NOT_FOUND +# define STATUS_PROPSET_NOT_FOUND ((NTSTATUS) 0xC0000230L) +#endif + +#ifndef STATUS_MARSHALL_OVERFLOW +# define STATUS_MARSHALL_OVERFLOW ((NTSTATUS) 0xC0000231L) +#endif + +#ifndef STATUS_INVALID_VARIANT +# define STATUS_INVALID_VARIANT ((NTSTATUS) 0xC0000232L) +#endif + +#ifndef STATUS_DOMAIN_CONTROLLER_NOT_FOUND +# define STATUS_DOMAIN_CONTROLLER_NOT_FOUND ((NTSTATUS) 0xC0000233L) +#endif + +#ifndef STATUS_ACCOUNT_LOCKED_OUT +# define STATUS_ACCOUNT_LOCKED_OUT ((NTSTATUS) 0xC0000234L) +#endif + +#ifndef STATUS_HANDLE_NOT_CLOSABLE +# define STATUS_HANDLE_NOT_CLOSABLE ((NTSTATUS) 0xC0000235L) +#endif + +#ifndef STATUS_CONNECTION_REFUSED +# define STATUS_CONNECTION_REFUSED ((NTSTATUS) 0xC0000236L) +#endif + +#ifndef STATUS_GRACEFUL_DISCONNECT +# define STATUS_GRACEFUL_DISCONNECT ((NTSTATUS) 0xC0000237L) +#endif + +#ifndef STATUS_ADDRESS_ALREADY_ASSOCIATED +# define STATUS_ADDRESS_ALREADY_ASSOCIATED ((NTSTATUS) 0xC0000238L) +#endif + +#ifndef STATUS_ADDRESS_NOT_ASSOCIATED +# define 
STATUS_ADDRESS_NOT_ASSOCIATED ((NTSTATUS) 0xC0000239L) +#endif + +#ifndef STATUS_CONNECTION_INVALID +# define STATUS_CONNECTION_INVALID ((NTSTATUS) 0xC000023AL) +#endif + +#ifndef STATUS_CONNECTION_ACTIVE +# define STATUS_CONNECTION_ACTIVE ((NTSTATUS) 0xC000023BL) +#endif + +#ifndef STATUS_NETWORK_UNREACHABLE +# define STATUS_NETWORK_UNREACHABLE ((NTSTATUS) 0xC000023CL) +#endif + +#ifndef STATUS_HOST_UNREACHABLE +# define STATUS_HOST_UNREACHABLE ((NTSTATUS) 0xC000023DL) +#endif + +#ifndef STATUS_PROTOCOL_UNREACHABLE +# define STATUS_PROTOCOL_UNREACHABLE ((NTSTATUS) 0xC000023EL) +#endif + +#ifndef STATUS_PORT_UNREACHABLE +# define STATUS_PORT_UNREACHABLE ((NTSTATUS) 0xC000023FL) +#endif + +#ifndef STATUS_REQUEST_ABORTED +# define STATUS_REQUEST_ABORTED ((NTSTATUS) 0xC0000240L) +#endif + +#ifndef STATUS_CONNECTION_ABORTED +# define STATUS_CONNECTION_ABORTED ((NTSTATUS) 0xC0000241L) +#endif + +#ifndef STATUS_BAD_COMPRESSION_BUFFER +# define STATUS_BAD_COMPRESSION_BUFFER ((NTSTATUS) 0xC0000242L) +#endif + +#ifndef STATUS_USER_MAPPED_FILE +# define STATUS_USER_MAPPED_FILE ((NTSTATUS) 0xC0000243L) +#endif + +#ifndef STATUS_AUDIT_FAILED +# define STATUS_AUDIT_FAILED ((NTSTATUS) 0xC0000244L) +#endif + +#ifndef STATUS_TIMER_RESOLUTION_NOT_SET +# define STATUS_TIMER_RESOLUTION_NOT_SET ((NTSTATUS) 0xC0000245L) +#endif + +#ifndef STATUS_CONNECTION_COUNT_LIMIT +# define STATUS_CONNECTION_COUNT_LIMIT ((NTSTATUS) 0xC0000246L) +#endif + +#ifndef STATUS_LOGIN_TIME_RESTRICTION +# define STATUS_LOGIN_TIME_RESTRICTION ((NTSTATUS) 0xC0000247L) +#endif + +#ifndef STATUS_LOGIN_WKSTA_RESTRICTION +# define STATUS_LOGIN_WKSTA_RESTRICTION ((NTSTATUS) 0xC0000248L) +#endif + +#ifndef STATUS_IMAGE_MP_UP_MISMATCH +# define STATUS_IMAGE_MP_UP_MISMATCH ((NTSTATUS) 0xC0000249L) +#endif + +#ifndef STATUS_INSUFFICIENT_LOGON_INFO +# define STATUS_INSUFFICIENT_LOGON_INFO ((NTSTATUS) 0xC0000250L) +#endif + +#ifndef STATUS_BAD_DLL_ENTRYPOINT +# define STATUS_BAD_DLL_ENTRYPOINT ((NTSTATUS) 0xC0000251L) +#endif + +#ifndef STATUS_BAD_SERVICE_ENTRYPOINT +# define STATUS_BAD_SERVICE_ENTRYPOINT ((NTSTATUS) 0xC0000252L) +#endif + +#ifndef STATUS_LPC_REPLY_LOST +# define STATUS_LPC_REPLY_LOST ((NTSTATUS) 0xC0000253L) +#endif + +#ifndef STATUS_IP_ADDRESS_CONFLICT1 +# define STATUS_IP_ADDRESS_CONFLICT1 ((NTSTATUS) 0xC0000254L) +#endif + +#ifndef STATUS_IP_ADDRESS_CONFLICT2 +# define STATUS_IP_ADDRESS_CONFLICT2 ((NTSTATUS) 0xC0000255L) +#endif + +#ifndef STATUS_REGISTRY_QUOTA_LIMIT +# define STATUS_REGISTRY_QUOTA_LIMIT ((NTSTATUS) 0xC0000256L) +#endif + +#ifndef STATUS_PATH_NOT_COVERED +# define STATUS_PATH_NOT_COVERED ((NTSTATUS) 0xC0000257L) +#endif + +#ifndef STATUS_NO_CALLBACK_ACTIVE +# define STATUS_NO_CALLBACK_ACTIVE ((NTSTATUS) 0xC0000258L) +#endif + +#ifndef STATUS_LICENSE_QUOTA_EXCEEDED +# define STATUS_LICENSE_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000259L) +#endif + +#ifndef STATUS_PWD_TOO_SHORT +# define STATUS_PWD_TOO_SHORT ((NTSTATUS) 0xC000025AL) +#endif + +#ifndef STATUS_PWD_TOO_RECENT +# define STATUS_PWD_TOO_RECENT ((NTSTATUS) 0xC000025BL) +#endif + +#ifndef STATUS_PWD_HISTORY_CONFLICT +# define STATUS_PWD_HISTORY_CONFLICT ((NTSTATUS) 0xC000025CL) +#endif + +#ifndef STATUS_PLUGPLAY_NO_DEVICE +# define STATUS_PLUGPLAY_NO_DEVICE ((NTSTATUS) 0xC000025EL) +#endif + +#ifndef STATUS_UNSUPPORTED_COMPRESSION +# define STATUS_UNSUPPORTED_COMPRESSION ((NTSTATUS) 0xC000025FL) +#endif + +#ifndef STATUS_INVALID_HW_PROFILE +# define STATUS_INVALID_HW_PROFILE ((NTSTATUS) 0xC0000260L) +#endif + +#ifndef STATUS_INVALID_PLUGPLAY_DEVICE_PATH +# 
define STATUS_INVALID_PLUGPLAY_DEVICE_PATH ((NTSTATUS) 0xC0000261L) +#endif + +#ifndef STATUS_DRIVER_ORDINAL_NOT_FOUND +# define STATUS_DRIVER_ORDINAL_NOT_FOUND ((NTSTATUS) 0xC0000262L) +#endif + +#ifndef STATUS_DRIVER_ENTRYPOINT_NOT_FOUND +# define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND ((NTSTATUS) 0xC0000263L) +#endif + +#ifndef STATUS_RESOURCE_NOT_OWNED +# define STATUS_RESOURCE_NOT_OWNED ((NTSTATUS) 0xC0000264L) +#endif + +#ifndef STATUS_TOO_MANY_LINKS +# define STATUS_TOO_MANY_LINKS ((NTSTATUS) 0xC0000265L) +#endif + +#ifndef STATUS_QUOTA_LIST_INCONSISTENT +# define STATUS_QUOTA_LIST_INCONSISTENT ((NTSTATUS) 0xC0000266L) +#endif + +#ifndef STATUS_FILE_IS_OFFLINE +# define STATUS_FILE_IS_OFFLINE ((NTSTATUS) 0xC0000267L) +#endif + +#ifndef STATUS_EVALUATION_EXPIRATION +# define STATUS_EVALUATION_EXPIRATION ((NTSTATUS) 0xC0000268L) +#endif + +#ifndef STATUS_ILLEGAL_DLL_RELOCATION +# define STATUS_ILLEGAL_DLL_RELOCATION ((NTSTATUS) 0xC0000269L) +#endif + +#ifndef STATUS_LICENSE_VIOLATION +# define STATUS_LICENSE_VIOLATION ((NTSTATUS) 0xC000026AL) +#endif + +#ifndef STATUS_DLL_INIT_FAILED_LOGOFF +# define STATUS_DLL_INIT_FAILED_LOGOFF ((NTSTATUS) 0xC000026BL) +#endif + +#ifndef STATUS_DRIVER_UNABLE_TO_LOAD +# define STATUS_DRIVER_UNABLE_TO_LOAD ((NTSTATUS) 0xC000026CL) +#endif + +#ifndef STATUS_DFS_UNAVAILABLE +# define STATUS_DFS_UNAVAILABLE ((NTSTATUS) 0xC000026DL) +#endif + +#ifndef STATUS_VOLUME_DISMOUNTED +# define STATUS_VOLUME_DISMOUNTED ((NTSTATUS) 0xC000026EL) +#endif + +#ifndef STATUS_WX86_INTERNAL_ERROR +# define STATUS_WX86_INTERNAL_ERROR ((NTSTATUS) 0xC000026FL) +#endif + +#ifndef STATUS_WX86_FLOAT_STACK_CHECK +# define STATUS_WX86_FLOAT_STACK_CHECK ((NTSTATUS) 0xC0000270L) +#endif + +#ifndef STATUS_VALIDATE_CONTINUE +# define STATUS_VALIDATE_CONTINUE ((NTSTATUS) 0xC0000271L) +#endif + +#ifndef STATUS_NO_MATCH +# define STATUS_NO_MATCH ((NTSTATUS) 0xC0000272L) +#endif + +#ifndef STATUS_NO_MORE_MATCHES +# define STATUS_NO_MORE_MATCHES ((NTSTATUS) 0xC0000273L) +#endif + +#ifndef STATUS_NOT_A_REPARSE_POINT +# define STATUS_NOT_A_REPARSE_POINT ((NTSTATUS) 0xC0000275L) +#endif + +#ifndef STATUS_IO_REPARSE_TAG_INVALID +# define STATUS_IO_REPARSE_TAG_INVALID ((NTSTATUS) 0xC0000276L) +#endif + +#ifndef STATUS_IO_REPARSE_TAG_MISMATCH +# define STATUS_IO_REPARSE_TAG_MISMATCH ((NTSTATUS) 0xC0000277L) +#endif + +#ifndef STATUS_IO_REPARSE_DATA_INVALID +# define STATUS_IO_REPARSE_DATA_INVALID ((NTSTATUS) 0xC0000278L) +#endif + +#ifndef STATUS_IO_REPARSE_TAG_NOT_HANDLED +# define STATUS_IO_REPARSE_TAG_NOT_HANDLED ((NTSTATUS) 0xC0000279L) +#endif + +#ifndef STATUS_REPARSE_POINT_NOT_RESOLVED +# define STATUS_REPARSE_POINT_NOT_RESOLVED ((NTSTATUS) 0xC0000280L) +#endif + +#ifndef STATUS_DIRECTORY_IS_A_REPARSE_POINT +# define STATUS_DIRECTORY_IS_A_REPARSE_POINT ((NTSTATUS) 0xC0000281L) +#endif + +#ifndef STATUS_RANGE_LIST_CONFLICT +# define STATUS_RANGE_LIST_CONFLICT ((NTSTATUS) 0xC0000282L) +#endif + +#ifndef STATUS_SOURCE_ELEMENT_EMPTY +# define STATUS_SOURCE_ELEMENT_EMPTY ((NTSTATUS) 0xC0000283L) +#endif + +#ifndef STATUS_DESTINATION_ELEMENT_FULL +# define STATUS_DESTINATION_ELEMENT_FULL ((NTSTATUS) 0xC0000284L) +#endif + +#ifndef STATUS_ILLEGAL_ELEMENT_ADDRESS +# define STATUS_ILLEGAL_ELEMENT_ADDRESS ((NTSTATUS) 0xC0000285L) +#endif + +#ifndef STATUS_MAGAZINE_NOT_PRESENT +# define STATUS_MAGAZINE_NOT_PRESENT ((NTSTATUS) 0xC0000286L) +#endif + +#ifndef STATUS_REINITIALIZATION_NEEDED +# define STATUS_REINITIALIZATION_NEEDED ((NTSTATUS) 0xC0000287L) +#endif + +#ifndef STATUS_DEVICE_REQUIRES_CLEANING 
+# define STATUS_DEVICE_REQUIRES_CLEANING ((NTSTATUS) 0x80000288L) +#endif + +#ifndef STATUS_DEVICE_DOOR_OPEN +# define STATUS_DEVICE_DOOR_OPEN ((NTSTATUS) 0x80000289L) +#endif + +#ifndef STATUS_ENCRYPTION_FAILED +# define STATUS_ENCRYPTION_FAILED ((NTSTATUS) 0xC000028AL) +#endif + +#ifndef STATUS_DECRYPTION_FAILED +# define STATUS_DECRYPTION_FAILED ((NTSTATUS) 0xC000028BL) +#endif + +#ifndef STATUS_RANGE_NOT_FOUND +# define STATUS_RANGE_NOT_FOUND ((NTSTATUS) 0xC000028CL) +#endif + +#ifndef STATUS_NO_RECOVERY_POLICY +# define STATUS_NO_RECOVERY_POLICY ((NTSTATUS) 0xC000028DL) +#endif + +#ifndef STATUS_NO_EFS +# define STATUS_NO_EFS ((NTSTATUS) 0xC000028EL) +#endif + +#ifndef STATUS_WRONG_EFS +# define STATUS_WRONG_EFS ((NTSTATUS) 0xC000028FL) +#endif + +#ifndef STATUS_NO_USER_KEYS +# define STATUS_NO_USER_KEYS ((NTSTATUS) 0xC0000290L) +#endif + +#ifndef STATUS_FILE_NOT_ENCRYPTED +# define STATUS_FILE_NOT_ENCRYPTED ((NTSTATUS) 0xC0000291L) +#endif + +#ifndef STATUS_NOT_EXPORT_FORMAT +# define STATUS_NOT_EXPORT_FORMAT ((NTSTATUS) 0xC0000292L) +#endif + +#ifndef STATUS_FILE_ENCRYPTED +# define STATUS_FILE_ENCRYPTED ((NTSTATUS) 0xC0000293L) +#endif + +#ifndef STATUS_WAKE_SYSTEM +# define STATUS_WAKE_SYSTEM ((NTSTATUS) 0x40000294L) +#endif + +#ifndef STATUS_WMI_GUID_NOT_FOUND +# define STATUS_WMI_GUID_NOT_FOUND ((NTSTATUS) 0xC0000295L) +#endif + +#ifndef STATUS_WMI_INSTANCE_NOT_FOUND +# define STATUS_WMI_INSTANCE_NOT_FOUND ((NTSTATUS) 0xC0000296L) +#endif + +#ifndef STATUS_WMI_ITEMID_NOT_FOUND +# define STATUS_WMI_ITEMID_NOT_FOUND ((NTSTATUS) 0xC0000297L) +#endif + +#ifndef STATUS_WMI_TRY_AGAIN +# define STATUS_WMI_TRY_AGAIN ((NTSTATUS) 0xC0000298L) +#endif + +#ifndef STATUS_SHARED_POLICY +# define STATUS_SHARED_POLICY ((NTSTATUS) 0xC0000299L) +#endif + +#ifndef STATUS_POLICY_OBJECT_NOT_FOUND +# define STATUS_POLICY_OBJECT_NOT_FOUND ((NTSTATUS) 0xC000029AL) +#endif + +#ifndef STATUS_POLICY_ONLY_IN_DS +# define STATUS_POLICY_ONLY_IN_DS ((NTSTATUS) 0xC000029BL) +#endif + +#ifndef STATUS_VOLUME_NOT_UPGRADED +# define STATUS_VOLUME_NOT_UPGRADED ((NTSTATUS) 0xC000029CL) +#endif + +#ifndef STATUS_REMOTE_STORAGE_NOT_ACTIVE +# define STATUS_REMOTE_STORAGE_NOT_ACTIVE ((NTSTATUS) 0xC000029DL) +#endif + +#ifndef STATUS_REMOTE_STORAGE_MEDIA_ERROR +# define STATUS_REMOTE_STORAGE_MEDIA_ERROR ((NTSTATUS) 0xC000029EL) +#endif + +#ifndef STATUS_NO_TRACKING_SERVICE +# define STATUS_NO_TRACKING_SERVICE ((NTSTATUS) 0xC000029FL) +#endif + +#ifndef STATUS_SERVER_SID_MISMATCH +# define STATUS_SERVER_SID_MISMATCH ((NTSTATUS) 0xC00002A0L) +#endif + +#ifndef STATUS_DS_NO_ATTRIBUTE_OR_VALUE +# define STATUS_DS_NO_ATTRIBUTE_OR_VALUE ((NTSTATUS) 0xC00002A1L) +#endif + +#ifndef STATUS_DS_INVALID_ATTRIBUTE_SYNTAX +# define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX ((NTSTATUS) 0xC00002A2L) +#endif + +#ifndef STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED +# define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED ((NTSTATUS) 0xC00002A3L) +#endif + +#ifndef STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS +# define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS ((NTSTATUS) 0xC00002A4L) +#endif + +#ifndef STATUS_DS_BUSY +# define STATUS_DS_BUSY ((NTSTATUS) 0xC00002A5L) +#endif + +#ifndef STATUS_DS_UNAVAILABLE +# define STATUS_DS_UNAVAILABLE ((NTSTATUS) 0xC00002A6L) +#endif + +#ifndef STATUS_DS_NO_RIDS_ALLOCATED +# define STATUS_DS_NO_RIDS_ALLOCATED ((NTSTATUS) 0xC00002A7L) +#endif + +#ifndef STATUS_DS_NO_MORE_RIDS +# define STATUS_DS_NO_MORE_RIDS ((NTSTATUS) 0xC00002A8L) +#endif + +#ifndef STATUS_DS_INCORRECT_ROLE_OWNER +# define STATUS_DS_INCORRECT_ROLE_OWNER ((NTSTATUS) 0xC00002A9L) 
+#endif + +#ifndef STATUS_DS_RIDMGR_INIT_ERROR +# define STATUS_DS_RIDMGR_INIT_ERROR ((NTSTATUS) 0xC00002AAL) +#endif + +#ifndef STATUS_DS_OBJ_CLASS_VIOLATION +# define STATUS_DS_OBJ_CLASS_VIOLATION ((NTSTATUS) 0xC00002ABL) +#endif + +#ifndef STATUS_DS_CANT_ON_NON_LEAF +# define STATUS_DS_CANT_ON_NON_LEAF ((NTSTATUS) 0xC00002ACL) +#endif + +#ifndef STATUS_DS_CANT_ON_RDN +# define STATUS_DS_CANT_ON_RDN ((NTSTATUS) 0xC00002ADL) +#endif + +#ifndef STATUS_DS_CANT_MOD_OBJ_CLASS +# define STATUS_DS_CANT_MOD_OBJ_CLASS ((NTSTATUS) 0xC00002AEL) +#endif + +#ifndef STATUS_DS_CROSS_DOM_MOVE_FAILED +# define STATUS_DS_CROSS_DOM_MOVE_FAILED ((NTSTATUS) 0xC00002AFL) +#endif + +#ifndef STATUS_DS_GC_NOT_AVAILABLE +# define STATUS_DS_GC_NOT_AVAILABLE ((NTSTATUS) 0xC00002B0L) +#endif + +#ifndef STATUS_DIRECTORY_SERVICE_REQUIRED +# define STATUS_DIRECTORY_SERVICE_REQUIRED ((NTSTATUS) 0xC00002B1L) +#endif + +#ifndef STATUS_REPARSE_ATTRIBUTE_CONFLICT +# define STATUS_REPARSE_ATTRIBUTE_CONFLICT ((NTSTATUS) 0xC00002B2L) +#endif + +#ifndef STATUS_CANT_ENABLE_DENY_ONLY +# define STATUS_CANT_ENABLE_DENY_ONLY ((NTSTATUS) 0xC00002B3L) +#endif + +#ifndef STATUS_FLOAT_MULTIPLE_FAULTS +# define STATUS_FLOAT_MULTIPLE_FAULTS ((NTSTATUS) 0xC00002B4L) +#endif + +#ifndef STATUS_FLOAT_MULTIPLE_TRAPS +# define STATUS_FLOAT_MULTIPLE_TRAPS ((NTSTATUS) 0xC00002B5L) +#endif + +#ifndef STATUS_DEVICE_REMOVED +# define STATUS_DEVICE_REMOVED ((NTSTATUS) 0xC00002B6L) +#endif + +#ifndef STATUS_JOURNAL_DELETE_IN_PROGRESS +# define STATUS_JOURNAL_DELETE_IN_PROGRESS ((NTSTATUS) 0xC00002B7L) +#endif + +#ifndef STATUS_JOURNAL_NOT_ACTIVE +# define STATUS_JOURNAL_NOT_ACTIVE ((NTSTATUS) 0xC00002B8L) +#endif + +#ifndef STATUS_NOINTERFACE +# define STATUS_NOINTERFACE ((NTSTATUS) 0xC00002B9L) +#endif + +#ifndef STATUS_DS_ADMIN_LIMIT_EXCEEDED +# define STATUS_DS_ADMIN_LIMIT_EXCEEDED ((NTSTATUS) 0xC00002C1L) +#endif + +#ifndef STATUS_DRIVER_FAILED_SLEEP +# define STATUS_DRIVER_FAILED_SLEEP ((NTSTATUS) 0xC00002C2L) +#endif + +#ifndef STATUS_MUTUAL_AUTHENTICATION_FAILED +# define STATUS_MUTUAL_AUTHENTICATION_FAILED ((NTSTATUS) 0xC00002C3L) +#endif + +#ifndef STATUS_CORRUPT_SYSTEM_FILE +# define STATUS_CORRUPT_SYSTEM_FILE ((NTSTATUS) 0xC00002C4L) +#endif + +#ifndef STATUS_DATATYPE_MISALIGNMENT_ERROR +# define STATUS_DATATYPE_MISALIGNMENT_ERROR ((NTSTATUS) 0xC00002C5L) +#endif + +#ifndef STATUS_WMI_READ_ONLY +# define STATUS_WMI_READ_ONLY ((NTSTATUS) 0xC00002C6L) +#endif + +#ifndef STATUS_WMI_SET_FAILURE +# define STATUS_WMI_SET_FAILURE ((NTSTATUS) 0xC00002C7L) +#endif + +#ifndef STATUS_COMMITMENT_MINIMUM +# define STATUS_COMMITMENT_MINIMUM ((NTSTATUS) 0xC00002C8L) +#endif + +#ifndef STATUS_REG_NAT_CONSUMPTION +# define STATUS_REG_NAT_CONSUMPTION ((NTSTATUS) 0xC00002C9L) +#endif + +#ifndef STATUS_TRANSPORT_FULL +# define STATUS_TRANSPORT_FULL ((NTSTATUS) 0xC00002CAL) +#endif + +#ifndef STATUS_DS_SAM_INIT_FAILURE +# define STATUS_DS_SAM_INIT_FAILURE ((NTSTATUS) 0xC00002CBL) +#endif + +#ifndef STATUS_ONLY_IF_CONNECTED +# define STATUS_ONLY_IF_CONNECTED ((NTSTATUS) 0xC00002CCL) +#endif + +#ifndef STATUS_DS_SENSITIVE_GROUP_VIOLATION +# define STATUS_DS_SENSITIVE_GROUP_VIOLATION ((NTSTATUS) 0xC00002CDL) +#endif + +#ifndef STATUS_PNP_RESTART_ENUMERATION +# define STATUS_PNP_RESTART_ENUMERATION ((NTSTATUS) 0xC00002CEL) +#endif + +#ifndef STATUS_JOURNAL_ENTRY_DELETED +# define STATUS_JOURNAL_ENTRY_DELETED ((NTSTATUS) 0xC00002CFL) +#endif + +#ifndef STATUS_DS_CANT_MOD_PRIMARYGROUPID +# define STATUS_DS_CANT_MOD_PRIMARYGROUPID ((NTSTATUS) 0xC00002D0L) +#endif + 
+#ifndef STATUS_SYSTEM_IMAGE_BAD_SIGNATURE +# define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE ((NTSTATUS) 0xC00002D1L) +#endif + +#ifndef STATUS_PNP_REBOOT_REQUIRED +# define STATUS_PNP_REBOOT_REQUIRED ((NTSTATUS) 0xC00002D2L) +#endif + +#ifndef STATUS_POWER_STATE_INVALID +# define STATUS_POWER_STATE_INVALID ((NTSTATUS) 0xC00002D3L) +#endif + +#ifndef STATUS_DS_INVALID_GROUP_TYPE +# define STATUS_DS_INVALID_GROUP_TYPE ((NTSTATUS) 0xC00002D4L) +#endif + +#ifndef STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN +# define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN ((NTSTATUS) 0xC00002D5L) +#endif + +#ifndef STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN +# define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN ((NTSTATUS) 0xC00002D6L) +#endif + +#ifndef STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER +# define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER ((NTSTATUS) 0xC00002D7L) +#endif + +#ifndef STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER +# define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER ((NTSTATUS) 0xC00002D8L) +#endif + +#ifndef STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER +# define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER ((NTSTATUS) 0xC00002D9L) +#endif + +#ifndef STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER +# define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER ((NTSTATUS) 0xC00002DAL) +#endif + +#ifndef STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER +# define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER ((NTSTATUS) 0xC00002DBL) +#endif + +#ifndef STATUS_DS_HAVE_PRIMARY_MEMBERS +# define STATUS_DS_HAVE_PRIMARY_MEMBERS ((NTSTATUS) 0xC00002DCL) +#endif + +#ifndef STATUS_WMI_NOT_SUPPORTED +# define STATUS_WMI_NOT_SUPPORTED ((NTSTATUS) 0xC00002DDL) +#endif + +#ifndef STATUS_INSUFFICIENT_POWER +# define STATUS_INSUFFICIENT_POWER ((NTSTATUS) 0xC00002DEL) +#endif + +#ifndef STATUS_SAM_NEED_BOOTKEY_PASSWORD +# define STATUS_SAM_NEED_BOOTKEY_PASSWORD ((NTSTATUS) 0xC00002DFL) +#endif + +#ifndef STATUS_SAM_NEED_BOOTKEY_FLOPPY +# define STATUS_SAM_NEED_BOOTKEY_FLOPPY ((NTSTATUS) 0xC00002E0L) +#endif + +#ifndef STATUS_DS_CANT_START +# define STATUS_DS_CANT_START ((NTSTATUS) 0xC00002E1L) +#endif + +#ifndef STATUS_DS_INIT_FAILURE +# define STATUS_DS_INIT_FAILURE ((NTSTATUS) 0xC00002E2L) +#endif + +#ifndef STATUS_SAM_INIT_FAILURE +# define STATUS_SAM_INIT_FAILURE ((NTSTATUS) 0xC00002E3L) +#endif + +#ifndef STATUS_DS_GC_REQUIRED +# define STATUS_DS_GC_REQUIRED ((NTSTATUS) 0xC00002E4L) +#endif + +#ifndef STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY +# define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY ((NTSTATUS) 0xC00002E5L) +#endif + +#ifndef STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS +# define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS ((NTSTATUS) 0xC00002E6L) +#endif + +#ifndef STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED +# define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED ((NTSTATUS) 0xC00002E7L) +#endif + +#ifndef STATUS_MULTIPLE_FAULT_VIOLATION +# define STATUS_MULTIPLE_FAULT_VIOLATION ((NTSTATUS) 0xC00002E8L) +#endif + +#ifndef STATUS_CURRENT_DOMAIN_NOT_ALLOWED +# define STATUS_CURRENT_DOMAIN_NOT_ALLOWED ((NTSTATUS) 0xC00002E9L) +#endif + +#ifndef STATUS_CANNOT_MAKE +# define STATUS_CANNOT_MAKE ((NTSTATUS) 0xC00002EAL) +#endif + +#ifndef STATUS_SYSTEM_SHUTDOWN +# define STATUS_SYSTEM_SHUTDOWN ((NTSTATUS) 0xC00002EBL) +#endif + +#ifndef STATUS_DS_INIT_FAILURE_CONSOLE +# define STATUS_DS_INIT_FAILURE_CONSOLE ((NTSTATUS) 0xC00002ECL) +#endif + +#ifndef STATUS_DS_SAM_INIT_FAILURE_CONSOLE +# define STATUS_DS_SAM_INIT_FAILURE_CONSOLE ((NTSTATUS) 0xC00002EDL) +#endif + +#ifndef STATUS_UNFINISHED_CONTEXT_DELETED +# define 
STATUS_UNFINISHED_CONTEXT_DELETED ((NTSTATUS) 0xC00002EEL) +#endif + +#ifndef STATUS_NO_TGT_REPLY +# define STATUS_NO_TGT_REPLY ((NTSTATUS) 0xC00002EFL) +#endif + +#ifndef STATUS_OBJECTID_NOT_FOUND +# define STATUS_OBJECTID_NOT_FOUND ((NTSTATUS) 0xC00002F0L) +#endif + +#ifndef STATUS_NO_IP_ADDRESSES +# define STATUS_NO_IP_ADDRESSES ((NTSTATUS) 0xC00002F1L) +#endif + +#ifndef STATUS_WRONG_CREDENTIAL_HANDLE +# define STATUS_WRONG_CREDENTIAL_HANDLE ((NTSTATUS) 0xC00002F2L) +#endif + +#ifndef STATUS_CRYPTO_SYSTEM_INVALID +# define STATUS_CRYPTO_SYSTEM_INVALID ((NTSTATUS) 0xC00002F3L) +#endif + +#ifndef STATUS_MAX_REFERRALS_EXCEEDED +# define STATUS_MAX_REFERRALS_EXCEEDED ((NTSTATUS) 0xC00002F4L) +#endif + +#ifndef STATUS_MUST_BE_KDC +# define STATUS_MUST_BE_KDC ((NTSTATUS) 0xC00002F5L) +#endif + +#ifndef STATUS_STRONG_CRYPTO_NOT_SUPPORTED +# define STATUS_STRONG_CRYPTO_NOT_SUPPORTED ((NTSTATUS) 0xC00002F6L) +#endif + +#ifndef STATUS_TOO_MANY_PRINCIPALS +# define STATUS_TOO_MANY_PRINCIPALS ((NTSTATUS) 0xC00002F7L) +#endif + +#ifndef STATUS_NO_PA_DATA +# define STATUS_NO_PA_DATA ((NTSTATUS) 0xC00002F8L) +#endif + +#ifndef STATUS_PKINIT_NAME_MISMATCH +# define STATUS_PKINIT_NAME_MISMATCH ((NTSTATUS) 0xC00002F9L) +#endif + +#ifndef STATUS_SMARTCARD_LOGON_REQUIRED +# define STATUS_SMARTCARD_LOGON_REQUIRED ((NTSTATUS) 0xC00002FAL) +#endif + +#ifndef STATUS_KDC_INVALID_REQUEST +# define STATUS_KDC_INVALID_REQUEST ((NTSTATUS) 0xC00002FBL) +#endif + +#ifndef STATUS_KDC_UNABLE_TO_REFER +# define STATUS_KDC_UNABLE_TO_REFER ((NTSTATUS) 0xC00002FCL) +#endif + +#ifndef STATUS_KDC_UNKNOWN_ETYPE +# define STATUS_KDC_UNKNOWN_ETYPE ((NTSTATUS) 0xC00002FDL) +#endif + +#ifndef STATUS_SHUTDOWN_IN_PROGRESS +# define STATUS_SHUTDOWN_IN_PROGRESS ((NTSTATUS) 0xC00002FEL) +#endif + +#ifndef STATUS_SERVER_SHUTDOWN_IN_PROGRESS +# define STATUS_SERVER_SHUTDOWN_IN_PROGRESS ((NTSTATUS) 0xC00002FFL) +#endif + +#ifndef STATUS_NOT_SUPPORTED_ON_SBS +# define STATUS_NOT_SUPPORTED_ON_SBS ((NTSTATUS) 0xC0000300L) +#endif + +#ifndef STATUS_WMI_GUID_DISCONNECTED +# define STATUS_WMI_GUID_DISCONNECTED ((NTSTATUS) 0xC0000301L) +#endif + +#ifndef STATUS_WMI_ALREADY_DISABLED +# define STATUS_WMI_ALREADY_DISABLED ((NTSTATUS) 0xC0000302L) +#endif + +#ifndef STATUS_WMI_ALREADY_ENABLED +# define STATUS_WMI_ALREADY_ENABLED ((NTSTATUS) 0xC0000303L) +#endif + +#ifndef STATUS_MFT_TOO_FRAGMENTED +# define STATUS_MFT_TOO_FRAGMENTED ((NTSTATUS) 0xC0000304L) +#endif + +#ifndef STATUS_COPY_PROTECTION_FAILURE +# define STATUS_COPY_PROTECTION_FAILURE ((NTSTATUS) 0xC0000305L) +#endif + +#ifndef STATUS_CSS_AUTHENTICATION_FAILURE +# define STATUS_CSS_AUTHENTICATION_FAILURE ((NTSTATUS) 0xC0000306L) +#endif + +#ifndef STATUS_CSS_KEY_NOT_PRESENT +# define STATUS_CSS_KEY_NOT_PRESENT ((NTSTATUS) 0xC0000307L) +#endif + +#ifndef STATUS_CSS_KEY_NOT_ESTABLISHED +# define STATUS_CSS_KEY_NOT_ESTABLISHED ((NTSTATUS) 0xC0000308L) +#endif + +#ifndef STATUS_CSS_SCRAMBLED_SECTOR +# define STATUS_CSS_SCRAMBLED_SECTOR ((NTSTATUS) 0xC0000309L) +#endif + +#ifndef STATUS_CSS_REGION_MISMATCH +# define STATUS_CSS_REGION_MISMATCH ((NTSTATUS) 0xC000030AL) +#endif + +#ifndef STATUS_CSS_RESETS_EXHAUSTED +# define STATUS_CSS_RESETS_EXHAUSTED ((NTSTATUS) 0xC000030BL) +#endif + +#ifndef STATUS_PKINIT_FAILURE +# define STATUS_PKINIT_FAILURE ((NTSTATUS) 0xC0000320L) +#endif + +#ifndef STATUS_SMARTCARD_SUBSYSTEM_FAILURE +# define STATUS_SMARTCARD_SUBSYSTEM_FAILURE ((NTSTATUS) 0xC0000321L) +#endif + +#ifndef STATUS_NO_KERB_KEY +# define STATUS_NO_KERB_KEY ((NTSTATUS) 0xC0000322L) +#endif 
+ +#ifndef STATUS_HOST_DOWN +# define STATUS_HOST_DOWN ((NTSTATUS) 0xC0000350L) +#endif + +#ifndef STATUS_UNSUPPORTED_PREAUTH +# define STATUS_UNSUPPORTED_PREAUTH ((NTSTATUS) 0xC0000351L) +#endif + +#ifndef STATUS_EFS_ALG_BLOB_TOO_BIG +# define STATUS_EFS_ALG_BLOB_TOO_BIG ((NTSTATUS) 0xC0000352L) +#endif + +#ifndef STATUS_PORT_NOT_SET +# define STATUS_PORT_NOT_SET ((NTSTATUS) 0xC0000353L) +#endif + +#ifndef STATUS_DEBUGGER_INACTIVE +# define STATUS_DEBUGGER_INACTIVE ((NTSTATUS) 0xC0000354L) +#endif + +#ifndef STATUS_DS_VERSION_CHECK_FAILURE +# define STATUS_DS_VERSION_CHECK_FAILURE ((NTSTATUS) 0xC0000355L) +#endif + +#ifndef STATUS_AUDITING_DISABLED +# define STATUS_AUDITING_DISABLED ((NTSTATUS) 0xC0000356L) +#endif + +#ifndef STATUS_PRENT4_MACHINE_ACCOUNT +# define STATUS_PRENT4_MACHINE_ACCOUNT ((NTSTATUS) 0xC0000357L) +#endif + +#ifndef STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER +# define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER ((NTSTATUS) 0xC0000358L) +#endif + +#ifndef STATUS_INVALID_IMAGE_WIN_32 +# define STATUS_INVALID_IMAGE_WIN_32 ((NTSTATUS) 0xC0000359L) +#endif + +#ifndef STATUS_INVALID_IMAGE_WIN_64 +# define STATUS_INVALID_IMAGE_WIN_64 ((NTSTATUS) 0xC000035AL) +#endif + +#ifndef STATUS_BAD_BINDINGS +# define STATUS_BAD_BINDINGS ((NTSTATUS) 0xC000035BL) +#endif + +#ifndef STATUS_NETWORK_SESSION_EXPIRED +# define STATUS_NETWORK_SESSION_EXPIRED ((NTSTATUS) 0xC000035CL) +#endif + +#ifndef STATUS_APPHELP_BLOCK +# define STATUS_APPHELP_BLOCK ((NTSTATUS) 0xC000035DL) +#endif + +#ifndef STATUS_ALL_SIDS_FILTERED +# define STATUS_ALL_SIDS_FILTERED ((NTSTATUS) 0xC000035EL) +#endif + +#ifndef STATUS_NOT_SAFE_MODE_DRIVER +# define STATUS_NOT_SAFE_MODE_DRIVER ((NTSTATUS) 0xC000035FL) +#endif + +#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT +# define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT ((NTSTATUS) 0xC0000361L) +#endif + +#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_PATH +# define STATUS_ACCESS_DISABLED_BY_POLICY_PATH ((NTSTATUS) 0xC0000362L) +#endif + +#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER +# define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER ((NTSTATUS) 0xC0000363L) +#endif + +#ifndef STATUS_ACCESS_DISABLED_BY_POLICY_OTHER +# define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER ((NTSTATUS) 0xC0000364L) +#endif + +#ifndef STATUS_FAILED_DRIVER_ENTRY +# define STATUS_FAILED_DRIVER_ENTRY ((NTSTATUS) 0xC0000365L) +#endif + +#ifndef STATUS_DEVICE_ENUMERATION_ERROR +# define STATUS_DEVICE_ENUMERATION_ERROR ((NTSTATUS) 0xC0000366L) +#endif + +#ifndef STATUS_MOUNT_POINT_NOT_RESOLVED +# define STATUS_MOUNT_POINT_NOT_RESOLVED ((NTSTATUS) 0xC0000368L) +#endif + +#ifndef STATUS_INVALID_DEVICE_OBJECT_PARAMETER +# define STATUS_INVALID_DEVICE_OBJECT_PARAMETER ((NTSTATUS) 0xC0000369L) +#endif + +#ifndef STATUS_MCA_OCCURED +# define STATUS_MCA_OCCURED ((NTSTATUS) 0xC000036AL) +#endif + +#ifndef STATUS_DRIVER_BLOCKED_CRITICAL +# define STATUS_DRIVER_BLOCKED_CRITICAL ((NTSTATUS) 0xC000036BL) +#endif + +#ifndef STATUS_DRIVER_BLOCKED +# define STATUS_DRIVER_BLOCKED ((NTSTATUS) 0xC000036CL) +#endif + +#ifndef STATUS_DRIVER_DATABASE_ERROR +# define STATUS_DRIVER_DATABASE_ERROR ((NTSTATUS) 0xC000036DL) +#endif + +#ifndef STATUS_SYSTEM_HIVE_TOO_LARGE +# define STATUS_SYSTEM_HIVE_TOO_LARGE ((NTSTATUS) 0xC000036EL) +#endif + +#ifndef STATUS_INVALID_IMPORT_OF_NON_DLL +# define STATUS_INVALID_IMPORT_OF_NON_DLL ((NTSTATUS) 0xC000036FL) +#endif + +#ifndef STATUS_DS_SHUTTING_DOWN +# define STATUS_DS_SHUTTING_DOWN ((NTSTATUS) 0x40000370L) +#endif + +#ifndef STATUS_NO_SECRETS +# define STATUS_NO_SECRETS ((NTSTATUS) 
0xC0000371L) +#endif + +#ifndef STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY +# define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY ((NTSTATUS) 0xC0000372L) +#endif + +#ifndef STATUS_FAILED_STACK_SWITCH +# define STATUS_FAILED_STACK_SWITCH ((NTSTATUS) 0xC0000373L) +#endif + +#ifndef STATUS_HEAP_CORRUPTION +# define STATUS_HEAP_CORRUPTION ((NTSTATUS) 0xC0000374L) +#endif + +#ifndef STATUS_SMARTCARD_WRONG_PIN +# define STATUS_SMARTCARD_WRONG_PIN ((NTSTATUS) 0xC0000380L) +#endif + +#ifndef STATUS_SMARTCARD_CARD_BLOCKED +# define STATUS_SMARTCARD_CARD_BLOCKED ((NTSTATUS) 0xC0000381L) +#endif + +#ifndef STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED +# define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED ((NTSTATUS) 0xC0000382L) +#endif + +#ifndef STATUS_SMARTCARD_NO_CARD +# define STATUS_SMARTCARD_NO_CARD ((NTSTATUS) 0xC0000383L) +#endif + +#ifndef STATUS_SMARTCARD_NO_KEY_CONTAINER +# define STATUS_SMARTCARD_NO_KEY_CONTAINER ((NTSTATUS) 0xC0000384L) +#endif + +#ifndef STATUS_SMARTCARD_NO_CERTIFICATE +# define STATUS_SMARTCARD_NO_CERTIFICATE ((NTSTATUS) 0xC0000385L) +#endif + +#ifndef STATUS_SMARTCARD_NO_KEYSET +# define STATUS_SMARTCARD_NO_KEYSET ((NTSTATUS) 0xC0000386L) +#endif + +#ifndef STATUS_SMARTCARD_IO_ERROR +# define STATUS_SMARTCARD_IO_ERROR ((NTSTATUS) 0xC0000387L) +#endif + +#ifndef STATUS_DOWNGRADE_DETECTED +# define STATUS_DOWNGRADE_DETECTED ((NTSTATUS) 0xC0000388L) +#endif + +#ifndef STATUS_SMARTCARD_CERT_REVOKED +# define STATUS_SMARTCARD_CERT_REVOKED ((NTSTATUS) 0xC0000389L) +#endif + +#ifndef STATUS_ISSUING_CA_UNTRUSTED +# define STATUS_ISSUING_CA_UNTRUSTED ((NTSTATUS) 0xC000038AL) +#endif + +#ifndef STATUS_REVOCATION_OFFLINE_C +# define STATUS_REVOCATION_OFFLINE_C ((NTSTATUS) 0xC000038BL) +#endif + +#ifndef STATUS_PKINIT_CLIENT_FAILURE +# define STATUS_PKINIT_CLIENT_FAILURE ((NTSTATUS) 0xC000038CL) +#endif + +#ifndef STATUS_SMARTCARD_CERT_EXPIRED +# define STATUS_SMARTCARD_CERT_EXPIRED ((NTSTATUS) 0xC000038DL) +#endif + +#ifndef STATUS_DRIVER_FAILED_PRIOR_UNLOAD +# define STATUS_DRIVER_FAILED_PRIOR_UNLOAD ((NTSTATUS) 0xC000038EL) +#endif + +#ifndef STATUS_SMARTCARD_SILENT_CONTEXT +# define STATUS_SMARTCARD_SILENT_CONTEXT ((NTSTATUS) 0xC000038FL) +#endif + +#ifndef STATUS_PER_USER_TRUST_QUOTA_EXCEEDED +# define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000401L) +#endif + +#ifndef STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED +# define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000402L) +#endif + +#ifndef STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED +# define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000403L) +#endif + +#ifndef STATUS_DS_NAME_NOT_UNIQUE +# define STATUS_DS_NAME_NOT_UNIQUE ((NTSTATUS) 0xC0000404L) +#endif + +#ifndef STATUS_DS_DUPLICATE_ID_FOUND +# define STATUS_DS_DUPLICATE_ID_FOUND ((NTSTATUS) 0xC0000405L) +#endif + +#ifndef STATUS_DS_GROUP_CONVERSION_ERROR +# define STATUS_DS_GROUP_CONVERSION_ERROR ((NTSTATUS) 0xC0000406L) +#endif + +#ifndef STATUS_VOLSNAP_PREPARE_HIBERNATE +# define STATUS_VOLSNAP_PREPARE_HIBERNATE ((NTSTATUS) 0xC0000407L) +#endif + +#ifndef STATUS_USER2USER_REQUIRED +# define STATUS_USER2USER_REQUIRED ((NTSTATUS) 0xC0000408L) +#endif + +#ifndef STATUS_STACK_BUFFER_OVERRUN +# define STATUS_STACK_BUFFER_OVERRUN ((NTSTATUS) 0xC0000409L) +#endif + +#ifndef STATUS_NO_S4U_PROT_SUPPORT +# define STATUS_NO_S4U_PROT_SUPPORT ((NTSTATUS) 0xC000040AL) +#endif + +#ifndef STATUS_CROSSREALM_DELEGATION_FAILURE +# define STATUS_CROSSREALM_DELEGATION_FAILURE ((NTSTATUS) 0xC000040BL) +#endif + +#ifndef STATUS_REVOCATION_OFFLINE_KDC +# define 
STATUS_REVOCATION_OFFLINE_KDC ((NTSTATUS) 0xC000040CL) +#endif + +#ifndef STATUS_ISSUING_CA_UNTRUSTED_KDC +# define STATUS_ISSUING_CA_UNTRUSTED_KDC ((NTSTATUS) 0xC000040DL) +#endif + +#ifndef STATUS_KDC_CERT_EXPIRED +# define STATUS_KDC_CERT_EXPIRED ((NTSTATUS) 0xC000040EL) +#endif + +#ifndef STATUS_KDC_CERT_REVOKED +# define STATUS_KDC_CERT_REVOKED ((NTSTATUS) 0xC000040FL) +#endif + +#ifndef STATUS_PARAMETER_QUOTA_EXCEEDED +# define STATUS_PARAMETER_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000410L) +#endif + +#ifndef STATUS_HIBERNATION_FAILURE +# define STATUS_HIBERNATION_FAILURE ((NTSTATUS) 0xC0000411L) +#endif + +#ifndef STATUS_DELAY_LOAD_FAILED +# define STATUS_DELAY_LOAD_FAILED ((NTSTATUS) 0xC0000412L) +#endif + +#ifndef STATUS_AUTHENTICATION_FIREWALL_FAILED +# define STATUS_AUTHENTICATION_FIREWALL_FAILED ((NTSTATUS) 0xC0000413L) +#endif + +#ifndef STATUS_VDM_DISALLOWED +# define STATUS_VDM_DISALLOWED ((NTSTATUS) 0xC0000414L) +#endif + +#ifndef STATUS_HUNG_DISPLAY_DRIVER_THREAD +# define STATUS_HUNG_DISPLAY_DRIVER_THREAD ((NTSTATUS) 0xC0000415L) +#endif + +#ifndef STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE +# define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE ((NTSTATUS) 0xC0000416L) +#endif + +#ifndef STATUS_INVALID_CRUNTIME_PARAMETER +# define STATUS_INVALID_CRUNTIME_PARAMETER ((NTSTATUS) 0xC0000417L) +#endif + +#ifndef STATUS_NTLM_BLOCKED +# define STATUS_NTLM_BLOCKED ((NTSTATUS) 0xC0000418L) +#endif + +#ifndef STATUS_DS_SRC_SID_EXISTS_IN_FOREST +# define STATUS_DS_SRC_SID_EXISTS_IN_FOREST ((NTSTATUS) 0xC0000419L) +#endif + +#ifndef STATUS_DS_DOMAIN_NAME_EXISTS_IN_FOREST +# define STATUS_DS_DOMAIN_NAME_EXISTS_IN_FOREST ((NTSTATUS) 0xC000041AL) +#endif + +#ifndef STATUS_DS_FLAT_NAME_EXISTS_IN_FOREST +# define STATUS_DS_FLAT_NAME_EXISTS_IN_FOREST ((NTSTATUS) 0xC000041BL) +#endif + +#ifndef STATUS_INVALID_USER_PRINCIPAL_NAME +# define STATUS_INVALID_USER_PRINCIPAL_NAME ((NTSTATUS) 0xC000041CL) +#endif + +#ifndef STATUS_FATAL_USER_CALLBACK_EXCEPTION +# define STATUS_FATAL_USER_CALLBACK_EXCEPTION ((NTSTATUS) 0xC000041DL) +#endif + +#ifndef STATUS_ASSERTION_FAILURE +# define STATUS_ASSERTION_FAILURE ((NTSTATUS) 0xC0000420L) +#endif + +#ifndef STATUS_VERIFIER_STOP +# define STATUS_VERIFIER_STOP ((NTSTATUS) 0xC0000421L) +#endif + +#ifndef STATUS_CALLBACK_POP_STACK +# define STATUS_CALLBACK_POP_STACK ((NTSTATUS) 0xC0000423L) +#endif + +#ifndef STATUS_INCOMPATIBLE_DRIVER_BLOCKED +# define STATUS_INCOMPATIBLE_DRIVER_BLOCKED ((NTSTATUS) 0xC0000424L) +#endif + +#ifndef STATUS_HIVE_UNLOADED +# define STATUS_HIVE_UNLOADED ((NTSTATUS) 0xC0000425L) +#endif + +#ifndef STATUS_COMPRESSION_DISABLED +# define STATUS_COMPRESSION_DISABLED ((NTSTATUS) 0xC0000426L) +#endif + +#ifndef STATUS_FILE_SYSTEM_LIMITATION +# define STATUS_FILE_SYSTEM_LIMITATION ((NTSTATUS) 0xC0000427L) +#endif + +#ifndef STATUS_INVALID_IMAGE_HASH +# define STATUS_INVALID_IMAGE_HASH ((NTSTATUS) 0xC0000428L) +#endif + +#ifndef STATUS_NOT_CAPABLE +# define STATUS_NOT_CAPABLE ((NTSTATUS) 0xC0000429L) +#endif + +#ifndef STATUS_REQUEST_OUT_OF_SEQUENCE +# define STATUS_REQUEST_OUT_OF_SEQUENCE ((NTSTATUS) 0xC000042AL) +#endif + +#ifndef STATUS_IMPLEMENTATION_LIMIT +# define STATUS_IMPLEMENTATION_LIMIT ((NTSTATUS) 0xC000042BL) +#endif + +#ifndef STATUS_ELEVATION_REQUIRED +# define STATUS_ELEVATION_REQUIRED ((NTSTATUS) 0xC000042CL) +#endif + +#ifndef STATUS_NO_SECURITY_CONTEXT +# define STATUS_NO_SECURITY_CONTEXT ((NTSTATUS) 0xC000042DL) +#endif + +#ifndef STATUS_PKU2U_CERT_FAILURE +# define 
STATUS_PKU2U_CERT_FAILURE ((NTSTATUS) 0xC000042FL) +#endif + +#ifndef STATUS_BEYOND_VDL +# define STATUS_BEYOND_VDL ((NTSTATUS) 0xC0000432L) +#endif + +#ifndef STATUS_ENCOUNTERED_WRITE_IN_PROGRESS +# define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS ((NTSTATUS) 0xC0000433L) +#endif + +#ifndef STATUS_PTE_CHANGED +# define STATUS_PTE_CHANGED ((NTSTATUS) 0xC0000434L) +#endif + +#ifndef STATUS_PURGE_FAILED +# define STATUS_PURGE_FAILED ((NTSTATUS) 0xC0000435L) +#endif + +#ifndef STATUS_CRED_REQUIRES_CONFIRMATION +# define STATUS_CRED_REQUIRES_CONFIRMATION ((NTSTATUS) 0xC0000440L) +#endif + +#ifndef STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE +# define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE ((NTSTATUS) 0xC0000441L) +#endif + +#ifndef STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER +# define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER ((NTSTATUS) 0xC0000442L) +#endif + +#ifndef STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE +# define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE ((NTSTATUS) 0xC0000443L) +#endif + +#ifndef STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE +# define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE ((NTSTATUS) 0xC0000444L) +#endif + +#ifndef STATUS_CS_ENCRYPTION_FILE_NOT_CSE +# define STATUS_CS_ENCRYPTION_FILE_NOT_CSE ((NTSTATUS) 0xC0000445L) +#endif + +#ifndef STATUS_INVALID_LABEL +# define STATUS_INVALID_LABEL ((NTSTATUS) 0xC0000446L) +#endif + +#ifndef STATUS_DRIVER_PROCESS_TERMINATED +# define STATUS_DRIVER_PROCESS_TERMINATED ((NTSTATUS) 0xC0000450L) +#endif + +#ifndef STATUS_AMBIGUOUS_SYSTEM_DEVICE +# define STATUS_AMBIGUOUS_SYSTEM_DEVICE ((NTSTATUS) 0xC0000451L) +#endif + +#ifndef STATUS_SYSTEM_DEVICE_NOT_FOUND +# define STATUS_SYSTEM_DEVICE_NOT_FOUND ((NTSTATUS) 0xC0000452L) +#endif + +#ifndef STATUS_RESTART_BOOT_APPLICATION +# define STATUS_RESTART_BOOT_APPLICATION ((NTSTATUS) 0xC0000453L) +#endif + +#ifndef STATUS_INSUFFICIENT_NVRAM_RESOURCES +# define STATUS_INSUFFICIENT_NVRAM_RESOURCES ((NTSTATUS) 0xC0000454L) +#endif + +#ifndef STATUS_INVALID_TASK_NAME +# define STATUS_INVALID_TASK_NAME ((NTSTATUS) 0xC0000500L) +#endif + +#ifndef STATUS_INVALID_TASK_INDEX +# define STATUS_INVALID_TASK_INDEX ((NTSTATUS) 0xC0000501L) +#endif + +#ifndef STATUS_THREAD_ALREADY_IN_TASK +# define STATUS_THREAD_ALREADY_IN_TASK ((NTSTATUS) 0xC0000502L) +#endif + +#ifndef STATUS_CALLBACK_BYPASS +# define STATUS_CALLBACK_BYPASS ((NTSTATUS) 0xC0000503L) +#endif + +#ifndef STATUS_FAIL_FAST_EXCEPTION +# define STATUS_FAIL_FAST_EXCEPTION ((NTSTATUS) 0xC0000602L) +#endif + +#ifndef STATUS_IMAGE_CERT_REVOKED +# define STATUS_IMAGE_CERT_REVOKED ((NTSTATUS) 0xC0000603L) +#endif + +#ifndef STATUS_PORT_CLOSED +# define STATUS_PORT_CLOSED ((NTSTATUS) 0xC0000700L) +#endif + +#ifndef STATUS_MESSAGE_LOST +# define STATUS_MESSAGE_LOST ((NTSTATUS) 0xC0000701L) +#endif + +#ifndef STATUS_INVALID_MESSAGE +# define STATUS_INVALID_MESSAGE ((NTSTATUS) 0xC0000702L) +#endif + +#ifndef STATUS_REQUEST_CANCELED +# define STATUS_REQUEST_CANCELED ((NTSTATUS) 0xC0000703L) +#endif + +#ifndef STATUS_RECURSIVE_DISPATCH +# define STATUS_RECURSIVE_DISPATCH ((NTSTATUS) 0xC0000704L) +#endif + +#ifndef STATUS_LPC_RECEIVE_BUFFER_EXPECTED +# define STATUS_LPC_RECEIVE_BUFFER_EXPECTED ((NTSTATUS) 0xC0000705L) +#endif + +#ifndef STATUS_LPC_INVALID_CONNECTION_USAGE +# define STATUS_LPC_INVALID_CONNECTION_USAGE ((NTSTATUS) 0xC0000706L) +#endif + +#ifndef STATUS_LPC_REQUESTS_NOT_ALLOWED +# define STATUS_LPC_REQUESTS_NOT_ALLOWED ((NTSTATUS) 0xC0000707L) +#endif + +#ifndef STATUS_RESOURCE_IN_USE +# define STATUS_RESOURCE_IN_USE ((NTSTATUS) 0xC0000708L) +#endif 
+ +#ifndef STATUS_HARDWARE_MEMORY_ERROR +# define STATUS_HARDWARE_MEMORY_ERROR ((NTSTATUS) 0xC0000709L) +#endif + +#ifndef STATUS_THREADPOOL_HANDLE_EXCEPTION +# define STATUS_THREADPOOL_HANDLE_EXCEPTION ((NTSTATUS) 0xC000070AL) +#endif + +#ifndef STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED +# define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070BL) +#endif + +#ifndef STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED +# define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070CL) +#endif + +#ifndef STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED +# define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070DL) +#endif + +#ifndef STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED +# define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED ((NTSTATUS) 0xC000070EL) +#endif + +#ifndef STATUS_THREADPOOL_RELEASED_DURING_OPERATION +# define STATUS_THREADPOOL_RELEASED_DURING_OPERATION ((NTSTATUS) 0xC000070FL) +#endif + +#ifndef STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING +# define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING ((NTSTATUS) 0xC0000710L) +#endif + +#ifndef STATUS_APC_RETURNED_WHILE_IMPERSONATING +# define STATUS_APC_RETURNED_WHILE_IMPERSONATING ((NTSTATUS) 0xC0000711L) +#endif + +#ifndef STATUS_PROCESS_IS_PROTECTED +# define STATUS_PROCESS_IS_PROTECTED ((NTSTATUS) 0xC0000712L) +#endif + +#ifndef STATUS_MCA_EXCEPTION +# define STATUS_MCA_EXCEPTION ((NTSTATUS) 0xC0000713L) +#endif + +#ifndef STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE +# define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE ((NTSTATUS) 0xC0000714L) +#endif + +#ifndef STATUS_SYMLINK_CLASS_DISABLED +# define STATUS_SYMLINK_CLASS_DISABLED ((NTSTATUS) 0xC0000715L) +#endif + +#ifndef STATUS_INVALID_IDN_NORMALIZATION +# define STATUS_INVALID_IDN_NORMALIZATION ((NTSTATUS) 0xC0000716L) +#endif + +#ifndef STATUS_NO_UNICODE_TRANSLATION +# define STATUS_NO_UNICODE_TRANSLATION ((NTSTATUS) 0xC0000717L) +#endif + +#ifndef STATUS_ALREADY_REGISTERED +# define STATUS_ALREADY_REGISTERED ((NTSTATUS) 0xC0000718L) +#endif + +#ifndef STATUS_CONTEXT_MISMATCH +# define STATUS_CONTEXT_MISMATCH ((NTSTATUS) 0xC0000719L) +#endif + +#ifndef STATUS_PORT_ALREADY_HAS_COMPLETION_LIST +# define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST ((NTSTATUS) 0xC000071AL) +#endif + +#ifndef STATUS_CALLBACK_RETURNED_THREAD_PRIORITY +# define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY ((NTSTATUS) 0xC000071BL) +#endif + +#ifndef STATUS_INVALID_THREAD +# define STATUS_INVALID_THREAD ((NTSTATUS) 0xC000071CL) +#endif + +#ifndef STATUS_CALLBACK_RETURNED_TRANSACTION +# define STATUS_CALLBACK_RETURNED_TRANSACTION ((NTSTATUS) 0xC000071DL) +#endif + +#ifndef STATUS_CALLBACK_RETURNED_LDR_LOCK +# define STATUS_CALLBACK_RETURNED_LDR_LOCK ((NTSTATUS) 0xC000071EL) +#endif + +#ifndef STATUS_CALLBACK_RETURNED_LANG +# define STATUS_CALLBACK_RETURNED_LANG ((NTSTATUS) 0xC000071FL) +#endif + +#ifndef STATUS_CALLBACK_RETURNED_PRI_BACK +# define STATUS_CALLBACK_RETURNED_PRI_BACK ((NTSTATUS) 0xC0000720L) +#endif + +#ifndef STATUS_CALLBACK_RETURNED_THREAD_AFFINITY +# define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY ((NTSTATUS) 0xC0000721L) +#endif + +#ifndef STATUS_DISK_REPAIR_DISABLED +# define STATUS_DISK_REPAIR_DISABLED ((NTSTATUS) 0xC0000800L) +#endif + +#ifndef STATUS_DS_DOMAIN_RENAME_IN_PROGRESS +# define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS ((NTSTATUS) 0xC0000801L) +#endif + +#ifndef STATUS_DISK_QUOTA_EXCEEDED +# define STATUS_DISK_QUOTA_EXCEEDED ((NTSTATUS) 0xC0000802L) +#endif + +#ifndef STATUS_DATA_LOST_REPAIR 
+# define STATUS_DATA_LOST_REPAIR ((NTSTATUS) 0x80000803L) +#endif + +#ifndef STATUS_CONTENT_BLOCKED +# define STATUS_CONTENT_BLOCKED ((NTSTATUS) 0xC0000804L) +#endif + +#ifndef STATUS_BAD_CLUSTERS +# define STATUS_BAD_CLUSTERS ((NTSTATUS) 0xC0000805L) +#endif + +#ifndef STATUS_VOLUME_DIRTY +# define STATUS_VOLUME_DIRTY ((NTSTATUS) 0xC0000806L) +#endif + +#ifndef STATUS_FILE_CHECKED_OUT +# define STATUS_FILE_CHECKED_OUT ((NTSTATUS) 0xC0000901L) +#endif + +#ifndef STATUS_CHECKOUT_REQUIRED +# define STATUS_CHECKOUT_REQUIRED ((NTSTATUS) 0xC0000902L) +#endif + +#ifndef STATUS_BAD_FILE_TYPE +# define STATUS_BAD_FILE_TYPE ((NTSTATUS) 0xC0000903L) +#endif + +#ifndef STATUS_FILE_TOO_LARGE +# define STATUS_FILE_TOO_LARGE ((NTSTATUS) 0xC0000904L) +#endif + +#ifndef STATUS_FORMS_AUTH_REQUIRED +# define STATUS_FORMS_AUTH_REQUIRED ((NTSTATUS) 0xC0000905L) +#endif + +#ifndef STATUS_VIRUS_INFECTED +# define STATUS_VIRUS_INFECTED ((NTSTATUS) 0xC0000906L) +#endif + +#ifndef STATUS_VIRUS_DELETED +# define STATUS_VIRUS_DELETED ((NTSTATUS) 0xC0000907L) +#endif + +#ifndef STATUS_BAD_MCFG_TABLE +# define STATUS_BAD_MCFG_TABLE ((NTSTATUS) 0xC0000908L) +#endif + +#ifndef STATUS_CANNOT_BREAK_OPLOCK +# define STATUS_CANNOT_BREAK_OPLOCK ((NTSTATUS) 0xC0000909L) +#endif + +#ifndef STATUS_WOW_ASSERTION +# define STATUS_WOW_ASSERTION ((NTSTATUS) 0xC0009898L) +#endif + +#ifndef STATUS_INVALID_SIGNATURE +# define STATUS_INVALID_SIGNATURE ((NTSTATUS) 0xC000A000L) +#endif + +#ifndef STATUS_HMAC_NOT_SUPPORTED +# define STATUS_HMAC_NOT_SUPPORTED ((NTSTATUS) 0xC000A001L) +#endif + +#ifndef STATUS_AUTH_TAG_MISMATCH +# define STATUS_AUTH_TAG_MISMATCH ((NTSTATUS) 0xC000A002L) +#endif + +#ifndef STATUS_IPSEC_QUEUE_OVERFLOW +# define STATUS_IPSEC_QUEUE_OVERFLOW ((NTSTATUS) 0xC000A010L) +#endif + +#ifndef STATUS_ND_QUEUE_OVERFLOW +# define STATUS_ND_QUEUE_OVERFLOW ((NTSTATUS) 0xC000A011L) +#endif + +#ifndef STATUS_HOPLIMIT_EXCEEDED +# define STATUS_HOPLIMIT_EXCEEDED ((NTSTATUS) 0xC000A012L) +#endif + +#ifndef STATUS_PROTOCOL_NOT_SUPPORTED +# define STATUS_PROTOCOL_NOT_SUPPORTED ((NTSTATUS) 0xC000A013L) +#endif + +#ifndef STATUS_FASTPATH_REJECTED +# define STATUS_FASTPATH_REJECTED ((NTSTATUS) 0xC000A014L) +#endif + +#ifndef STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED +# define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED ((NTSTATUS) 0xC000A080L) +#endif + +#ifndef STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR +# define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR ((NTSTATUS) 0xC000A081L) +#endif + +#ifndef STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR +# define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR ((NTSTATUS) 0xC000A082L) +#endif + +#ifndef STATUS_XML_PARSE_ERROR +# define STATUS_XML_PARSE_ERROR ((NTSTATUS) 0xC000A083L) +#endif + +#ifndef STATUS_XMLDSIG_ERROR +# define STATUS_XMLDSIG_ERROR ((NTSTATUS) 0xC000A084L) +#endif + +#ifndef STATUS_WRONG_COMPARTMENT +# define STATUS_WRONG_COMPARTMENT ((NTSTATUS) 0xC000A085L) +#endif + +#ifndef STATUS_AUTHIP_FAILURE +# define STATUS_AUTHIP_FAILURE ((NTSTATUS) 0xC000A086L) +#endif + +#ifndef STATUS_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS +# define STATUS_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS ((NTSTATUS) 0xC000A087L) +#endif + +#ifndef STATUS_DS_OID_NOT_FOUND +# define STATUS_DS_OID_NOT_FOUND ((NTSTATUS) 0xC000A088L) +#endif + +#ifndef STATUS_HASH_NOT_SUPPORTED +# define STATUS_HASH_NOT_SUPPORTED ((NTSTATUS) 0xC000A100L) +#endif + +#ifndef STATUS_HASH_NOT_PRESENT +# define STATUS_HASH_NOT_PRESENT ((NTSTATUS) 0xC000A101L) +#endif + +/* This is not the 
NTSTATUS_FROM_WIN32 that the DDK provides, because the */ +/* DDK got it wrong! */ +#ifdef NTSTATUS_FROM_WIN32 +# undef NTSTATUS_FROM_WIN32 +#endif +#define NTSTATUS_FROM_WIN32(error) ((NTSTATUS) (error) <= 0 ? \ + ((NTSTATUS) (error)) : ((NTSTATUS) (((error) & 0x0000FFFF) | \ + (FACILITY_NTWIN32 << 16) | ERROR_SEVERITY_WARNING))) + +#ifndef JOB_OBJECT_LIMIT_PROCESS_MEMORY +# define JOB_OBJECT_LIMIT_PROCESS_MEMORY 0x00000100 +#endif +#ifndef JOB_OBJECT_LIMIT_JOB_MEMORY +# define JOB_OBJECT_LIMIT_JOB_MEMORY 0x00000200 +#endif +#ifndef JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION +# define JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION 0x00000400 +#endif +#ifndef JOB_OBJECT_LIMIT_BREAKAWAY_OK +# define JOB_OBJECT_LIMIT_BREAKAWAY_OK 0x00000800 +#endif +#ifndef JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK +# define JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK 0x00001000 +#endif +#ifndef JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE +# define JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE 0x00002000 +#endif + +/* from winternl.h */ +typedef struct _UNICODE_STRING { + USHORT Length; + USHORT MaximumLength; + PWSTR Buffer; +} UNICODE_STRING, *PUNICODE_STRING; + +typedef const UNICODE_STRING *PCUNICODE_STRING; + +/* from ntifs.h */ +#ifndef DEVICE_TYPE +# define DEVICE_TYPE DWORD +#endif + +/* MinGW already has a definition for REPARSE_DATA_BUFFER, but mingw-w64 does + * not. + */ +#if defined(_MSC_VER) || defined(__MINGW64_VERSION_MAJOR) + typedef struct _REPARSE_DATA_BUFFER { + ULONG ReparseTag; + USHORT ReparseDataLength; + USHORT Reserved; + union { + struct { + USHORT SubstituteNameOffset; + USHORT SubstituteNameLength; + USHORT PrintNameOffset; + USHORT PrintNameLength; + ULONG Flags; + WCHAR PathBuffer[1]; + } SymbolicLinkReparseBuffer; + struct { + USHORT SubstituteNameOffset; + USHORT SubstituteNameLength; + USHORT PrintNameOffset; + USHORT PrintNameLength; + WCHAR PathBuffer[1]; + } MountPointReparseBuffer; + struct { + UCHAR DataBuffer[1]; + } GenericReparseBuffer; + } DUMMYUNIONNAME; + } REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER; +#endif + +typedef struct _IO_STATUS_BLOCK { + union { + NTSTATUS Status; + PVOID Pointer; + } DUMMYUNIONNAME; + ULONG_PTR Information; +} IO_STATUS_BLOCK, *PIO_STATUS_BLOCK; + +typedef enum _FILE_INFORMATION_CLASS { + FileDirectoryInformation = 1, + FileFullDirectoryInformation, + FileBothDirectoryInformation, + FileBasicInformation, + FileStandardInformation, + FileInternalInformation, + FileEaInformation, + FileAccessInformation, + FileNameInformation, + FileRenameInformation, + FileLinkInformation, + FileNamesInformation, + FileDispositionInformation, + FilePositionInformation, + FileFullEaInformation, + FileModeInformation, + FileAlignmentInformation, + FileAllInformation, + FileAllocationInformation, + FileEndOfFileInformation, + FileAlternateNameInformation, + FileStreamInformation, + FilePipeInformation, + FilePipeLocalInformation, + FilePipeRemoteInformation, + FileMailslotQueryInformation, + FileMailslotSetInformation, + FileCompressionInformation, + FileObjectIdInformation, + FileCompletionInformation, + FileMoveClusterInformation, + FileQuotaInformation, + FileReparsePointInformation, + FileNetworkOpenInformation, + FileAttributeTagInformation, + FileTrackingInformation, + FileIdBothDirectoryInformation, + FileIdFullDirectoryInformation, + FileValidDataLengthInformation, + FileShortNameInformation, + FileIoCompletionNotificationInformation, + FileIoStatusBlockRangeInformation, + FileIoPriorityHintInformation, + FileSfioReserveInformation, + FileSfioVolumeInformation, + 
FileHardLinkInformation, + FileProcessIdsUsingFileInformation, + FileNormalizedNameInformation, + FileNetworkPhysicalNameInformation, + FileIdGlobalTxDirectoryInformation, + FileIsRemoteDeviceInformation, + FileAttributeCacheInformation, + FileNumaNodeInformation, + FileStandardLinkInformation, + FileRemoteProtocolInformation, + FileMaximumInformation +} FILE_INFORMATION_CLASS, *PFILE_INFORMATION_CLASS; + +typedef struct _FILE_DIRECTORY_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_DIRECTORY_INFORMATION, *PFILE_DIRECTORY_INFORMATION; + +typedef struct _FILE_BOTH_DIR_INFORMATION { + ULONG NextEntryOffset; + ULONG FileIndex; + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + LARGE_INTEGER EndOfFile; + LARGE_INTEGER AllocationSize; + ULONG FileAttributes; + ULONG FileNameLength; + ULONG EaSize; + CCHAR ShortNameLength; + WCHAR ShortName[12]; + WCHAR FileName[1]; +} FILE_BOTH_DIR_INFORMATION, *PFILE_BOTH_DIR_INFORMATION; + +typedef struct _FILE_BASIC_INFORMATION { + LARGE_INTEGER CreationTime; + LARGE_INTEGER LastAccessTime; + LARGE_INTEGER LastWriteTime; + LARGE_INTEGER ChangeTime; + DWORD FileAttributes; +} FILE_BASIC_INFORMATION, *PFILE_BASIC_INFORMATION; + +typedef struct _FILE_STANDARD_INFORMATION { + LARGE_INTEGER AllocationSize; + LARGE_INTEGER EndOfFile; + ULONG NumberOfLinks; + BOOLEAN DeletePending; + BOOLEAN Directory; +} FILE_STANDARD_INFORMATION, *PFILE_STANDARD_INFORMATION; + +typedef struct _FILE_INTERNAL_INFORMATION { + LARGE_INTEGER IndexNumber; +} FILE_INTERNAL_INFORMATION, *PFILE_INTERNAL_INFORMATION; + +typedef struct _FILE_EA_INFORMATION { + ULONG EaSize; +} FILE_EA_INFORMATION, *PFILE_EA_INFORMATION; + +typedef struct _FILE_ACCESS_INFORMATION { + ACCESS_MASK AccessFlags; +} FILE_ACCESS_INFORMATION, *PFILE_ACCESS_INFORMATION; + +typedef struct _FILE_POSITION_INFORMATION { + LARGE_INTEGER CurrentByteOffset; +} FILE_POSITION_INFORMATION, *PFILE_POSITION_INFORMATION; + +typedef struct _FILE_MODE_INFORMATION { + ULONG Mode; +} FILE_MODE_INFORMATION, *PFILE_MODE_INFORMATION; + +typedef struct _FILE_ALIGNMENT_INFORMATION { + ULONG AlignmentRequirement; +} FILE_ALIGNMENT_INFORMATION, *PFILE_ALIGNMENT_INFORMATION; + +typedef struct _FILE_NAME_INFORMATION { + ULONG FileNameLength; + WCHAR FileName[1]; +} FILE_NAME_INFORMATION, *PFILE_NAME_INFORMATION; + +typedef struct _FILE_END_OF_FILE_INFORMATION { + LARGE_INTEGER EndOfFile; +} FILE_END_OF_FILE_INFORMATION, *PFILE_END_OF_FILE_INFORMATION; + +typedef struct _FILE_ALL_INFORMATION { + FILE_BASIC_INFORMATION BasicInformation; + FILE_STANDARD_INFORMATION StandardInformation; + FILE_INTERNAL_INFORMATION InternalInformation; + FILE_EA_INFORMATION EaInformation; + FILE_ACCESS_INFORMATION AccessInformation; + FILE_POSITION_INFORMATION PositionInformation; + FILE_MODE_INFORMATION ModeInformation; + FILE_ALIGNMENT_INFORMATION AlignmentInformation; + FILE_NAME_INFORMATION NameInformation; +} FILE_ALL_INFORMATION, *PFILE_ALL_INFORMATION; + +typedef struct _FILE_DISPOSITION_INFORMATION { + BOOLEAN DeleteFile; +} FILE_DISPOSITION_INFORMATION, *PFILE_DISPOSITION_INFORMATION; + +typedef struct _FILE_PIPE_LOCAL_INFORMATION { + ULONG NamedPipeType; + ULONG NamedPipeConfiguration; + ULONG MaximumInstances; + ULONG 
CurrentInstances; + ULONG InboundQuota; + ULONG ReadDataAvailable; + ULONG OutboundQuota; + ULONG WriteQuotaAvailable; + ULONG NamedPipeState; + ULONG NamedPipeEnd; +} FILE_PIPE_LOCAL_INFORMATION, *PFILE_PIPE_LOCAL_INFORMATION; + +#define FILE_SYNCHRONOUS_IO_ALERT 0x00000010 +#define FILE_SYNCHRONOUS_IO_NONALERT 0x00000020 + +typedef enum _FS_INFORMATION_CLASS { + FileFsVolumeInformation = 1, + FileFsLabelInformation = 2, + FileFsSizeInformation = 3, + FileFsDeviceInformation = 4, + FileFsAttributeInformation = 5, + FileFsControlInformation = 6, + FileFsFullSizeInformation = 7, + FileFsObjectIdInformation = 8, + FileFsDriverPathInformation = 9, + FileFsVolumeFlagsInformation = 10, + FileFsSectorSizeInformation = 11 +} FS_INFORMATION_CLASS, *PFS_INFORMATION_CLASS; + +typedef struct _FILE_FS_VOLUME_INFORMATION { + LARGE_INTEGER VolumeCreationTime; + ULONG VolumeSerialNumber; + ULONG VolumeLabelLength; + BOOLEAN SupportsObjects; + WCHAR VolumeLabel[1]; +} FILE_FS_VOLUME_INFORMATION, *PFILE_FS_VOLUME_INFORMATION; + +typedef struct _FILE_FS_LABEL_INFORMATION { + ULONG VolumeLabelLength; + WCHAR VolumeLabel[1]; +} FILE_FS_LABEL_INFORMATION, *PFILE_FS_LABEL_INFORMATION; + +typedef struct _FILE_FS_SIZE_INFORMATION { + LARGE_INTEGER TotalAllocationUnits; + LARGE_INTEGER AvailableAllocationUnits; + ULONG SectorsPerAllocationUnit; + ULONG BytesPerSector; +} FILE_FS_SIZE_INFORMATION, *PFILE_FS_SIZE_INFORMATION; + +typedef struct _FILE_FS_DEVICE_INFORMATION { + DEVICE_TYPE DeviceType; + ULONG Characteristics; +} FILE_FS_DEVICE_INFORMATION, *PFILE_FS_DEVICE_INFORMATION; + +typedef struct _FILE_FS_ATTRIBUTE_INFORMATION { + ULONG FileSystemAttributes; + LONG MaximumComponentNameLength; + ULONG FileSystemNameLength; + WCHAR FileSystemName[1]; +} FILE_FS_ATTRIBUTE_INFORMATION, *PFILE_FS_ATTRIBUTE_INFORMATION; + +typedef struct _FILE_FS_CONTROL_INFORMATION { + LARGE_INTEGER FreeSpaceStartFiltering; + LARGE_INTEGER FreeSpaceThreshold; + LARGE_INTEGER FreeSpaceStopFiltering; + LARGE_INTEGER DefaultQuotaThreshold; + LARGE_INTEGER DefaultQuotaLimit; + ULONG FileSystemControlFlags; +} FILE_FS_CONTROL_INFORMATION, *PFILE_FS_CONTROL_INFORMATION; + +typedef struct _FILE_FS_FULL_SIZE_INFORMATION { + LARGE_INTEGER TotalAllocationUnits; + LARGE_INTEGER CallerAvailableAllocationUnits; + LARGE_INTEGER ActualAvailableAllocationUnits; + ULONG SectorsPerAllocationUnit; + ULONG BytesPerSector; +} FILE_FS_FULL_SIZE_INFORMATION, *PFILE_FS_FULL_SIZE_INFORMATION; + +typedef struct _FILE_FS_OBJECTID_INFORMATION { + UCHAR ObjectId[16]; + UCHAR ExtendedInfo[48]; +} FILE_FS_OBJECTID_INFORMATION, *PFILE_FS_OBJECTID_INFORMATION; + +typedef struct _FILE_FS_DRIVER_PATH_INFORMATION { + BOOLEAN DriverInPath; + ULONG DriverNameLength; + WCHAR DriverName[1]; +} FILE_FS_DRIVER_PATH_INFORMATION, *PFILE_FS_DRIVER_PATH_INFORMATION; + +typedef struct _FILE_FS_VOLUME_FLAGS_INFORMATION { + ULONG Flags; +} FILE_FS_VOLUME_FLAGS_INFORMATION, *PFILE_FS_VOLUME_FLAGS_INFORMATION; + +typedef struct _FILE_FS_SECTOR_SIZE_INFORMATION { + ULONG LogicalBytesPerSector; + ULONG PhysicalBytesPerSectorForAtomicity; + ULONG PhysicalBytesPerSectorForPerformance; + ULONG FileSystemEffectivePhysicalBytesPerSectorForAtomicity; + ULONG Flags; + ULONG ByteOffsetForSectorAlignment; + ULONG ByteOffsetForPartitionAlignment; +} FILE_FS_SECTOR_SIZE_INFORMATION, *PFILE_FS_SECTOR_SIZE_INFORMATION; + +typedef struct _SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION { + LARGE_INTEGER IdleTime; + LARGE_INTEGER KernelTime; + LARGE_INTEGER UserTime; + LARGE_INTEGER DpcTime; + LARGE_INTEGER 
InterruptTime; + ULONG InterruptCount; +} SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION, *PSYSTEM_PROCESSOR_PERFORMANCE_INFORMATION; + +#ifndef SystemProcessorPerformanceInformation +# define SystemProcessorPerformanceInformation 8 +#endif + +#ifndef FILE_DEVICE_FILE_SYSTEM +# define FILE_DEVICE_FILE_SYSTEM 0x00000009 +#endif + +#ifndef FILE_DEVICE_NETWORK +# define FILE_DEVICE_NETWORK 0x00000012 +#endif + +#ifndef METHOD_BUFFERED +# define METHOD_BUFFERED 0 +#endif + +#ifndef METHOD_IN_DIRECT +# define METHOD_IN_DIRECT 1 +#endif + +#ifndef METHOD_OUT_DIRECT +# define METHOD_OUT_DIRECT 2 +#endif + +#ifndef METHOD_NEITHER +#define METHOD_NEITHER 3 +#endif + +#ifndef METHOD_DIRECT_TO_HARDWARE +# define METHOD_DIRECT_TO_HARDWARE METHOD_IN_DIRECT +#endif + +#ifndef METHOD_DIRECT_FROM_HARDWARE +# define METHOD_DIRECT_FROM_HARDWARE METHOD_OUT_DIRECT +#endif + +#ifndef FILE_ANY_ACCESS +# define FILE_ANY_ACCESS 0 +#endif + +#ifndef FILE_SPECIAL_ACCESS +# define FILE_SPECIAL_ACCESS (FILE_ANY_ACCESS) +#endif + +#ifndef FILE_READ_ACCESS +# define FILE_READ_ACCESS 0x0001 +#endif + +#ifndef FILE_WRITE_ACCESS +# define FILE_WRITE_ACCESS 0x0002 +#endif + +#ifndef CTL_CODE +# define CTL_CODE(device_type, function, method, access) \ + (((device_type) << 16) | ((access) << 14) | ((function) << 2) | (method)) +#endif + +#ifndef FSCTL_SET_REPARSE_POINT +# define FSCTL_SET_REPARSE_POINT CTL_CODE(FILE_DEVICE_FILE_SYSTEM, \ + 41, \ + METHOD_BUFFERED, \ + FILE_SPECIAL_ACCESS) +#endif + +#ifndef FSCTL_GET_REPARSE_POINT +# define FSCTL_GET_REPARSE_POINT CTL_CODE(FILE_DEVICE_FILE_SYSTEM, \ + 42, \ + METHOD_BUFFERED, \ + FILE_ANY_ACCESS) +#endif + +#ifndef FSCTL_DELETE_REPARSE_POINT +# define FSCTL_DELETE_REPARSE_POINT CTL_CODE(FILE_DEVICE_FILE_SYSTEM, \ + 43, \ + METHOD_BUFFERED, \ + FILE_SPECIAL_ACCESS) +#endif + +#ifndef IO_REPARSE_TAG_SYMLINK +# define IO_REPARSE_TAG_SYMLINK (0xA000000CL) +#endif + +typedef VOID (NTAPI *PIO_APC_ROUTINE) + (PVOID ApcContext, + PIO_STATUS_BLOCK IoStatusBlock, + ULONG Reserved); + +typedef ULONG (NTAPI *sRtlNtStatusToDosError) + (NTSTATUS Status); + +typedef NTSTATUS (NTAPI *sNtDeviceIoControlFile) + (HANDLE FileHandle, + HANDLE Event, + PIO_APC_ROUTINE ApcRoutine, + PVOID ApcContext, + PIO_STATUS_BLOCK IoStatusBlock, + ULONG IoControlCode, + PVOID InputBuffer, + ULONG InputBufferLength, + PVOID OutputBuffer, + ULONG OutputBufferLength); + +typedef NTSTATUS (NTAPI *sNtQueryInformationFile) + (HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, + PVOID FileInformation, + ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass); + +typedef NTSTATUS (NTAPI *sNtSetInformationFile) + (HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, + PVOID FileInformation, + ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass); + +typedef NTSTATUS (NTAPI *sNtQueryVolumeInformationFile) + (HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, + PVOID FsInformation, + ULONG Length, + FS_INFORMATION_CLASS FsInformationClass); + +typedef NTSTATUS (NTAPI *sNtQuerySystemInformation) + (UINT SystemInformationClass, + PVOID SystemInformation, + ULONG SystemInformationLength, + PULONG ReturnLength); + +typedef NTSTATUS (NTAPI *sNtQueryDirectoryFile) + (HANDLE FileHandle, + HANDLE Event, + PIO_APC_ROUTINE ApcRoutine, + PVOID ApcContext, + PIO_STATUS_BLOCK IoStatusBlock, + PVOID FileInformation, + ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass, + BOOLEAN ReturnSingleEntry, + PUNICODE_STRING FileName, + BOOLEAN RestartScan + ); + +/* + * Kernel32 headers + */ +#ifndef 
FILE_SKIP_COMPLETION_PORT_ON_SUCCESS +# define FILE_SKIP_COMPLETION_PORT_ON_SUCCESS 0x1 +#endif + +#ifndef FILE_SKIP_SET_EVENT_ON_HANDLE +# define FILE_SKIP_SET_EVENT_ON_HANDLE 0x2 +#endif + +#ifndef SYMBOLIC_LINK_FLAG_DIRECTORY +# define SYMBOLIC_LINK_FLAG_DIRECTORY 0x1 +#endif + +#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) + typedef struct _OVERLAPPED_ENTRY { + ULONG_PTR lpCompletionKey; + LPOVERLAPPED lpOverlapped; + ULONG_PTR Internal; + DWORD dwNumberOfBytesTransferred; + } OVERLAPPED_ENTRY, *LPOVERLAPPED_ENTRY; +#endif + +/* from wincon.h */ +#ifndef ENABLE_INSERT_MODE +# define ENABLE_INSERT_MODE 0x20 +#endif + +#ifndef ENABLE_QUICK_EDIT_MODE +# define ENABLE_QUICK_EDIT_MODE 0x40 +#endif + +#ifndef ENABLE_EXTENDED_FLAGS +# define ENABLE_EXTENDED_FLAGS 0x80 +#endif + +/* from winerror.h */ +#ifndef ERROR_SYMLINK_NOT_SUPPORTED +# define ERROR_SYMLINK_NOT_SUPPORTED 1464 +#endif + +#ifndef ERROR_MUI_FILE_NOT_FOUND +# define ERROR_MUI_FILE_NOT_FOUND 15100 +#endif + +#ifndef ERROR_MUI_INVALID_FILE +# define ERROR_MUI_INVALID_FILE 15101 +#endif + +#ifndef ERROR_MUI_INVALID_RC_CONFIG +# define ERROR_MUI_INVALID_RC_CONFIG 15102 +#endif + +#ifndef ERROR_MUI_INVALID_LOCALE_NAME +# define ERROR_MUI_INVALID_LOCALE_NAME 15103 +#endif + +#ifndef ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME +# define ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME 15104 +#endif + +#ifndef ERROR_MUI_FILE_NOT_LOADED +# define ERROR_MUI_FILE_NOT_LOADED 15105 +#endif + +typedef BOOL (WINAPI *sGetQueuedCompletionStatusEx) + (HANDLE CompletionPort, + LPOVERLAPPED_ENTRY lpCompletionPortEntries, + ULONG ulCount, + PULONG ulNumEntriesRemoved, + DWORD dwMilliseconds, + BOOL fAlertable); + +typedef BOOL (WINAPI* sSetFileCompletionNotificationModes) + (HANDLE FileHandle, + UCHAR Flags); + +typedef BOOLEAN (WINAPI* sCreateSymbolicLinkW) + (LPCWSTR lpSymlinkFileName, + LPCWSTR lpTargetFileName, + DWORD dwFlags); + +typedef BOOL (WINAPI* sCancelIoEx) + (HANDLE hFile, + LPOVERLAPPED lpOverlapped); + +typedef VOID (WINAPI* sInitializeConditionVariable) + (PCONDITION_VARIABLE ConditionVariable); + +typedef BOOL (WINAPI* sSleepConditionVariableCS) + (PCONDITION_VARIABLE ConditionVariable, + PCRITICAL_SECTION CriticalSection, + DWORD dwMilliseconds); + +typedef BOOL (WINAPI* sSleepConditionVariableSRW) + (PCONDITION_VARIABLE ConditionVariable, + PSRWLOCK SRWLock, + DWORD dwMilliseconds, + ULONG Flags); + +typedef VOID (WINAPI* sWakeAllConditionVariable) + (PCONDITION_VARIABLE ConditionVariable); + +typedef VOID (WINAPI* sWakeConditionVariable) + (PCONDITION_VARIABLE ConditionVariable); + +typedef BOOL (WINAPI* sCancelSynchronousIo) + (HANDLE hThread); + +typedef DWORD (WINAPI* sGetFinalPathNameByHandleW) + (HANDLE hFile, + LPWSTR lpszFilePath, + DWORD cchFilePath, + DWORD dwFlags); + +/* from powerbase.h */ +#ifndef DEVICE_NOTIFY_CALLBACK +# define DEVICE_NOTIFY_CALLBACK 2 +#endif + +#ifndef PBT_APMRESUMEAUTOMATIC +# define PBT_APMRESUMEAUTOMATIC 18 +#endif + +#ifndef PBT_APMRESUMESUSPEND +# define PBT_APMRESUMESUSPEND 7 +#endif + +typedef ULONG CALLBACK _DEVICE_NOTIFY_CALLBACK_ROUTINE( + PVOID Context, + ULONG Type, + PVOID Setting +); +typedef _DEVICE_NOTIFY_CALLBACK_ROUTINE* _PDEVICE_NOTIFY_CALLBACK_ROUTINE; + +typedef struct _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS { + _PDEVICE_NOTIFY_CALLBACK_ROUTINE Callback; + PVOID Context; +} _DEVICE_NOTIFY_SUBSCRIBE_PARAMETERS, *_PDEVICE_NOTIFY_SUBSCRIBE_PARAMETERS; + +typedef PVOID _HPOWERNOTIFY; +typedef _HPOWERNOTIFY *_PHPOWERNOTIFY; + +typedef DWORD (WINAPI 
*sPowerRegisterSuspendResumeNotification) + (DWORD Flags, + HANDLE Recipient, + _PHPOWERNOTIFY RegistrationHandle); + + +/* Ntdll function pointers */ +extern sRtlNtStatusToDosError pRtlNtStatusToDosError; +extern sNtDeviceIoControlFile pNtDeviceIoControlFile; +extern sNtQueryInformationFile pNtQueryInformationFile; +extern sNtSetInformationFile pNtSetInformationFile; +extern sNtQueryVolumeInformationFile pNtQueryVolumeInformationFile; +extern sNtQueryDirectoryFile pNtQueryDirectoryFile; +extern sNtQuerySystemInformation pNtQuerySystemInformation; + + +/* Kernel32 function pointers */ +extern sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx; +extern sSetFileCompletionNotificationModes pSetFileCompletionNotificationModes; +extern sCreateSymbolicLinkW pCreateSymbolicLinkW; +extern sCancelIoEx pCancelIoEx; +extern sInitializeConditionVariable pInitializeConditionVariable; +extern sSleepConditionVariableCS pSleepConditionVariableCS; +extern sSleepConditionVariableSRW pSleepConditionVariableSRW; +extern sWakeAllConditionVariable pWakeAllConditionVariable; +extern sWakeConditionVariable pWakeConditionVariable; +extern sCancelSynchronousIo pCancelSynchronousIo; +extern sGetFinalPathNameByHandleW pGetFinalPathNameByHandleW; + + +/* Powrprof.dll function pointer */ +extern sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification; + +#endif /* UV_WIN_WINAPI_H_ */ diff --git a/deps/libuv/src/win/winsock.c b/deps/libuv/src/win/winsock.c new file mode 100644 index 00000000..d2e667e9 --- /dev/null +++ b/deps/libuv/src/win/winsock.c @@ -0,0 +1,561 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <assert.h> +#include <stdlib.h> + +#include "uv.h" +#include "internal.h" + + +/* Whether there are any non-IFS LSPs stacked on TCP */ +int uv_tcp_non_ifs_lsp_ipv4; +int uv_tcp_non_ifs_lsp_ipv6; + +/* Ip address used to bind to any port at any interface */ +struct sockaddr_in uv_addr_ip4_any_; +struct sockaddr_in6 uv_addr_ip6_any_; + + +/* + * Retrieves the pointer to a winsock extension function. 
+ */ +static BOOL uv_get_extension_function(SOCKET socket, GUID guid, + void **target) { + int result; + DWORD bytes; + + result = WSAIoctl(socket, + SIO_GET_EXTENSION_FUNCTION_POINTER, + &guid, + sizeof(guid), + (void*)target, + sizeof(*target), + &bytes, + NULL, + NULL); + + if (result == SOCKET_ERROR) { + *target = NULL; + return FALSE; + } else { + return TRUE; + } +} + + +BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target) { + const GUID wsaid_acceptex = WSAID_ACCEPTEX; + return uv_get_extension_function(socket, wsaid_acceptex, (void**)target); +} + + +BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target) { + const GUID wsaid_connectex = WSAID_CONNECTEX; + return uv_get_extension_function(socket, wsaid_connectex, (void**)target); +} + + +static int error_means_no_support(DWORD error) { + return error == WSAEPROTONOSUPPORT || error == WSAESOCKTNOSUPPORT || + error == WSAEPFNOSUPPORT || error == WSAEAFNOSUPPORT; +} + + +void uv_winsock_init() { + WSADATA wsa_data; + int errorno; + SOCKET dummy; + WSAPROTOCOL_INFOW protocol_info; + int opt_len; + + /* Initialize winsock */ + errorno = WSAStartup(MAKEWORD(2, 2), &wsa_data); + if (errorno != 0) { + uv_fatal_error(errorno, "WSAStartup"); + } + + /* Set implicit binding address used by connectEx */ + if (uv_ip4_addr("0.0.0.0", 0, &uv_addr_ip4_any_)) { + abort(); + } + + if (uv_ip6_addr("::", 0, &uv_addr_ip6_any_)) { + abort(); + } + + /* Detect non-IFS LSPs */ + dummy = socket(AF_INET, SOCK_STREAM, IPPROTO_IP); + + if (dummy != INVALID_SOCKET) { + opt_len = (int) sizeof protocol_info; + if (getsockopt(dummy, + SOL_SOCKET, + SO_PROTOCOL_INFOW, + (char*) &protocol_info, + &opt_len) == SOCKET_ERROR) + uv_fatal_error(WSAGetLastError(), "getsockopt"); + + if (!(protocol_info.dwServiceFlags1 & XP1_IFS_HANDLES)) + uv_tcp_non_ifs_lsp_ipv4 = 1; + + if (closesocket(dummy) == SOCKET_ERROR) + uv_fatal_error(WSAGetLastError(), "closesocket"); + + } else if (!error_means_no_support(WSAGetLastError())) { + /* Any error other than "socket type not supported" is fatal. */ + uv_fatal_error(WSAGetLastError(), "socket"); + } + + /* Detect IPV6 support and non-IFS LSPs */ + dummy = socket(AF_INET6, SOCK_STREAM, IPPROTO_IP); + + if (dummy != INVALID_SOCKET) { + opt_len = (int) sizeof protocol_info; + if (getsockopt(dummy, + SOL_SOCKET, + SO_PROTOCOL_INFOW, + (char*) &protocol_info, + &opt_len) == SOCKET_ERROR) + uv_fatal_error(WSAGetLastError(), "getsockopt"); + + if (!(protocol_info.dwServiceFlags1 & XP1_IFS_HANDLES)) + uv_tcp_non_ifs_lsp_ipv6 = 1; + + if (closesocket(dummy) == SOCKET_ERROR) + uv_fatal_error(WSAGetLastError(), "closesocket"); + + } else if (!error_means_no_support(WSAGetLastError())) { + /* Any error other than "socket type not supported" is fatal. 
*/ + uv_fatal_error(WSAGetLastError(), "socket"); + } +} + + +int uv_ntstatus_to_winsock_error(NTSTATUS status) { + switch (status) { + case STATUS_SUCCESS: + return ERROR_SUCCESS; + + case STATUS_PENDING: + return ERROR_IO_PENDING; + + case STATUS_INVALID_HANDLE: + case STATUS_OBJECT_TYPE_MISMATCH: + return WSAENOTSOCK; + + case STATUS_INSUFFICIENT_RESOURCES: + case STATUS_PAGEFILE_QUOTA: + case STATUS_COMMITMENT_LIMIT: + case STATUS_WORKING_SET_QUOTA: + case STATUS_NO_MEMORY: + case STATUS_QUOTA_EXCEEDED: + case STATUS_TOO_MANY_PAGING_FILES: + case STATUS_REMOTE_RESOURCES: + return WSAENOBUFS; + + case STATUS_TOO_MANY_ADDRESSES: + case STATUS_SHARING_VIOLATION: + case STATUS_ADDRESS_ALREADY_EXISTS: + return WSAEADDRINUSE; + + case STATUS_LINK_TIMEOUT: + case STATUS_IO_TIMEOUT: + case STATUS_TIMEOUT: + return WSAETIMEDOUT; + + case STATUS_GRACEFUL_DISCONNECT: + return WSAEDISCON; + + case STATUS_REMOTE_DISCONNECT: + case STATUS_CONNECTION_RESET: + case STATUS_LINK_FAILED: + case STATUS_CONNECTION_DISCONNECTED: + case STATUS_PORT_UNREACHABLE: + case STATUS_HOPLIMIT_EXCEEDED: + return WSAECONNRESET; + + case STATUS_LOCAL_DISCONNECT: + case STATUS_TRANSACTION_ABORTED: + case STATUS_CONNECTION_ABORTED: + return WSAECONNABORTED; + + case STATUS_BAD_NETWORK_PATH: + case STATUS_NETWORK_UNREACHABLE: + case STATUS_PROTOCOL_UNREACHABLE: + return WSAENETUNREACH; + + case STATUS_HOST_UNREACHABLE: + return WSAEHOSTUNREACH; + + case STATUS_CANCELLED: + case STATUS_REQUEST_ABORTED: + return WSAEINTR; + + case STATUS_BUFFER_OVERFLOW: + case STATUS_INVALID_BUFFER_SIZE: + return WSAEMSGSIZE; + + case STATUS_BUFFER_TOO_SMALL: + case STATUS_ACCESS_VIOLATION: + return WSAEFAULT; + + case STATUS_DEVICE_NOT_READY: + case STATUS_REQUEST_NOT_ACCEPTED: + return WSAEWOULDBLOCK; + + case STATUS_INVALID_NETWORK_RESPONSE: + case STATUS_NETWORK_BUSY: + case STATUS_NO_SUCH_DEVICE: + case STATUS_NO_SUCH_FILE: + case STATUS_OBJECT_PATH_NOT_FOUND: + case STATUS_OBJECT_NAME_NOT_FOUND: + case STATUS_UNEXPECTED_NETWORK_ERROR: + return WSAENETDOWN; + + case STATUS_INVALID_CONNECTION: + return WSAENOTCONN; + + case STATUS_REMOTE_NOT_LISTENING: + case STATUS_CONNECTION_REFUSED: + return WSAECONNREFUSED; + + case STATUS_PIPE_DISCONNECTED: + return WSAESHUTDOWN; + + case STATUS_CONFLICTING_ADDRESSES: + case STATUS_INVALID_ADDRESS: + case STATUS_INVALID_ADDRESS_COMPONENT: + return WSAEADDRNOTAVAIL; + + case STATUS_NOT_SUPPORTED: + case STATUS_NOT_IMPLEMENTED: + return WSAEOPNOTSUPP; + + case STATUS_ACCESS_DENIED: + return WSAEACCES; + + default: + if ((status & (FACILITY_NTWIN32 << 16)) == (FACILITY_NTWIN32 << 16) && + (status & (ERROR_SEVERITY_ERROR | ERROR_SEVERITY_WARNING))) { + /* It's a windows error that has been previously mapped to an */ + /* ntstatus code. */ + return (DWORD) (status & 0xffff); + } else { + /* The default fallback for unmappable ntstatus codes. */ + return WSAEINVAL; + } + } +} + + +/* + * This function provides a workaround for a bug in the winsock implementation + * of WSARecv. The problem is that when SetFileCompletionNotificationModes is + * used to avoid IOCP notifications of completed reads, WSARecv does not + * reliably indicate whether we can expect a completion package to be posted + * when the receive buffer is smaller than the received datagram. + * + * However it is desirable to use SetFileCompletionNotificationModes because + * it yields a massive performance increase. + * + * This function provides a workaround for that bug, but it only works for the + * specific case that we need it for. 
E.g. it assumes that the "avoid iocp" + * bit has been set, and supports only overlapped operation. It also requires + * the user to use the default msafd driver, doesn't work when other LSPs are + * stacked on top of it. + */ +int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers, + DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) { + NTSTATUS status; + void* apc_context; + IO_STATUS_BLOCK* iosb = (IO_STATUS_BLOCK*) &overlapped->Internal; + AFD_RECV_INFO info; + DWORD error; + + if (overlapped == NULL || completion_routine != NULL) { + WSASetLastError(WSAEINVAL); + return SOCKET_ERROR; + } + + info.BufferArray = buffers; + info.BufferCount = buffer_count; + info.AfdFlags = AFD_OVERLAPPED; + info.TdiFlags = TDI_RECEIVE_NORMAL; + + if (*flags & MSG_PEEK) { + info.TdiFlags |= TDI_RECEIVE_PEEK; + } + + if (*flags & MSG_PARTIAL) { + info.TdiFlags |= TDI_RECEIVE_PARTIAL; + } + + if (!((intptr_t) overlapped->hEvent & 1)) { + apc_context = (void*) overlapped; + } else { + apc_context = NULL; + } + + iosb->Status = STATUS_PENDING; + iosb->Pointer = 0; + + status = pNtDeviceIoControlFile((HANDLE) socket, + overlapped->hEvent, + NULL, + apc_context, + iosb, + IOCTL_AFD_RECEIVE, + &info, + sizeof(info), + NULL, + 0); + + *flags = 0; + *bytes = (DWORD) iosb->Information; + + switch (status) { + case STATUS_SUCCESS: + error = ERROR_SUCCESS; + break; + + case STATUS_PENDING: + error = WSA_IO_PENDING; + break; + + case STATUS_BUFFER_OVERFLOW: + error = WSAEMSGSIZE; + break; + + case STATUS_RECEIVE_EXPEDITED: + error = ERROR_SUCCESS; + *flags = MSG_OOB; + break; + + case STATUS_RECEIVE_PARTIAL_EXPEDITED: + error = ERROR_SUCCESS; + *flags = MSG_PARTIAL | MSG_OOB; + break; + + case STATUS_RECEIVE_PARTIAL: + error = ERROR_SUCCESS; + *flags = MSG_PARTIAL; + break; + + default: + error = uv_ntstatus_to_winsock_error(status); + break; + } + + WSASetLastError(error); + + if (error == ERROR_SUCCESS) { + return 0; + } else { + return SOCKET_ERROR; + } +} + + +/* See description of uv_wsarecv_workaround. 
*/ +int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers, + DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr, + int* addr_len, WSAOVERLAPPED *overlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) { + NTSTATUS status; + void* apc_context; + IO_STATUS_BLOCK* iosb = (IO_STATUS_BLOCK*) &overlapped->Internal; + AFD_RECV_DATAGRAM_INFO info; + DWORD error; + + if (overlapped == NULL || addr == NULL || addr_len == NULL || + completion_routine != NULL) { + WSASetLastError(WSAEINVAL); + return SOCKET_ERROR; + } + + info.BufferArray = buffers; + info.BufferCount = buffer_count; + info.AfdFlags = AFD_OVERLAPPED; + info.TdiFlags = TDI_RECEIVE_NORMAL; + info.Address = addr; + info.AddressLength = addr_len; + + if (*flags & MSG_PEEK) { + info.TdiFlags |= TDI_RECEIVE_PEEK; + } + + if (*flags & MSG_PARTIAL) { + info.TdiFlags |= TDI_RECEIVE_PARTIAL; + } + + if (!((intptr_t) overlapped->hEvent & 1)) { + apc_context = (void*) overlapped; + } else { + apc_context = NULL; + } + + iosb->Status = STATUS_PENDING; + iosb->Pointer = 0; + + status = pNtDeviceIoControlFile((HANDLE) socket, + overlapped->hEvent, + NULL, + apc_context, + iosb, + IOCTL_AFD_RECEIVE_DATAGRAM, + &info, + sizeof(info), + NULL, + 0); + + *flags = 0; + *bytes = (DWORD) iosb->Information; + + switch (status) { + case STATUS_SUCCESS: + error = ERROR_SUCCESS; + break; + + case STATUS_PENDING: + error = WSA_IO_PENDING; + break; + + case STATUS_BUFFER_OVERFLOW: + error = WSAEMSGSIZE; + break; + + case STATUS_RECEIVE_EXPEDITED: + error = ERROR_SUCCESS; + *flags = MSG_OOB; + break; + + case STATUS_RECEIVE_PARTIAL_EXPEDITED: + error = ERROR_SUCCESS; + *flags = MSG_PARTIAL | MSG_OOB; + break; + + case STATUS_RECEIVE_PARTIAL: + error = ERROR_SUCCESS; + *flags = MSG_PARTIAL; + break; + + default: + error = uv_ntstatus_to_winsock_error(status); + break; + } + + WSASetLastError(error); + + if (error == ERROR_SUCCESS) { + return 0; + } else { + return SOCKET_ERROR; + } +} + + +int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in, + AFD_POLL_INFO* info_out, OVERLAPPED* overlapped) { + IO_STATUS_BLOCK iosb; + IO_STATUS_BLOCK* iosb_ptr; + HANDLE event = NULL; + void* apc_context; + NTSTATUS status; + DWORD error; + + if (overlapped != NULL) { + /* Overlapped operation. */ + iosb_ptr = (IO_STATUS_BLOCK*) &overlapped->Internal; + event = overlapped->hEvent; + + /* Do not report iocp completion if hEvent is tagged. */ + if ((uintptr_t) event & 1) { + event = (HANDLE)((uintptr_t) event & ~(uintptr_t) 1); + apc_context = NULL; + } else { + apc_context = overlapped; + } + + } else { + /* Blocking operation. */ + iosb_ptr = &iosb; + event = CreateEvent(NULL, FALSE, FALSE, NULL); + if (event == NULL) { + return SOCKET_ERROR; + } + apc_context = NULL; + } + + iosb_ptr->Status = STATUS_PENDING; + status = pNtDeviceIoControlFile((HANDLE) socket, + event, + NULL, + apc_context, + iosb_ptr, + IOCTL_AFD_POLL, + info_in, + sizeof *info_in, + info_out, + sizeof *info_out); + + if (overlapped == NULL) { + /* If this is a blocking operation, wait for the event to become */ + /* signaled, and then grab the real status from the io status block. 
*/ + if (status == STATUS_PENDING) { + DWORD r = WaitForSingleObject(event, INFINITE); + + if (r == WAIT_FAILED) { + DWORD saved_error = GetLastError(); + CloseHandle(event); + WSASetLastError(saved_error); + return SOCKET_ERROR; + } + + status = iosb.Status; + } + + CloseHandle(event); + } + + switch (status) { + case STATUS_SUCCESS: + error = ERROR_SUCCESS; + break; + + case STATUS_PENDING: + error = WSA_IO_PENDING; + break; + + default: + error = uv_ntstatus_to_winsock_error(status); + break; + } + + WSASetLastError(error); + + if (error == ERROR_SUCCESS) { + return 0; + } else { + return SOCKET_ERROR; + } +} diff --git a/deps/libuv/src/win/winsock.h b/deps/libuv/src/win/winsock.h new file mode 100644 index 00000000..7c007ab4 --- /dev/null +++ b/deps/libuv/src/win/winsock.h @@ -0,0 +1,190 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef UV_WIN_WINSOCK_H_ +#define UV_WIN_WINSOCK_H_ + +#include <winsock2.h> +#include <iphlpapi.h> +#include <mswsock.h> +#include <ws2tcpip.h> +#include <windows.h> + +#include "winapi.h" + + +/* + * MinGW is missing these too + */ +#ifndef SO_UPDATE_CONNECT_CONTEXT +# define SO_UPDATE_CONNECT_CONTEXT 0x7010 +#endif + +#ifndef TCP_KEEPALIVE +# define TCP_KEEPALIVE 3 +#endif + +#ifndef IPV6_V6ONLY +# define IPV6_V6ONLY 27 +#endif + +#ifndef IPV6_HOPLIMIT +# define IPV6_HOPLIMIT 21 +#endif + +#ifndef SIO_BASE_HANDLE +# define SIO_BASE_HANDLE 0x48000022 +#endif + +/* + * TDI defines that are only in the DDK. + * We only need receive flags so far. + */ +#ifndef TDI_RECEIVE_NORMAL + #define TDI_RECEIVE_BROADCAST 0x00000004 + #define TDI_RECEIVE_MULTICAST 0x00000008 + #define TDI_RECEIVE_PARTIAL 0x00000010 + #define TDI_RECEIVE_NORMAL 0x00000020 + #define TDI_RECEIVE_EXPEDITED 0x00000040 + #define TDI_RECEIVE_PEEK 0x00000080 + #define TDI_RECEIVE_NO_RESPONSE_EXP 0x00000100 + #define TDI_RECEIVE_COPY_LOOKAHEAD 0x00000200 + #define TDI_RECEIVE_ENTIRE_MESSAGE 0x00000400 + #define TDI_RECEIVE_AT_DISPATCH_LEVEL 0x00000800 + #define TDI_RECEIVE_CONTROL_INFO 0x00001000 + #define TDI_RECEIVE_FORCE_INDICATION 0x00002000 + #define TDI_RECEIVE_NO_PUSH 0x00004000 +#endif + +/* + * The "Auxiliary Function Driver" is the windows kernel-mode driver that does + * TCP, UDP etc. Winsock is just a layer that dispatches requests to it. + * Having these definitions allows us to bypass winsock and make an AFD kernel + * call directly, avoiding a bug in winsock's recvfrom implementation. 
+ */ + +#define AFD_NO_FAST_IO 0x00000001 +#define AFD_OVERLAPPED 0x00000002 +#define AFD_IMMEDIATE 0x00000004 + +#define AFD_POLL_RECEIVE_BIT 0 +#define AFD_POLL_RECEIVE (1 << AFD_POLL_RECEIVE_BIT) +#define AFD_POLL_RECEIVE_EXPEDITED_BIT 1 +#define AFD_POLL_RECEIVE_EXPEDITED (1 << AFD_POLL_RECEIVE_EXPEDITED_BIT) +#define AFD_POLL_SEND_BIT 2 +#define AFD_POLL_SEND (1 << AFD_POLL_SEND_BIT) +#define AFD_POLL_DISCONNECT_BIT 3 +#define AFD_POLL_DISCONNECT (1 << AFD_POLL_DISCONNECT_BIT) +#define AFD_POLL_ABORT_BIT 4 +#define AFD_POLL_ABORT (1 << AFD_POLL_ABORT_BIT) +#define AFD_POLL_LOCAL_CLOSE_BIT 5 +#define AFD_POLL_LOCAL_CLOSE (1 << AFD_POLL_LOCAL_CLOSE_BIT) +#define AFD_POLL_CONNECT_BIT 6 +#define AFD_POLL_CONNECT (1 << AFD_POLL_CONNECT_BIT) +#define AFD_POLL_ACCEPT_BIT 7 +#define AFD_POLL_ACCEPT (1 << AFD_POLL_ACCEPT_BIT) +#define AFD_POLL_CONNECT_FAIL_BIT 8 +#define AFD_POLL_CONNECT_FAIL (1 << AFD_POLL_CONNECT_FAIL_BIT) +#define AFD_POLL_QOS_BIT 9 +#define AFD_POLL_QOS (1 << AFD_POLL_QOS_BIT) +#define AFD_POLL_GROUP_QOS_BIT 10 +#define AFD_POLL_GROUP_QOS (1 << AFD_POLL_GROUP_QOS_BIT) + +#define AFD_NUM_POLL_EVENTS 11 +#define AFD_POLL_ALL ((1 << AFD_NUM_POLL_EVENTS) - 1) + +typedef struct _AFD_RECV_DATAGRAM_INFO { + LPWSABUF BufferArray; + ULONG BufferCount; + ULONG AfdFlags; + ULONG TdiFlags; + struct sockaddr* Address; + int* AddressLength; +} AFD_RECV_DATAGRAM_INFO, *PAFD_RECV_DATAGRAM_INFO; + +typedef struct _AFD_RECV_INFO { + LPWSABUF BufferArray; + ULONG BufferCount; + ULONG AfdFlags; + ULONG TdiFlags; +} AFD_RECV_INFO, *PAFD_RECV_INFO; + + +#define _AFD_CONTROL_CODE(operation, method) \ + ((FSCTL_AFD_BASE) << 12 | (operation << 2) | method) + +#define FSCTL_AFD_BASE FILE_DEVICE_NETWORK + +#define AFD_RECEIVE 5 +#define AFD_RECEIVE_DATAGRAM 6 +#define AFD_POLL 9 + +#define IOCTL_AFD_RECEIVE \ + _AFD_CONTROL_CODE(AFD_RECEIVE, METHOD_NEITHER) + +#define IOCTL_AFD_RECEIVE_DATAGRAM \ + _AFD_CONTROL_CODE(AFD_RECEIVE_DATAGRAM, METHOD_NEITHER) + +#define IOCTL_AFD_POLL \ + _AFD_CONTROL_CODE(AFD_POLL, METHOD_BUFFERED) + +#if defined(__MINGW32__) && !defined(__MINGW64_VERSION_MAJOR) +typedef struct _IP_ADAPTER_UNICAST_ADDRESS_XP { + /* FIXME: __C89_NAMELESS was removed */ + /* __C89_NAMELESS */ union { + ULONGLONG Alignment; + /* __C89_NAMELESS */ struct { + ULONG Length; + DWORD Flags; + }; + }; + struct _IP_ADAPTER_UNICAST_ADDRESS_XP *Next; + SOCKET_ADDRESS Address; + IP_PREFIX_ORIGIN PrefixOrigin; + IP_SUFFIX_ORIGIN SuffixOrigin; + IP_DAD_STATE DadState; + ULONG ValidLifetime; + ULONG PreferredLifetime; + ULONG LeaseLifetime; +} IP_ADAPTER_UNICAST_ADDRESS_XP,*PIP_ADAPTER_UNICAST_ADDRESS_XP; + +typedef struct _IP_ADAPTER_UNICAST_ADDRESS_LH { + union { + ULONGLONG Alignment; + struct { + ULONG Length; + DWORD Flags; + }; + }; + struct _IP_ADAPTER_UNICAST_ADDRESS_LH *Next; + SOCKET_ADDRESS Address; + IP_PREFIX_ORIGIN PrefixOrigin; + IP_SUFFIX_ORIGIN SuffixOrigin; + IP_DAD_STATE DadState; + ULONG ValidLifetime; + ULONG PreferredLifetime; + ULONG LeaseLifetime; + UINT8 OnLinkPrefixLength; +} IP_ADAPTER_UNICAST_ADDRESS_LH,*PIP_ADAPTER_UNICAST_ADDRESS_LH; + +#endif + +#endif /* UV_WIN_WINSOCK_H_ */ diff --git a/deps/libuv/uv.gyp b/deps/libuv/uv.gyp new file mode 100644 index 00000000..b969a8cd --- /dev/null +++ b/deps/libuv/uv.gyp @@ -0,0 +1,573 @@ +{ + 'target_defaults': { + 'conditions': [ + ['OS != "win"', { + 'defines': [ + '_LARGEFILE_SOURCE', + '_FILE_OFFSET_BITS=64', + ], + 'conditions': [ + ['OS=="solaris"', { + 'cflags': [ '-pthreads' ], + }], + ['OS not in "solaris android zos"', { + 
'cflags': [ '-pthread' ], + }], + ['OS in "zos"', { + 'defines': [ + '_UNIX03_THREADS', + '_UNIX03_SOURCE', + '_OPEN_SYS_IF_EXT', + '_OPEN_SYS_SOCK_IPV6', + '_OPEN_MSGQ_EXT', + '_XOPEN_SOURCE_EXTENDED', + '_ALL_SOURCE', + '_LARGE_TIME_API', + '_OPEN_SYS_FILE_EXT', + '_AE_BIMODAL', + 'PATH_MAX=255' + ], + 'cflags': [ '-qxplink' ], + }] + ], + }], + ], + 'xcode_settings': { + 'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden + 'WARNING_CFLAGS': [ '-Wall', '-Wextra', '-Wno-unused-parameter' ], + 'OTHER_CFLAGS': [ '-g', '--std=gnu89', '-pedantic' ], + } + }, + + 'targets': [ + { + 'target_name': 'libuv', + 'type': '<(uv_library)', + 'include_dirs': [ + 'include', + 'src/', + ], + 'direct_dependent_settings': { + 'include_dirs': [ 'include' ], + 'conditions': [ + ['OS != "win"', { + 'defines': [ + '_LARGEFILE_SOURCE', + '_FILE_OFFSET_BITS=64', + ], + }], + ['OS in "mac ios"', { + 'defines': [ '_DARWIN_USE_64_BIT_INODE=1' ], + }], + ['OS == "linux"', { + 'defines': [ '_POSIX_C_SOURCE=200112' ], + }], + ], + }, + 'sources': [ + 'common.gypi', + 'include/uv.h', + 'include/tree.h', + 'include/uv-errno.h', + 'include/uv-threadpool.h', + 'include/uv-version.h', + 'src/fs-poll.c', + 'src/heap-inl.h', + 'src/inet.c', + 'src/queue.h', + 'src/threadpool.c', + 'src/uv-common.c', + 'src/uv-common.h', + 'src/version.c' + ], + 'conditions': [ + [ 'OS=="win"', { + 'defines': [ + '_WIN32_WINNT=0x0600', + '_GNU_SOURCE', + ], + 'sources': [ + 'include/uv-win.h', + 'src/win/async.c', + 'src/win/atomicops-inl.h', + 'src/win/core.c', + 'src/win/detect-wakeup.c', + 'src/win/dl.c', + 'src/win/error.c', + 'src/win/fs.c', + 'src/win/fs-event.c', + 'src/win/getaddrinfo.c', + 'src/win/getnameinfo.c', + 'src/win/handle.c', + 'src/win/handle-inl.h', + 'src/win/internal.h', + 'src/win/loop-watcher.c', + 'src/win/pipe.c', + 'src/win/thread.c', + 'src/win/poll.c', + 'src/win/process.c', + 'src/win/process-stdio.c', + 'src/win/req.c', + 'src/win/req-inl.h', + 'src/win/signal.c', + 'src/win/snprintf.c', + 'src/win/stream.c', + 'src/win/stream-inl.h', + 'src/win/tcp.c', + 'src/win/tty.c', + 'src/win/timer.c', + 'src/win/udp.c', + 'src/win/util.c', + 'src/win/winapi.c', + 'src/win/winapi.h', + 'src/win/winsock.c', + 'src/win/winsock.h', + ], + 'link_settings': { + 'libraries': [ + '-ladvapi32', + '-liphlpapi', + '-lpsapi', + '-lshell32', + '-luser32', + '-luserenv', + '-lws2_32' + ], + }, + }, { # Not Windows i.e. 
POSIX + 'sources': [ + 'include/uv-unix.h', + 'include/uv-linux.h', + 'include/uv-sunos.h', + 'include/uv-darwin.h', + 'include/uv-bsd.h', + 'include/uv-aix.h', + 'src/unix/async.c', + 'src/unix/atomic-ops.h', + 'src/unix/core.c', + 'src/unix/dl.c', + 'src/unix/fs.c', + 'src/unix/getaddrinfo.c', + 'src/unix/getnameinfo.c', + 'src/unix/internal.h', + 'src/unix/loop.c', + 'src/unix/loop-watcher.c', + 'src/unix/pipe.c', + 'src/unix/poll.c', + 'src/unix/process.c', + 'src/unix/signal.c', + 'src/unix/spinlock.h', + 'src/unix/stream.c', + 'src/unix/tcp.c', + 'src/unix/thread.c', + 'src/unix/timer.c', + 'src/unix/tty.c', + 'src/unix/udp.c', + ], + 'link_settings': { + 'libraries': [ '-lm' ], + 'conditions': [ + ['OS=="solaris"', { + 'ldflags': [ '-pthreads' ], + }], + [ 'OS=="zos" and uv_library=="shared_library"', { + 'ldflags': [ '-Wl,DLL' ], + }], + ['OS != "solaris" and OS != "android" and OS != "zos"', { + 'ldflags': [ '-pthread' ], + }], + ], + }, + 'conditions': [ + ['uv_library=="shared_library"', { + 'conditions': [ + ['OS=="zos"', { + 'cflags': [ '-qexportall' ], + }, { + 'cflags': [ '-fPIC' ], + }], + ], + }], + ['uv_library=="shared_library" and OS!="mac" and OS!="zos"', { + # This will cause gyp to set soname + # Must correspond with UV_VERSION_MAJOR + # in include/uv-version.h + 'product_extension': 'so.1', + }], + ], + }], + [ 'OS in "linux mac ios android"', { + 'sources': [ 'src/unix/proctitle.c' ], + }], + [ 'OS != "zos"', { + 'cflags': [ + '-fvisibility=hidden', + '-g', + '--std=gnu89', + '-pedantic', + '-Wall', + '-Wextra', + '-Wno-unused-parameter', + ], + }], + [ 'OS in "mac ios"', { + 'sources': [ + 'src/unix/darwin.c', + 'src/unix/fsevents.c', + 'src/unix/darwin-proctitle.c', + 'src/unix/pthread-barrier.c' + ], + 'defines': [ + '_DARWIN_USE_64_BIT_INODE=1', + '_DARWIN_UNLIMITED_SELECT=1', + ] + }], + [ 'OS!="mac" and OS!="zos"', { + # Enable on all platforms except OS X. The antique gcc/clang that + # ships with Xcode emits waaaay too many false positives. 
+ 'cflags': [ '-Wstrict-aliasing' ], + }], + [ 'OS=="linux"', { + 'defines': [ '_GNU_SOURCE' ], + 'sources': [ + 'src/unix/linux-core.c', + 'src/unix/linux-inotify.c', + 'src/unix/linux-syscalls.c', + 'src/unix/linux-syscalls.h', + ], + 'link_settings': { + 'libraries': [ '-ldl', '-lrt' ], + }, + }], + [ 'OS=="android"', { + 'sources': [ + 'src/unix/linux-core.c', + 'src/unix/linux-inotify.c', + 'src/unix/linux-syscalls.c', + 'src/unix/linux-syscalls.h', + 'src/unix/pthread-fixes.c', + 'src/unix/android-ifaddrs.c', + 'src/unix/pthread-barrier.c' + ], + 'link_settings': { + 'libraries': [ '-ldl' ], + }, + }], + [ 'OS=="solaris"', { + 'sources': [ 'src/unix/sunos.c' ], + 'defines': [ + '__EXTENSIONS__', + '_XOPEN_SOURCE=500', + ], + 'link_settings': { + 'libraries': [ + '-lkstat', + '-lnsl', + '-lsendfile', + '-lsocket', + ], + }, + }], + [ 'OS=="aix"', { + 'sources': [ 'src/unix/aix.c' ], + 'defines': [ + '_ALL_SOURCE', + '_XOPEN_SOURCE=500', + '_LINUX_SOURCE_COMPAT', + '_THREAD_SAFE', + ], + 'link_settings': { + 'libraries': [ + '-lperfstat', + ], + }, + }], + [ 'OS=="freebsd" or OS=="dragonflybsd"', { + 'sources': [ 'src/unix/freebsd.c' ], + }], + [ 'OS=="openbsd"', { + 'sources': [ 'src/unix/openbsd.c' ], + }], + [ 'OS=="netbsd"', { + 'sources': [ 'src/unix/netbsd.c' ], + }], + [ 'OS in "freebsd dragonflybsd openbsd netbsd".split()', { + 'link_settings': { + 'libraries': [ '-lkvm' ], + }, + }], + [ 'OS in "ios mac freebsd dragonflybsd openbsd netbsd".split()', { + 'sources': [ 'src/unix/kqueue.c' ], + }], + ['uv_library=="shared_library"', { + 'defines': [ 'BUILDING_UV_SHARED=1' ] + }], + ['OS=="zos"', { + 'sources': [ + 'src/unix/pthread-fixes.c', + 'src/unix/pthread-barrier.c' + 'src/unix/os390.c' + ] + }], + ] + }, + + { + 'target_name': 'run-tests', + 'type': 'executable', + 'dependencies': [ 'libuv' ], + 'sources': [ + 'test/blackhole-server.c', + 'test/echo-server.c', + 'test/run-tests.c', + 'test/runner.c', + 'test/runner.h', + 'test/test-get-loadavg.c', + 'test/task.h', + 'test/test-active.c', + 'test/test-async.c', + 'test/test-async-null-cb.c', + 'test/test-callback-stack.c', + 'test/test-callback-order.c', + 'test/test-close-fd.c', + 'test/test-close-order.c', + 'test/test-connection-fail.c', + 'test/test-cwd-and-chdir.c', + 'test/test-default-loop-close.c', + 'test/test-delayed-accept.c', + 'test/test-eintr-handling.c', + 'test/test-error.c', + 'test/test-embed.c', + 'test/test-emfile.c', + 'test/test-fail-always.c', + 'test/test-fs.c', + 'test/test-fs-event.c', + 'test/test-get-currentexe.c', + 'test/test-get-memory.c', + 'test/test-get-passwd.c', + 'test/test-getaddrinfo.c', + 'test/test-getnameinfo.c', + 'test/test-getsockname.c', + 'test/test-handle-fileno.c', + 'test/test-homedir.c', + 'test/test-hrtime.c', + 'test/test-idle.c', + 'test/test-ip6-addr.c', + 'test/test-ipc.c', + 'test/test-ipc-send-recv.c', + 'test/test-list.h', + 'test/test-loop-handles.c', + 'test/test-loop-alive.c', + 'test/test-loop-close.c', + 'test/test-loop-stop.c', + 'test/test-loop-time.c', + 'test/test-loop-configure.c', + 'test/test-walk-handles.c', + 'test/test-watcher-cross-stop.c', + 'test/test-multiple-listen.c', + 'test/test-osx-select.c', + 'test/test-pass-always.c', + 'test/test-ping-pong.c', + 'test/test-pipe-bind-error.c', + 'test/test-pipe-connect-error.c', + 'test/test-pipe-connect-multiple.c', + 'test/test-pipe-connect-prepare.c', + 'test/test-pipe-getsockname.c', + 'test/test-pipe-pending-instances.c', + 'test/test-pipe-sendmsg.c', + 'test/test-pipe-server-close.c', + 
'test/test-pipe-close-stdout-read-stdin.c', + 'test/test-pipe-set-non-blocking.c', + 'test/test-platform-output.c', + 'test/test-poll.c', + 'test/test-poll-close.c', + 'test/test-poll-close-doesnt-corrupt-stack.c', + 'test/test-poll-closesocket.c', + 'test/test-process-title.c', + 'test/test-queue-foreach-delete.c', + 'test/test-ref.c', + 'test/test-run-nowait.c', + 'test/test-run-once.c', + 'test/test-semaphore.c', + 'test/test-shutdown-close.c', + 'test/test-shutdown-eof.c', + 'test/test-shutdown-twice.c', + 'test/test-signal.c', + 'test/test-signal-multiple-loops.c', + 'test/test-socket-buffer-size.c', + 'test/test-spawn.c', + 'test/test-fs-poll.c', + 'test/test-stdio-over-pipes.c', + 'test/test-tcp-alloc-cb-fail.c', + 'test/test-tcp-bind-error.c', + 'test/test-tcp-bind6-error.c', + 'test/test-tcp-close.c', + 'test/test-tcp-close-accept.c', + 'test/test-tcp-close-while-connecting.c', + 'test/test-tcp-create-socket-early.c', + 'test/test-tcp-connect-error-after-write.c', + 'test/test-tcp-shutdown-after-write.c', + 'test/test-tcp-flags.c', + 'test/test-tcp-connect-error.c', + 'test/test-tcp-connect-timeout.c', + 'test/test-tcp-connect6-error.c', + 'test/test-tcp-open.c', + 'test/test-tcp-write-to-half-open-connection.c', + 'test/test-tcp-write-after-connect.c', + 'test/test-tcp-writealot.c', + 'test/test-tcp-write-fail.c', + 'test/test-tcp-try-write.c', + 'test/test-tcp-unexpected-read.c', + 'test/test-tcp-oob.c', + 'test/test-tcp-read-stop.c', + 'test/test-tcp-write-queue-order.c', + 'test/test-threadpool.c', + 'test/test-threadpool-cancel.c', + 'test/test-thread-equal.c', + 'test/test-tmpdir.c', + 'test/test-mutexes.c', + 'test/test-thread.c', + 'test/test-barrier.c', + 'test/test-condvar.c', + 'test/test-timer-again.c', + 'test/test-timer-from-check.c', + 'test/test-timer.c', + 'test/test-tty.c', + 'test/test-udp-alloc-cb-fail.c', + 'test/test-udp-bind.c', + 'test/test-udp-create-socket-early.c', + 'test/test-udp-dgram-too-big.c', + 'test/test-udp-ipv6.c', + 'test/test-udp-open.c', + 'test/test-udp-options.c', + 'test/test-udp-send-and-recv.c', + 'test/test-udp-send-immediate.c', + 'test/test-udp-send-unreachable.c', + 'test/test-udp-multicast-join.c', + 'test/test-udp-multicast-join6.c', + 'test/test-dlerror.c', + 'test/test-udp-multicast-ttl.c', + 'test/test-ip4-addr.c', + 'test/test-ip6-addr.c', + 'test/test-udp-multicast-interface.c', + 'test/test-udp-multicast-interface6.c', + 'test/test-udp-try-send.c', + ], + 'conditions': [ + [ 'OS=="win"', { + 'sources': [ + 'test/runner-win.c', + 'test/runner-win.h', + 'src/win/snprintf.c', + ], + 'libraries': [ '-lws2_32' ] + }, { # POSIX + 'sources': [ + 'test/runner-unix.c', + 'test/runner-unix.h', + ], + 'conditions': [ + [ 'OS != "zos"', { + 'defines': [ '_GNU_SOURCE' ], + 'cflags': [ '-Wno-long-long' ], + 'xcode_settings': { + 'WARNING_CFLAGS': [ '-Wno-long-long' ] + } + }], + ]}, + ], + [ 'OS in "mac dragonflybsd freebsd linux netbsd openbsd".split()', { + 'link_settings': { + 'libraries': [ '-lutil' ], + }, + }], + [ 'OS=="solaris"', { # make test-fs.c compile, needs _POSIX_C_SOURCE + 'defines': [ + '__EXTENSIONS__', + '_XOPEN_SOURCE=500', + ], + }], + [ 'OS=="aix"', { # make test-fs.c compile, needs _POSIX_C_SOURCE + 'defines': [ + '_ALL_SOURCE', + '_XOPEN_SOURCE=500', + ], + }], + ['uv_library=="shared_library"', { + 'defines': [ 'USING_UV_SHARED=1' ], + 'conditions': [ + [ 'OS == "zos"', { + 'cflags': [ '-Wc,DLL' ], + }], + ], + }], + ], + 'msvs-settings': { + 'VCLinkerTool': { + 'SubSystem': 1, # /subsystem:console + }, + }, + }, 
+ + { + 'target_name': 'run-benchmarks', + 'type': 'executable', + 'dependencies': [ 'libuv' ], + 'sources': [ + 'test/benchmark-async.c', + 'test/benchmark-async-pummel.c', + 'test/benchmark-fs-stat.c', + 'test/benchmark-getaddrinfo.c', + 'test/benchmark-list.h', + 'test/benchmark-loop-count.c', + 'test/benchmark-million-async.c', + 'test/benchmark-million-timers.c', + 'test/benchmark-multi-accept.c', + 'test/benchmark-ping-pongs.c', + 'test/benchmark-pound.c', + 'test/benchmark-pump.c', + 'test/benchmark-sizes.c', + 'test/benchmark-spawn.c', + 'test/benchmark-thread.c', + 'test/benchmark-tcp-write-batch.c', + 'test/benchmark-udp-pummel.c', + 'test/dns-server.c', + 'test/echo-server.c', + 'test/blackhole-server.c', + 'test/run-benchmarks.c', + 'test/runner.c', + 'test/runner.h', + 'test/task.h', + ], + 'conditions': [ + [ 'OS=="win"', { + 'sources': [ + 'test/runner-win.c', + 'test/runner-win.h', + 'src/win/snprintf.c', + ], + 'libraries': [ '-lws2_32' ] + }, { # POSIX + 'defines': [ '_GNU_SOURCE' ], + 'sources': [ + 'test/runner-unix.c', + 'test/runner-unix.h', + ] + }], + ['uv_library=="shared_library"', { + 'defines': [ 'USING_UV_SHARED=1' ], + 'conditions': [ + [ 'OS == "zos"', { + 'cflags': [ '-Wc,DLL' ], + }], + ], + }], + ], + 'msvs-settings': { + 'VCLinkerTool': { + 'SubSystem': 1, # /subsystem:console + }, + }, + }, + ] +} diff --git a/deps/libuv/vcbuild.bat b/deps/libuv/vcbuild.bat new file mode 100644 index 00000000..91f45b72 --- /dev/null +++ b/deps/libuv/vcbuild.bat @@ -0,0 +1,158 @@ +@echo off + +cd %~dp0 + +if /i "%1"=="help" goto help +if /i "%1"=="--help" goto help +if /i "%1"=="-help" goto help +if /i "%1"=="/help" goto help +if /i "%1"=="?" goto help +if /i "%1"=="-?" goto help +if /i "%1"=="--?" goto help +if /i "%1"=="/?" goto help + +@rem Process arguments. 
+set config= +set target=Build +set noprojgen= +set nobuild= +set run= +set target_arch=ia32 +set vs_toolset=x86 +set msbuild_platform=WIN32 +set library=static_library + +:next-arg +if "%1"=="" goto args-done +if /i "%1"=="debug" set config=Debug&goto arg-ok +if /i "%1"=="release" set config=Release&goto arg-ok +if /i "%1"=="test" set run=run-tests.exe&goto arg-ok +if /i "%1"=="bench" set run=run-benchmarks.exe&goto arg-ok +if /i "%1"=="clean" set target=Clean&goto arg-ok +if /i "%1"=="noprojgen" set noprojgen=1&goto arg-ok +if /i "%1"=="nobuild" set nobuild=1&goto arg-ok +if /i "%1"=="x86" set target_arch=ia32&set msbuild_platform=WIN32&set vs_toolset=x86&goto arg-ok +if /i "%1"=="ia32" set target_arch=ia32&set msbuild_platform=WIN32&set vs_toolset=x86&goto arg-ok +if /i "%1"=="x64" set target_arch=x64&set msbuild_platform=x64&set vs_toolset=x64&goto arg-ok +if /i "%1"=="shared" set library=shared_library&goto arg-ok +if /i "%1"=="static" set library=static_library&goto arg-ok +:arg-ok +shift +goto next-arg +:args-done + +if defined WindowsSDKDir goto select-target +if defined VCINSTALLDIR goto select-target + +@rem Look for Visual Studio 2015 +if not defined VS140COMNTOOLS goto vc-set-2013 +if not exist "%VS140COMNTOOLS%\..\..\vc\vcvarsall.bat" goto vc-set-2013 +call "%VS140COMNTOOLS%\..\..\vc\vcvarsall.bat" %vs_toolset% +set GYP_MSVS_VERSION=2015 +echo Using Visual Studio 2015 +goto select-target + +:vc-set-2013 +@rem Look for Visual Studio 2013 +if not defined VS120COMNTOOLS goto vc-set-2012 +if not exist "%VS120COMNTOOLS%\..\..\vc\vcvarsall.bat" goto vc-set-2012 +call "%VS120COMNTOOLS%\..\..\vc\vcvarsall.bat" %vs_toolset% +set GYP_MSVS_VERSION=2013 +echo Using Visual Studio 2013 +goto select-target + +:vc-set-2012 +@rem Look for Visual Studio 2012 +if not defined VS110COMNTOOLS goto vc-set-2010 +if not exist "%VS110COMNTOOLS%\..\..\vc\vcvarsall.bat" goto vc-set-2010 +call "%VS110COMNTOOLS%\..\..\vc\vcvarsall.bat" %vs_toolset% +set GYP_MSVS_VERSION=2012 +echo Using Visual Studio 2012 +goto select-target + +:vc-set-2010 +@rem Look for Visual Studio 2010 +if not defined VS100COMNTOOLS goto vc-set-2008 +if not exist "%VS100COMNTOOLS%\..\..\vc\vcvarsall.bat" goto vc-set-2008 +call "%VS100COMNTOOLS%\..\..\vc\vcvarsall.bat" %vs_toolset% +set GYP_MSVS_VERSION=2010 +echo Using Visual Studio 2010 +goto select-target + +:vc-set-2008 +@rem Look for Visual Studio 2008 +if not defined VS90COMNTOOLS goto vc-set-notfound +if not exist "%VS90COMNTOOLS%\..\..\vc\vcvarsall.bat" goto vc-set-notfound +call "%VS90COMNTOOLS%\..\..\vc\vcvarsall.bat" %vs_toolset% +set GYP_MSVS_VERSION=2008 +echo Using Visual Studio 2008 +goto select-target + +:vc-set-notfound +echo Warning: Visual Studio not found + +:select-target +if not "%config%"=="" goto project-gen +if "%run%"=="run-tests.exe" set config=Debug& goto project-gen +if "%run%"=="run-benchmarks.exe" set config=Release& goto project-gen +set config=Debug + +:project-gen +@rem Skip project generation if requested. +if defined noprojgen goto msbuild + +@rem Generate the VS project. +if exist build\gyp goto have_gyp +echo git clone https://chromium.googlesource.com/external/gyp build/gyp +git clone https://chromium.googlesource.com/external/gyp build/gyp +if errorlevel 1 goto gyp_install_failed +goto have_gyp + +:gyp_install_failed +echo Failed to download gyp. Make sure you have git installed, or +echo manually install gyp into %~dp0build\gyp. 
+exit /b 1 + +:have_gyp +if not defined PYTHON set PYTHON=python +"%PYTHON%" gyp_uv.py -Dtarget_arch=%target_arch% -Duv_library=%library% +if errorlevel 1 goto create-msvs-files-failed +if not exist uv.sln goto create-msvs-files-failed +echo Project files generated. + +:msbuild +@rem Skip project generation if requested. +if defined nobuild goto run + +@rem Check if VS build env is available +if defined VCINSTALLDIR goto msbuild-found +if defined WindowsSDKDir goto msbuild-found +echo Build skipped. To build, this file needs to run from VS cmd prompt. +goto run + +@rem Build the sln with msbuild. +:msbuild-found +msbuild uv.sln /t:%target% /p:Configuration=%config% /p:Platform="%msbuild_platform%" /clp:NoSummary;NoItemAndPropertyList;Verbosity=minimal /nologo +if errorlevel 1 exit /b 1 + +:run +@rem Run tests if requested. +if "%run%"=="" goto exit +if not exist %config%\%run% goto exit +echo running '%config%\%run%' +%config%\%run% +goto exit + +:create-msvs-files-failed +echo Failed to create vc project files. +exit /b 1 + +:help +echo vcbuild.bat [debug/release] [test/bench] [clean] [noprojgen] [nobuild] [x86/x64] [static/shared] +echo Examples: +echo vcbuild.bat : builds debug build +echo vcbuild.bat test : builds debug build and runs tests +echo vcbuild.bat release bench: builds release build and runs benchmarks +goto exit + +:exit diff --git a/doc/site/getting-started.markdown b/doc/site/getting-started.markdown index 4cb6bca9..317901df 100644 --- a/doc/site/getting-started.markdown +++ b/doc/site/getting-started.markdown @@ -26,10 +26,9 @@ If you're on a Unix or Mac and you can rock a command line, it's just: $ make $ ./wren -This builds both the VM and the CLI. It downloads libuv automatically for you. -The release build of the CLI goes right into the repo's top level directory. -Binaries for other configurations are built to `bin/`. Static and shared -libraries for embedding Wren get built in `lib/`. +This builds both the VM and the CLI. The release build of the CLI goes right +into the repo's top level directory. Binaries for other configurations are built +to `bin/`. Static and shared libraries for embedding Wren get built in `lib/`. For Mac users, there is also an XCode project under `util/xcode`. For Windows brethren, `util/msvc2013` contains a Visual Studio solution. Note @@ -41,8 +40,7 @@ If you only want to build the VM, you can do: :::sh $ make vm -This compiles the VM to static and shared libraries. It does not even download -libuv since it isn't needed. +This compiles the VM to static and shared libraries. ## Interactive mode diff --git a/util/build_libuv.py b/util/build_libuv.py new file mode 100755 index 00000000..a8e9e427 --- /dev/null +++ b/util/build_libuv.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python + +# Runs GYP to generate the right project then uses that to build libuv. + +from __future__ import print_function + +import os +import platform +import shutil +import sys + +from util import ensure_dir, python2_binary, run + +LIB_UV_VERSION = "v1.10.0" +LIB_UV_DIR = "deps/libuv" + + +def build_libuv_mac(): + # Create the XCode project. + run([ + python2_binary(), LIB_UV_DIR + "/gyp_uv.py", "-f", "xcode" + ]) + + # Compile it. + # TODO: Support debug builds too. 
+ run([ + "xcodebuild", + # Build a 32-bit + 64-bit universal binary: + "ARCHS=i386 x86_64", "ONLY_ACTIVE_ARCH=NO", + "BUILD_DIR=out", + "-project", LIB_UV_DIR + "/uv.xcodeproj", + "-configuration", "Release", + "-target", "libuv" + ]) + + +def build_libuv_linux(arch): + # Set up the Makefile to build for the right architecture. + args = [python2_binary(), "gyp_uv.py", "-f", "make"] + if arch == "-32": + args.append("-Dtarget_arch=ia32") + elif arch == "-64": + args.append("-Dtarget_arch=x64") + + run(args, cwd=LIB_UV_DIR) + run(["make", "-C", "out", "BUILDTYPE=Release"], cwd=LIB_UV_DIR) + + +def build_libuv_windows(arch): + args = ["cmd", "/c", "vcbuild.bat", "release"] + if arch == "-32": + args.append("x86") + elif arch == "-64": + args.append("x64") + run(args, cwd=LIB_UV_DIR) + + +def build_libuv(arch, out): + if platform.system() == "Darwin": + build_libuv_mac() + elif platform.system() == "Linux": + build_libuv_linux(arch) + elif platform.system() == "Windows": + build_libuv_windows(arch) + else: + print("Unsupported platform: " + platform.system()) + sys.exit(1) + + # Copy the built library to the build directory for Mac and Linux where we + # support building for multiple architectures. + if platform.system() != "Windows": + ensure_dir(os.path.dirname(out)) + shutil.copyfile( + os.path.join(LIB_UV_DIR, "out", "Release", "libuv.a"), out) + + +def main(args): + expect_usage(len(args) >= 1 and len(args) <= 2) + + arch = "" if len(args) < 2 else args[1] + out = os.path.join("build", "libuv" + arch + ".a") + + build_libuv(arch, out) + + +def expect_usage(condition): + if (condition): return + + print("Usage: build_libuv.py [-32|-64]") + sys.exit(1) + + +main(sys.argv) diff --git a/util/libuv.py b/util/libuv.py deleted file mode 100755 index 719652d7..00000000 --- a/util/libuv.py +++ /dev/null @@ -1,188 +0,0 @@ -#!/usr/bin/env python - -# Downloads and compiles libuv. - -from __future__ import print_function - -import os -import os.path -import platform -import shutil -import subprocess -import sys - -LIB_UV_VERSION = "v1.10.0" -LIB_UV_DIR = "deps/libuv" - -def python2_binary(): - """Tries to find a python 2 executable.""" - - # Using [0] instead of .major here to support Python 2.6. - if sys.version_info[0] == 2: - return sys.executable or "python" - else: - return "python2" - -def ensure_dir(dir): - """Creates dir if not already there.""" - - if os.path.isdir(dir): - return - - os.makedirs(dir) - - -def remove_dir(dir): - """Recursively removes dir.""" - - if platform.system() == "Windows": - # rmtree gives up on readonly files on Windows - # rd doesn't like paths with forward slashes - subprocess.check_call( - ['cmd', '/c', 'rd', '/s', '/q', dir.replace('/', '\\')]) - else: - shutil.rmtree(dir) - - -def download_libuv(): - """Clones libuv into deps/libuv and checks out the right version.""" - - # Delete it if already there so we ensure we get the correct version if the - # version number in this script changes. - if os.path.isdir(LIB_UV_DIR): - print("Cleaning output directory...") - remove_dir(LIB_UV_DIR) - - ensure_dir("deps") - - print("Cloning libuv...") - run([ - "git", "clone", "--quiet", "--depth=1", - "https://github.com/libuv/libuv.git", - LIB_UV_DIR - ]) - - print("Getting tags...") - run([ - "git", "fetch", "--quiet", "--depth=1", "--tags" - ], cwd=LIB_UV_DIR) - - print("Checking out libuv " + LIB_UV_VERSION + "...") - run([ - "git", "checkout", "--quiet", LIB_UV_VERSION - ], cwd=LIB_UV_DIR) - - # TODO: Pin gyp to a known-good commit.
Update a previously downloaded gyp - # if it doesn't match that commit. - print("Downloading gyp...") - run([ - "git", "clone", "--quiet", "--depth=1", - "https://chromium.googlesource.com/external/gyp.git", - LIB_UV_DIR + "/build/gyp" - ]) - - -def build_libuv_mac(): - # Create the XCode project. - run([ - python2_binary(), LIB_UV_DIR + "/gyp_uv.py", "-f", "xcode" - ]) - - # Compile it. - # TODO: Support debug builds too. - run([ - "xcodebuild", - # Build a 32-bit + 64-bit universal binary: - "ARCHS=i386 x86_64", "ONLY_ACTIVE_ARCH=NO", - "BUILD_DIR=out", - "-project", LIB_UV_DIR + "/uv.xcodeproj", - "-configuration", "Release", - "-target", "All" - ]) - - -def build_libuv_linux(arch): - # Set up the Makefile to build for the right architecture. - args = [python2_binary(), "gyp_uv.py", "-f", "make"] - if arch == "-32": - args.append("-Dtarget_arch=ia32") - elif arch == "-64": - args.append("-Dtarget_arch=x64") - - run(args, cwd=LIB_UV_DIR) - run(["make", "-C", "out", "BUILDTYPE=Release"], cwd=LIB_UV_DIR) - - -def build_libuv_windows(arch): - args = ["cmd", "/c", "vcbuild.bat", "release"] - if arch == "-32": - args.append("x86") - elif arch == "-64": - args.append("x64") - run(args, cwd=LIB_UV_DIR) - - -def build_libuv(arch, out): - if platform.system() == "Darwin": - build_libuv_mac() - elif platform.system() == "Linux": - build_libuv_linux(arch) - elif platform.system() == "Windows": - build_libuv_windows(arch) - else: - print("Unsupported platform: " + platform.system()) - sys.exit(1) - - # Copy the build library to the build directory for Mac and Linux where we - # support building for multiple architectures. - if platform.system() != "Windows": - ensure_dir(os.path.dirname(out)) - shutil.copyfile( - os.path.join(LIB_UV_DIR, "out", "Release", "libuv.a"), out) - - -def run(args, cwd=None): - """Spawn a process to invoke [args] and mute its output.""" - - try: - # check_output() was added in Python 2.7. - has_check_output = (sys.version_info[0] > 2 or - (sys.version_info[0] == 2 and sys.version_info[1] >= 7)) - - if has_check_output: - subprocess.check_output(args, cwd=cwd, stderr=subprocess.STDOUT) - else: - proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE) - proc.communicate()[0].split() - except subprocess.CalledProcessError as error: - print(error.output) - sys.exit(error.returncode) - - -def main(): - expect_usage(len(sys.argv) >= 2) - - if sys.argv[1] == "download": - download_libuv() - elif sys.argv[1] == "build": - expect_usage(len(sys.argv) <= 3) - arch = "" - if len(sys.argv) == 3: - arch = sys.argv[2] - - out = os.path.join("build", "libuv" + arch + ".a") - - build_libuv(arch, out) - else: - expect_usage(false) - - -def expect_usage(condition): - if (condition): return - - print("Usage: libuv.py download") - print(" libuv.py build [-32|-64]") - sys.exit(1) - - -main() diff --git a/util/update_deps.py b/util/update_deps.py new file mode 100755 index 00000000..e951b95e --- /dev/null +++ b/util/update_deps.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +# Downloads GYP and libuv into deps/. +# +# Run this manually to update the vendored copies of GYP and libuv that are +# committed in the Wren repo. + +from __future__ import print_function + +import sys + +from util import clean_dir, remove_dir, replace_in_file, run + +LIB_UV_VERSION = "v1.10.0" +LIB_UV_DIR = "deps/libuv" + + +def main(args): + # Delete it if already there so we ensure we get the correct version if the + # version number in this script changes. 
+ clean_dir("deps") + + print("Cloning libuv...") + run([ + "git", "clone", "--quiet", "--depth=1", + "https://github.com/libuv/libuv.git", + LIB_UV_DIR + ]) + + print("Getting tags...") + run([ + "git", "fetch", "--quiet", "--depth=1", "--tags" + ], cwd=LIB_UV_DIR) + + print("Checking out libuv " + LIB_UV_VERSION + "...") + run([ + "git", "checkout", "--quiet", LIB_UV_VERSION + ], cwd=LIB_UV_DIR) + + + # TODO: Pin gyp to a known-good commit. Update a previously downloaded gyp + # if it doesn't match that commit. + print("Downloading gyp...") + run([ + "git", "clone", "--quiet", "--depth=1", + "https://chromium.googlesource.com/external/gyp.git", + LIB_UV_DIR + "/build/gyp" + ]) + + # We don't need all of libuv and gyp's various support files. + print("Deleting unneeded files...") + remove_dir("deps/libuv/build/gyp/buildbot") + remove_dir("deps/libuv/build/gyp/infra") + remove_dir("deps/libuv/build/gyp/samples") + remove_dir("deps/libuv/build/gyp/test") + remove_dir("deps/libuv/build/gyp/tools") + remove_dir("deps/libuv/docs") + remove_dir("deps/libuv/img") + remove_dir("deps/libuv/samples") + remove_dir("deps/libuv/test") + + # We are going to commit libuv and GYP in the main Wren repo, so we don't + # want them to be their own repos. + remove_dir("deps/libuv/.git") + remove_dir("deps/libuv/build/gyp/.git") + + # Libuv's .gitignore ignores GYP, but we want to commit it. + replace_in_file("deps/libuv/.gitignore", + "/build/gyp", + "# /build/gyp (We do want to commit GYP in Wren's repo)") + + +main(sys.argv) diff --git a/util/util.py b/util/util.py new file mode 100644 index 00000000..74043f6e --- /dev/null +++ b/util/util.py @@ -0,0 +1,77 @@ +# Utility functions used by other Python files in this directory. + +from __future__ import print_function + +import os +import os.path +import platform +import shutil +import subprocess +import sys + +def python2_binary(): + """Tries to find a python 2 executable.""" + + # Using [0] instead of .major here to support Python 2.6. + if sys.version_info[0] == 2: + return sys.executable or "python" + else: + return "python2" + + +def clean_dir(dir): + """If dir exists, deletes it and recreates it, otherwise creates it.""" + if os.path.isdir(dir): + remove_dir(dir) + + os.makedirs(dir) + + +def ensure_dir(dir): + """Creates dir if not already there.""" + + if os.path.isdir(dir): + return + + os.makedirs(dir) + + +def remove_dir(dir): + """Recursively removes dir.""" + + if platform.system() == "Windows": + # rmtree gives up on readonly files on Windows + # rd doesn't like paths with forward slashes + subprocess.check_call( + ['cmd', '/c', 'rd', '/s', '/q', dir.replace('/', '\\')]) + else: + shutil.rmtree(dir) + + +def replace_in_file(path, text, replace): + """Replaces all occurrences of `text` in the file at `path` with `replace`.""" + with open(path) as file: + contents = file.read() + + contents = contents.replace(text, replace) + + with open(path, "w") as file: + file.write(contents) + + +def run(args, cwd=None): + """Spawn a process to invoke [args] and mute its output.""" + + try: + # check_output() was added in Python 2.7. 
+ has_check_output = (sys.version_info[0] > 2 or + (sys.version_info[0] == 2 and sys.version_info[1] >= 7)) + + if has_check_output: + subprocess.check_output(args, cwd=cwd, stderr=subprocess.STDOUT) + else: + proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE) + proc.communicate()[0].split() + except subprocess.CalledProcessError as error: + print(error.output) + sys.exit(error.returncode) diff --git a/util/wren.mk b/util/wren.mk index efdef641..490e3f86 100644 --- a/util/wren.mk +++ b/util/wren.mk @@ -213,13 +213,10 @@ $(BUILD_DIR)/test/%.o: test/api/%.c $(OPT_HEADERS) $(MODULE_HEADERS) \ $(V) mkdir -p $(dir $@) $(V) $(CC) -c $(CFLAGS) $(CLI_FLAGS) -o $@ $(FILE_FLAG) $< -# Download libuv. -$(LIBUV_DIR)/build/gyp/gyp: util/libuv.py - $(V) ./util/libuv.py download - # Build libuv to a static library. -$(LIBUV): $(LIBUV_DIR)/build/gyp/gyp util/libuv.py - $(V) ./util/libuv.py build $(LIBUV_ARCH) +$(LIBUV): $(LIBUV_DIR)/build/gyp/gyp util/build_libuv.py + @ printf "%10s %-30s %s\n" run util/build_libuv.py + $(V) ./util/build_libuv.py $(LIBUV_ARCH) # Wren modules that get compiled into the binary as C strings. src/optional/wren_opt_%.wren.inc: src/optional/wren_opt_%.wren util/wren_to_c_string.py
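
For orientation: the two new scripts split the job that util/libuv.py used to do. update_deps.py is run by hand when a maintainer wants to refresh the vendored copies of libuv and GYP committed under deps/, while build_libuv.py is what the $(LIBUV) rule in util/wren.mk invokes on every build. Below is a minimal, illustrative Python sketch of that flow, assuming the repo root as the working directory; the "-64" flag is only an example value.

# Illustrative sketch only: drives the two new scripts end to end.
# Assumes it is run from the repo root; "-64" is just an example flag.

import subprocess

# Occasional, manual step: re-clone libuv v1.10.0 and GYP into deps/ so they
# can be committed directly in the Wren repo.
subprocess.check_call(["python", "util/update_deps.py"])

# Per-build step (normally run by the $(LIBUV) rule in util/wren.mk): compile
# the vendored libuv, copying the result to build/libuv-64.a on Mac and Linux.
subprocess.check_call(["python", "util/build_libuv.py", "-64"])

Because libuv and GYP are now committed in the repo, a plain `make` should no longer need network access; only update_deps.py touches the network, and only maintainers need to run it.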