CHEVALLIER Abel
2025-11-13 16:23:22 +01:00
parent de9c515a47
commit cb235644dc
34924 changed files with 3811102 additions and 0 deletions

node_modules/@parcel/watcher-linux-x64-glibc/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017-present Devon Govett
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1 @@
This is the linux-x64-glibc build of @parcel/watcher. See https://github.com/parcel-bundler/watcher for details.


@@ -0,0 +1,33 @@
{
"name": "@parcel/watcher-linux-x64-glibc",
"version": "2.5.1",
"main": "watcher.node",
"repository": {
"type": "git",
"url": "https://github.com/parcel-bundler/watcher.git"
},
"description": "A native C++ Node module for querying and subscribing to filesystem events. Used by Parcel 2.",
"license": "MIT",
"publishConfig": {
"access": "public"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
},
"files": [
"watcher.node"
],
"engines": {
"node": ">= 10.0.0"
},
"os": [
"linux"
],
"cpu": [
"x64"
],
"libc": [
"glibc"
]
}

Binary file not shown.

node_modules/@parcel/watcher-linux-x64-musl/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017-present Devon Govett
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1 @@
This is the linux-x64-musl build of @parcel/watcher. See https://github.com/parcel-bundler/watcher for details.


@@ -0,0 +1,33 @@
{
"name": "@parcel/watcher-linux-x64-musl",
"version": "2.5.1",
"main": "watcher.node",
"repository": {
"type": "git",
"url": "https://github.com/parcel-bundler/watcher.git"
},
"description": "A native C++ Node module for querying and subscribing to filesystem events. Used by Parcel 2.",
"license": "MIT",
"publishConfig": {
"access": "public"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
},
"files": [
"watcher.node"
],
"engines": {
"node": ">= 10.0.0"
},
"os": [
"linux"
],
"cpu": [
"x64"
],
"libc": [
"musl"
]
}

Binary file not shown.

node_modules/@parcel/watcher/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017-present Devon Govett
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

node_modules/@parcel/watcher/README.md generated vendored Normal file

@@ -0,0 +1,135 @@
# @parcel/watcher
A native C++ Node module for querying and subscribing to filesystem events. Used by [Parcel 2](https://github.com/parcel-bundler/parcel).
## Features
- **Watch** - subscribe to realtime recursive directory change notifications when files or directories are created, updated, or deleted.
- **Query** - performantly query for historical change events in a directory, even when your program is not running.
- **Native** - implemented in C++ for performance and low-level integration with the operating system.
- **Cross platform** - includes backends for macOS, Linux, Windows, FreeBSD, and Watchman.
- **Performant** - events are throttled in C++ so the JavaScript thread is not overwhelmed during large filesystem changes (e.g. `git checkout` or `npm install`).
- **Scalable** - tens of thousands of files can be watched or queried at once with good performance.
## Example
```javascript
const watcher = require('@parcel/watcher');
const path = require('path');
// Subscribe to events
let subscription = await watcher.subscribe(process.cwd(), (err, events) => {
console.log(events);
});
// later on...
await subscription.unsubscribe();
// Get events since some saved snapshot in the past
let snapshotPath = path.join(process.cwd(), 'snapshot.txt');
let events = await watcher.getEventsSince(process.cwd(), snapshotPath);
// Save a snapshot for later
await watcher.writeSnapshot(process.cwd(), snapshotPath);
```
## Watching
`@parcel/watcher` supports subscribing to realtime notifications of changes in a directory. It works recursively, so changes in sub-directories will also be emitted.
Events are throttled and coalesced for performance during large changes like `git checkout` or `npm install`, and a single notification will be emitted with all of the events at the end.
Only one notification will be emitted per file. For example, if a file was both created and updated since the last event, you'll get only a `create` event. If a file is both created and deleted, you will not be notified of that file. Renames cause two events: a `delete` for the old name, and a `create` for the new name.
```javascript
let subscription = await watcher.subscribe(process.cwd(), (err, events) => {
console.log(events);
});
```
Events have two properties:
- `type` - the event type: `create`, `update`, or `delete`.
- `path` - the absolute path to the file or directory.
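For example, a minimal callback sketch (illustrative only, built from the documented `type` and `path` properties) that tallies a batch of events by type:
```javascript
let subscription = await watcher.subscribe(process.cwd(), (err, events) => {
  if (err) {
    console.error(err);
    return;
  }
  // A single batch may mix event types; bucket them before reacting.
  let created = events.filter(e => e.type === 'create');
  let updated = events.filter(e => e.type === 'update');
  let deleted = events.filter(e => e.type === 'delete');
  console.log(`${created.length} created, ${updated.length} updated, ${deleted.length} deleted`);
});
```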
To unsubscribe from change notifications, call the `unsubscribe` method on the returned subscription object.
```javascript
await subscription.unsubscribe();
```
`@parcel/watcher` has the following watcher backends, listed in priority order:
- [FSEvents](https://developer.apple.com/documentation/coreservices/file_system_events) on macOS
- [Watchman](https://facebook.github.io/watchman/) if installed
- [inotify](http://man7.org/linux/man-pages/man7/inotify.7.html) on Linux
- [ReadDirectoryChangesW](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365465%28v%3Dvs.85%29.aspx) on Windows
- [kqueue](https://man.freebsd.org/cgi/man.cgi?kqueue) on FreeBSD, or as an alternative to FSEvents on macOS
You can specify the exact backend you wish to use by passing the `backend` option. If that backend is not available on the current platform, the default backend will be used instead. See below for the list of backend names that can be passed to the options.
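For instance, a short sketch requesting the Watchman backend explicitly; if Watchman is not installed, the platform default is used instead:
```javascript
let subscription = await watcher.subscribe(
  process.cwd(),
  (err, events) => console.log(events),
  {backend: 'watchman'}
);
```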
## Querying
`@parcel/watcher` also supports querying for historical changes made in a directory, even when your program is not running. This makes it easy to invalidate a cache and re-build only the files that have changed, for example. It can be **significantly** faster than traversing the entire filesystem to determine what files changed, depending on the platform.
In order to query for historical changes, you first need a previous snapshot to compare to. This can be saved to a file with the `writeSnapshot` function, e.g. just before your program exits.
```javascript
await watcher.writeSnapshot(dirPath, snapshotPath);
```
When your program starts up, you can query for changes that have occurred since that snapshot using the `getEventsSince` function.
```javascript
let events = await watcher.getEventsSince(dirPath, snapshotPath);
```
The events returned are exactly the same as the events that would be passed to the `subscribe` callback (see above).
`@parcel/watcher` has the following querying backends, listed in priority order:
- [FSEvents](https://developer.apple.com/documentation/coreservices/file_system_events) on macOS
- [Watchman](https://facebook.github.io/watchman/) if installed
- [fts](http://man7.org/linux/man-pages/man3/fts.3.html) (brute force) on Linux and FreeBSD
- [FindFirstFile](https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-findfirstfilea) (brute force) on Windows
The FSEvents (macOS) and Watchman backends are significantly more performant than the brute force backends used by default on Linux and Windows, for example returning results in milliseconds instead of seconds for large directory trees. This is because a background daemon monitoring filesystem changes on those platforms allows us to query cached data rather than traversing the filesystem manually (brute force).
macOS has good performance with FSEvents by default. For the best performance on other platforms, install [Watchman](https://facebook.github.io/watchman/) and it will be used by `@parcel/watcher` automatically.
You can specify the exact backend you wish to use by passing the `backend` option. If that backend is not available on the current platform, the default backend will be used instead. See below for the list of backend names that can be passed to the options.
## Options
All of the APIs in `@parcel/watcher` support the following options, which are passed as an object as the last function argument.
- `ignore` - an array of paths or glob patterns to ignore. Uses [`is-glob`](https://github.com/micromatch/is-glob) to distinguish paths from globs; glob patterns are parsed with [`micromatch`](https://github.com/micromatch/micromatch) (see [features](https://github.com/micromatch/micromatch#matching-features)). See the combined example after this list.
- paths can be relative or absolute and can either be files or directories. No events will be emitted about these files or directories or their children.
- glob patterns match on relative paths from the root that is watched. No events will be emitted for matching paths.
- `backend` - the name of an explicitly chosen backend to use. Allowed options are `"fs-events"`, `"watchman"`, `"inotify"`, `"kqueue"`, `"windows"`, or `"brute-force"` (only for querying). If the specified backend is not available on the current platform, the default backend will be used instead.
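A brief sketch passing both options as the final argument; the ignored names are illustrative, not defaults:
```javascript
let subscription = await watcher.subscribe(process.cwd(), (err, events) => {
  console.log(events);
}, {
  // 'node_modules' is treated as a path, '**/*.log' as a glob (via is-glob).
  ignore: ['node_modules', '**/*.log'],
  backend: 'watchman'
});
```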
## WASM
The `@parcel/watcher-wasm` package can be used in place of `@parcel/watcher` on unsupported platforms. It relies on the Node `fs` module, so in non-Node environments such as browsers, an `fs` polyfill will be needed.
**Note**: the WASM implementation is significantly less efficient than the native implementations because it must crawl the file system to watch each directory individually. Use the native `@parcel/watcher` package wherever possible.
```js
import {subscribe} from '@parcel/watcher-wasm';
// Use the module as documented above.
subscribe(/* ... */);
```
## Who is using this?
- [Parcel 2](https://parceljs.org/)
- [VSCode](https://code.visualstudio.com/updates/v1_62#_file-watching-changes)
- [Tailwind CSS Intellisense](https://github.com/tailwindlabs/tailwindcss-intellisense)
- [Gatsby Cloud](https://twitter.com/chatsidhartha/status/1435647412828196867)
- [Nx](https://nx.dev)
- [Nuxt](https://nuxt.com)
## License
MIT

node_modules/@parcel/watcher/binding.gyp generated vendored Normal file

@@ -0,0 +1,93 @@
{
"targets": [
{
"target_name": "watcher",
"defines": [ "NAPI_DISABLE_CPP_EXCEPTIONS" ],
"sources": [ "src/binding.cc", "src/Watcher.cc", "src/Backend.cc", "src/DirTree.cc", "src/Glob.cc", "src/Debounce.cc" ],
"include_dirs" : ["<!(node -p \"require('node-addon-api').include_dir\")"],
'cflags!': [ '-fno-exceptions', '-std=c++17' ],
'cflags_cc!': [ '-fno-exceptions', '-std=c++17' ],
"conditions": [
['OS=="mac"', {
"sources": [
"src/watchman/BSER.cc",
"src/watchman/WatchmanBackend.cc",
"src/shared/BruteForceBackend.cc",
"src/unix/fts.cc",
"src/macos/FSEventsBackend.cc",
"src/kqueue/KqueueBackend.cc"
],
"link_settings": {
"libraries": ["CoreServices.framework"]
},
"defines": [
"WATCHMAN",
"BRUTE_FORCE",
"FS_EVENTS",
"KQUEUE"
],
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES"
}
}],
['OS=="mac" and target_arch=="arm64"', {
"xcode_settings": {
"ARCHS": ["arm64"]
}
}],
['OS=="linux" or OS=="android"', {
"sources": [
"src/watchman/BSER.cc",
"src/watchman/WatchmanBackend.cc",
"src/shared/BruteForceBackend.cc",
"src/linux/InotifyBackend.cc",
"src/unix/legacy.cc"
],
"defines": [
"WATCHMAN",
"INOTIFY",
"BRUTE_FORCE"
]
}],
['OS=="win"', {
"sources": [
"src/watchman/BSER.cc",
"src/watchman/WatchmanBackend.cc",
"src/shared/BruteForceBackend.cc",
"src/windows/WindowsBackend.cc",
"src/windows/win_utils.cc"
],
"defines": [
"WATCHMAN",
"WINDOWS",
"BRUTE_FORCE"
],
"msvs_settings": {
"VCCLCompilerTool": {
"ExceptionHandling": 1, # /EHsc
"AdditionalOptions": ['-std:c++17']
}
}
}],
['OS=="freebsd"', {
"sources": [
"src/watchman/BSER.cc",
"src/watchman/WatchmanBackend.cc",
"src/shared/BruteForceBackend.cc",
"src/unix/fts.cc",
"src/kqueue/KqueueBackend.cc"
],
"defines": [
"WATCHMAN",
"BRUTE_FORCE",
"KQUEUE"
]
}]
]
}
],
"variables": {
"openssl_fips": "",
"node_use_dtrace": "false"
}
}

node_modules/@parcel/watcher/index.d.ts generated vendored Normal file

@@ -0,0 +1,49 @@
declare type FilePath = string;
declare type GlobPattern = string;
declare namespace ParcelWatcher {
export type BackendType =
| 'fs-events'
| 'watchman'
| 'inotify'
| 'windows'
| 'brute-force';
export type EventType = 'create' | 'update' | 'delete';
export interface Options {
ignore?: (FilePath|GlobPattern)[];
backend?: BackendType;
}
export type SubscribeCallback = (
err: Error | null,
events: Event[]
) => unknown;
export interface AsyncSubscription {
unsubscribe(): Promise<void>;
}
export interface Event {
path: FilePath;
type: EventType;
}
export function getEventsSince(
dir: FilePath,
snapshot: FilePath,
opts?: Options
): Promise<Event[]>;
export function subscribe(
dir: FilePath,
fn: SubscribeCallback,
opts?: Options
): Promise<AsyncSubscription>;
export function unsubscribe(
dir: FilePath,
fn: SubscribeCallback,
opts?: Options
): Promise<void>;
export function writeSnapshot(
dir: FilePath,
snapshot: FilePath,
opts?: Options
): Promise<FilePath>;
}
export = ParcelWatcher;

node_modules/@parcel/watcher/index.js generated vendored Normal file

@@ -0,0 +1,41 @@
const {createWrapper} = require('./wrapper');
let name = `@parcel/watcher-${process.platform}-${process.arch}`;
if (process.platform === 'linux') {
const { MUSL, family } = require('detect-libc');
if (family === MUSL) {
name += '-musl';
} else {
name += '-glibc';
}
}
let binding;
try {
binding = require(name);
} catch (err) {
handleError(err);
try {
binding = require('./build/Release/watcher.node');
} catch (err) {
handleError(err);
try {
binding = require('./build/Debug/watcher.node');
} catch (err) {
handleError(err);
throw new Error(`No prebuild or local build of @parcel/watcher found. Tried ${name}. Please ensure it is installed (don't use --no-optional when installing with npm). Otherwise it is possible we don't support your platform yet. If this is the case, please report an issue to https://github.com/parcel-bundler/watcher.`);
}
}
}
function handleError(err) {
if (err?.code !== 'MODULE_NOT_FOUND') {
throw err;
}
}
const wrapper = createWrapper(binding);
exports.writeSnapshot = wrapper.writeSnapshot;
exports.getEventsSince = wrapper.getEventsSince;
exports.subscribe = wrapper.subscribe;
exports.unsubscribe = wrapper.unsubscribe;

node_modules/@parcel/watcher/index.js.flow generated vendored Normal file

@@ -0,0 +1,48 @@
// @flow
declare type FilePath = string;
declare type GlobPattern = string;
export type BackendType =
| 'fs-events'
| 'watchman'
| 'inotify'
| 'windows'
| 'brute-force';
export type EventType = 'create' | 'update' | 'delete';
export interface Options {
ignore?: Array<FilePath | GlobPattern>,
backend?: BackendType
}
export type SubscribeCallback = (
err: ?Error,
events: Array<Event>
) => mixed;
export interface AsyncSubscription {
unsubscribe(): Promise<void>
}
export interface Event {
path: FilePath,
type: EventType
}
declare module.exports: {
getEventsSince(
dir: FilePath,
snapshot: FilePath,
opts?: Options
): Promise<Array<Event>>,
subscribe(
dir: FilePath,
fn: SubscribeCallback,
opts?: Options
): Promise<AsyncSubscription>,
unsubscribe(
dir: FilePath,
fn: SubscribeCallback,
opts?: Options
): Promise<void>,
writeSnapshot(
dir: FilePath,
snapshot: FilePath,
opts?: Options
): Promise<FilePath>
}


@@ -0,0 +1 @@
../detect-libc/bin/detect-libc.js


@@ -0,0 +1,7 @@
.nyc_output
.travis.yml
coverage
test.js
node_modules
/.circleci
/tests/integration


@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,78 @@
# detect-libc
Node.js module to detect the C standard library (libc) implementation
family and version in use on a given Linux system.
Provides a value suitable for use with the `LIBC` option of
[prebuild](https://www.npmjs.com/package/prebuild),
[prebuild-ci](https://www.npmjs.com/package/prebuild-ci) and
[prebuild-install](https://www.npmjs.com/package/prebuild-install),
therefore allowing build and provision of pre-compiled binaries
for musl-based Linux e.g. Alpine as well as glibc-based.
Currently supports libc detection of `glibc` and `musl`.
## Install
```sh
npm install detect-libc
```
## Usage
### API
```js
const { GLIBC, MUSL, family, version, isNonGlibcLinux } = require('detect-libc');
```
* `GLIBC` is a String containing the value "glibc" for comparison with `family`.
* `MUSL` is a String containing the value "musl" for comparison with `family`.
* `family` is a String representing the system libc family (see the example after this list).
* `version` is a String representing the system libc version number.
* `isNonGlibcLinux` is a Boolean representing whether the system is a non-glibc Linux, e.g. Alpine.
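For instance, a small sketch, mirroring how `@parcel/watcher`'s `index.js` (shown earlier in this commit) uses the module, that derives a libc-specific package suffix on Linux:
```js
const { MUSL, family } = require('detect-libc');

let suffix = '';
if (process.platform === 'linux') {
  // family is 'glibc', 'musl', or '' when detection fails; non-musl is treated as glibc here.
  suffix = family === MUSL ? '-musl' : '-glibc';
}
console.log(`would load @parcel/watcher-${process.platform}-${process.arch}${suffix}`);
```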
### detect-libc command line tool
When run on a Linux system with a non-glibc libc,
the child command will be run with the `LIBC` environment variable
set to the relevant value.
On all other platforms, the child command is run as-is.
The command line feature requires `spawnSync` provided by Node v0.12+.
```sh
detect-libc child-command
```
## Integrating with prebuild
```json
"scripts": {
"install": "detect-libc prebuild-install || node-gyp rebuild",
"test": "mocha && detect-libc prebuild-ci"
},
"dependencies": {
"detect-libc": "^1.0.2",
"prebuild-install": "^2.2.0"
},
"devDependencies": {
"prebuild": "^6.2.1",
"prebuild-ci": "^2.2.3"
}
```
## Licence
Copyright 2017 Lovell Fuller
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0.html)
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,18 @@
#!/usr/bin/env node
'use strict';
var spawnSync = require('child_process').spawnSync;
var libc = require('../');
var spawnOptions = {
env: process.env,
shell: true,
stdio: 'inherit'
};
if (libc.isNonGlibcLinux) {
spawnOptions.env.LIBC = process.env.LIBC || libc.family;
}
process.exit(spawnSync(process.argv[2], process.argv.slice(3), spawnOptions).status);


@@ -0,0 +1,92 @@
'use strict';
var platform = require('os').platform();
var spawnSync = require('child_process').spawnSync;
var readdirSync = require('fs').readdirSync;
var GLIBC = 'glibc';
var MUSL = 'musl';
var spawnOptions = {
encoding: 'utf8',
env: process.env
};
if (!spawnSync) {
spawnSync = function () {
return { status: 126, stdout: '', stderr: '' };
};
}
function contains (needle) {
return function (haystack) {
return haystack.indexOf(needle) !== -1;
};
}
function versionFromMuslLdd (out) {
return out.split(/[\r\n]+/)[1].trim().split(/\s/)[1];
}
function safeReaddirSync (path) {
try {
return readdirSync(path);
} catch (e) {}
return [];
}
var family = '';
var version = '';
var method = '';
if (platform === 'linux') {
// Try getconf
var glibc = spawnSync('getconf', ['GNU_LIBC_VERSION'], spawnOptions);
if (glibc.status === 0) {
family = GLIBC;
version = glibc.stdout.trim().split(' ')[1];
method = 'getconf';
} else {
// Try ldd
var ldd = spawnSync('ldd', ['--version'], spawnOptions);
if (ldd.status === 0 && ldd.stdout.indexOf(MUSL) !== -1) {
family = MUSL;
version = versionFromMuslLdd(ldd.stdout);
method = 'ldd';
} else if (ldd.status === 1 && ldd.stderr.indexOf(MUSL) !== -1) {
family = MUSL;
version = versionFromMuslLdd(ldd.stderr);
method = 'ldd';
} else {
// Try filesystem (family only)
var lib = safeReaddirSync('/lib');
if (lib.some(contains('-linux-gnu'))) {
family = GLIBC;
method = 'filesystem';
} else if (lib.some(contains('libc.musl-'))) {
family = MUSL;
method = 'filesystem';
} else if (lib.some(contains('ld-musl-'))) {
family = MUSL;
method = 'filesystem';
} else {
var usrSbin = safeReaddirSync('/usr/sbin');
if (usrSbin.some(contains('glibc'))) {
family = GLIBC;
method = 'filesystem';
}
}
}
}
}
var isNonGlibcLinux = (family !== '' && family !== GLIBC);
module.exports = {
GLIBC: GLIBC,
MUSL: MUSL,
family: family,
version: version,
method: method,
isNonGlibcLinux: isNonGlibcLinux
};


@@ -0,0 +1,35 @@
{
"name": "detect-libc",
"version": "1.0.3",
"description": "Node.js module to detect the C standard library (libc) implementation family and version",
"main": "lib/detect-libc.js",
"bin": {
"detect-libc": "./bin/detect-libc.js"
},
"scripts": {
"test": "semistandard && nyc --reporter=lcov ava"
},
"repository": {
"type": "git",
"url": "git://github.com/lovell/detect-libc"
},
"keywords": [
"libc",
"glibc",
"musl"
],
"author": "Lovell Fuller <npm@lovell.info>",
"contributors": [
"Niklas Salmoukas <niklas@salmoukas.com>"
],
"license": "Apache-2.0",
"devDependencies": {
"ava": "^0.23.0",
"nyc": "^11.3.0",
"proxyquire": "^1.8.0",
"semistandard": "^11.0.0"
},
"engines": {
"node": ">=0.10"
}
}


@@ -0,0 +1,9 @@
The MIT License (MIT)
Copyright (c) 2017 [Node.js API collaborators](https://github.com/nodejs/node-addon-api#collaborators)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -0,0 +1,319 @@
NOTE: The default branch has been renamed!
master is now named main
If you have a local clone, you can update it by running:
```shell
git branch -m master main
git fetch origin
git branch -u origin/main main
```
# **node-addon-api module**
This module contains **header-only C++ wrapper classes** which simplify
the use of the C based [Node-API](https://nodejs.org/dist/latest/docs/api/n-api.html)
provided by Node.js when using C++. It provides a C++ object model
and exception handling semantics with low overhead.
There are three options for implementing addons: Node-API, nan, or direct
use of internal V8, libuv, and Node.js libraries. Unless there is a need for
direct access to functionality that is not exposed by Node-API as outlined
in [C/C++ addons](https://nodejs.org/dist/latest/docs/api/addons.html)
in Node.js core, use Node-API. Refer to
[C/C++ addons with Node-API](https://nodejs.org/dist/latest/docs/api/n-api.html)
for more information on Node-API.
Node-API is an ABI stable C interface provided by Node.js for building native
addons. It is independent of the underlying JavaScript runtime (e.g. V8 or ChakraCore)
and is maintained as part of Node.js itself. It is intended to insulate
native addons from changes in the underlying JavaScript engine and allow
modules compiled for one version to run on later versions of Node.js without
recompilation.
The `node-addon-api` module, which is not part of Node.js, preserves the benefits
of the Node-API as it consists only of inline code that depends only on the stable API
provided by Node-API. As such, modules built against one version of Node.js
using node-addon-api should run without having to be rebuilt with newer versions
of Node.js.
It is important to remember that *other* Node.js interfaces such as
`libuv` (included in a project via `#include <uv.h>`) are not ABI-stable across
Node.js major versions. Thus, an addon must use Node-API and/or `node-addon-api`
exclusively and build against a version of Node.js that includes an
implementation of Node-API (meaning an active LTS version of Node.js) in
order to benefit from ABI stability across Node.js major versions. Node.js
provides an [ABI stability guide][] containing a detailed explanation of ABI
stability in general, and the Node-API ABI stability guarantee in particular.
As new APIs are added to Node-API, node-addon-api must be updated to provide
wrappers for those new APIs. For this reason, node-addon-api provides
methods that allow callers to obtain the underlying Node-API handles so
direct calls to Node-API and the use of the objects/methods provided by
node-addon-api can be used together, for example to call an API for which
node-addon-api does not yet provide a wrapper.
APIs exposed by node-addon-api are generally used to create and
manipulate JavaScript values. Concepts and operations generally map
to ideas specified in the **ECMA262 Language Specification**.
The [Node-API Resource](https://nodejs.github.io/node-addon-examples/) offers an
excellent orientation and tips for developers just getting started with Node-API
and node-addon-api.
- **[Setup](#setup)**
- **[API Documentation](#api)**
- **[Examples](#examples)**
- **[Tests](#tests)**
- **[More resources and info about native Addons](#resources)**
- **[Badges](#badges)**
- **[Code of Conduct](CODE_OF_CONDUCT.md)**
- **[Contributors](#contributors)**
- **[License](#license)**
## **Current version: 7.1.1**
(See [CHANGELOG.md](CHANGELOG.md) for complete Changelog)
[![NPM](https://nodei.co/npm/node-addon-api.png?downloads=true&downloadRank=true)](https://nodei.co/npm/node-addon-api/) [![NPM](https://nodei.co/npm-dl/node-addon-api.png?months=6&height=1)](https://nodei.co/npm/node-addon-api/)
<a name="setup"></a>
node-addon-api is based on [Node-API](https://nodejs.org/api/n-api.html) and supports using different Node-API versions.
This allows addons built with it to run with Node.js versions which support the targeted Node-API version.
**However**, the node-addon-api support model covers only the active LTS Node.js versions. This means that
every year there will be a new major version that drops support for the Node.js LTS version that has gone out of service.
The oldest Node.js version supported by the current version of node-addon-api is Node.js 16.x.
## Setup
- [Installation and usage](doc/setup.md)
- [node-gyp](doc/node-gyp.md)
- [cmake-js](doc/cmake-js.md)
- [Conversion tool](doc/conversion-tool.md)
- [Checker tool](doc/checker-tool.md)
- [Generator](doc/generator.md)
- [Prebuild tools](doc/prebuild_tools.md)
<a name="api"></a>
### **API Documentation**
The following is the documentation for node-addon-api.
- [Full Class Hierarchy](doc/hierarchy.md)
- [Addon Structure](doc/addon.md)
- Data Types:
- [Env](doc/env.md)
- [CallbackInfo](doc/callbackinfo.md)
- [Reference](doc/reference.md)
- [Value](doc/value.md)
- [Name](doc/name.md)
- [Symbol](doc/symbol.md)
- [String](doc/string.md)
- [Number](doc/number.md)
- [Date](doc/date.md)
- [BigInt](doc/bigint.md)
- [Boolean](doc/boolean.md)
- [External](doc/external.md)
- [Object](doc/object.md)
- [Array](doc/array.md)
- [ObjectReference](doc/object_reference.md)
- [PropertyDescriptor](doc/property_descriptor.md)
- [Function](doc/function.md)
- [FunctionReference](doc/function_reference.md)
- [ObjectWrap](doc/object_wrap.md)
- [ClassPropertyDescriptor](doc/class_property_descriptor.md)
- [Buffer](doc/buffer.md)
- [ArrayBuffer](doc/array_buffer.md)
- [TypedArray](doc/typed_array.md)
- [TypedArrayOf](doc/typed_array_of.md)
- [DataView](doc/dataview.md)
- [Error Handling](doc/error_handling.md)
- [Error](doc/error.md)
- [TypeError](doc/type_error.md)
- [RangeError](doc/range_error.md)
- [SyntaxError](doc/syntax_error.md)
- [Object Lifetime Management](doc/object_lifetime_management.md)
- [HandleScope](doc/handle_scope.md)
- [EscapableHandleScope](doc/escapable_handle_scope.md)
- [Memory Management](doc/memory_management.md)
- [Async Operations](doc/async_operations.md)
- [AsyncWorker](doc/async_worker.md)
- [AsyncContext](doc/async_context.md)
- [AsyncWorker Variants](doc/async_worker_variants.md)
- [Thread-safe Functions](doc/threadsafe.md)
- [ThreadSafeFunction](doc/threadsafe_function.md)
- [TypedThreadSafeFunction](doc/typed_threadsafe_function.md)
- [Promises](doc/promises.md)
- [Version management](doc/version_management.md)
<a name="examples"></a>
### **Examples**
Are you new to **node-addon-api**? Take a look at our **[examples](https://github.com/nodejs/node-addon-examples)**
- **[Hello World](https://github.com/nodejs/node-addon-examples/tree/main/src/1-getting-started/1_hello_world)**
- **[Pass arguments to a function](https://github.com/nodejs/node-addon-examples/tree/main/src/1-getting-started/2_function_arguments/node-addon-api)**
- **[Callbacks](https://github.com/nodejs/node-addon-examples/tree/main/src/1-getting-started/3_callbacks/node-addon-api)**
- **[Object factory](https://github.com/nodejs/node-addon-examples/tree/main/src/1-getting-started/4_object_factory/node-addon-api)**
- **[Function factory](https://github.com/nodejs/node-addon-examples/tree/main/src/1-getting-started/5_function_factory/node-addon-api)**
- **[Wrapping C++ Object](https://github.com/nodejs/node-addon-examples/tree/main/src/1-getting-started/6_object_wrap/node-addon-api)**
- **[Factory of wrapped object](https://github.com/nodejs/node-addon-examples/tree/main/src/1-getting-started/7_factory_wrap/node-addon-api)**
- **[Passing wrapped object around](https://github.com/nodejs/node-addon-examples/tree/main/src/2-js-to-native-conversion/8_passing_wrapped/node-addon-api)**
<a name="tests"></a>
### **Tests**
To run the **node-addon-api** tests do:
```
npm install
npm test
```
To avoid testing the deprecated portions of the API run
```
npm install
npm test --disable-deprecated
```
To run the tests targeting a specific version of Node-API run
```
npm install
export NAPI_VERSION=X
npm test --NAPI_VERSION=X
```
where X is the version of Node-API you want to target.
To run a specific unit test, filter conditions are available
**Example:**
compile and run only tests on objectwrap.cc and objectwrap.js
```
npm run unit --filter=objectwrap
```
Multiple unit tests can be selected with wildcards
**Example:**
compile and run all test files ending with "reference" -> function_reference.cc, object_reference.cc, reference.cc
```
npm run unit --filter=*reference
```
Multiple filter conditions can be joined to broaden the test selection
**Example:**
compile and run all tests under folders threadsafe_function and typed_threadsafe_function and also the objectwrap.cc file
```
npm run unit --filter='*function objectwrap'
```
### **Debug**
To run the **node-addon-api** tests with `--debug` option:
```
npm run-script dev
```
If you want a faster build, you might use the following option:
```
npm run-script dev:incremental
```
Take a look and get inspired by our **[test suite](https://github.com/nodejs/node-addon-api/tree/HEAD/test)**
### **Benchmarks**
You can run the available benchmarks using the following command:
```
npm run-script benchmark
```
See [benchmark/README.md](benchmark/README.md) for more details about running and adding benchmarks.
<a name="resources"></a>
### **More resources and info about native Addons**
- **[C++ Addons](https://nodejs.org/dist/latest/docs/api/addons.html)**
- **[Node-API](https://nodejs.org/dist/latest/docs/api/n-api.html)**
- **[Node-API - Next Generation Node API for Native Modules](https://youtu.be/-Oniup60Afs)**
- **[How We Migrated Realm JavaScript From NAN to Node-API](https://developer.mongodb.com/article/realm-javascript-nan-to-n-api)**
As node-addon-api's core mission is to expose the plain C Node-API as C++
wrappers, tools that build on n-api/node-addon-api to provide more
convenient patterns for developing a Node.js add-on
can be published to NPM as standalone packages. It is also recommended to tag
such packages with `node-addon-api` to provide more visibility to the community.
Quick links to NPM searches: [keywords:node-addon-api](https://www.npmjs.com/search?q=keywords%3Anode-addon-api).
<a name="other-bindings"></a>
### **Other bindings**
- **[napi-rs](https://napi.rs)** - (`Rust`)
<a name="badges"></a>
### **Badges**
The use of badges is recommended to indicate the minimum version of Node-API
required for the module. This helps to determine which Node.js major versions are
supported. Addon maintainers can consult the [Node-API support matrix][] to determine
which Node.js versions provide a given Node-API version. The following badges are
available:
![Node-API v1 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v1%20Badge.svg)
![Node-API v2 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v2%20Badge.svg)
![Node-API v3 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v3%20Badge.svg)
![Node-API v4 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v4%20Badge.svg)
![Node-API v5 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v5%20Badge.svg)
![Node-API v6 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v6%20Badge.svg)
![Node-API v7 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v7%20Badge.svg)
![Node-API v8 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v8%20Badge.svg)
![Node-API v9 Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20v9%20Badge.svg)
![Node-API Experimental Version Badge](https://github.com/nodejs/abi-stable-node/blob/doc/assets/Node-API%20Experimental%20Version%20Badge.svg)
## **Contributing**
We love contributions from the community to **node-addon-api**!
See [CONTRIBUTING.md](CONTRIBUTING.md) for more details on our philosophy around extending this module.
<a name="contributors"></a>
## Team members
### Active
| Name | GitHub Link |
| ------------------- | ----------------------------------------------------- |
| Anna Henningsen | [addaleax](https://github.com/addaleax) |
| Chengzhong Wu | [legendecas](https://github.com/legendecas) |
| Jack Xia | [JckXia](https://github.com/JckXia) |
| Kevin Eady | [KevinEady](https://github.com/KevinEady) |
| Michael Dawson | [mhdawson](https://github.com/mhdawson) |
| Nicola Del Gobbo | [NickNaso](https://github.com/NickNaso) |
| Vladimir Morozov | [vmoroz](https://github.com/vmoroz) |
### Emeritus
| Name | GitHub Link |
| ------------------- | ----------------------------------------------------- |
| Arunesh Chandra | [aruneshchandra](https://github.com/aruneshchandra) |
| Benjamin Byholm | [kkoopa](https://github.com/kkoopa) |
| Gabriel Schulhof | [gabrielschulhof](https://github.com/gabrielschulhof) |
| Hitesh Kanwathirtha | [digitalinfinity](https://github.com/digitalinfinity) |
| Jason Ginchereau | [jasongin](https://github.com/jasongin) |
| Jim Schlight | [jschlight](https://github.com/jschlight) |
| Sampson Gao | [sampsongao](https://github.com/sampsongao) |
| Taylor Woll | [boingoing](https://github.com/boingoing) |
<a name="license"></a>
Licensed under [MIT](./LICENSE.md)
[ABI stability guide]: https://nodejs.org/en/docs/guides/abi-stability/
[Node-API support matrix]: https://nodejs.org/dist/latest/docs/api/n-api.html#n_api_n_api_version_matrix


@@ -0,0 +1,20 @@
{
'variables': {
'NAPI_VERSION%': "<!(node -p \"process.env.NAPI_VERSION || process.versions.napi\")",
'disable_deprecated': "<!(node -p \"process.env['npm_config_disable_deprecated']\")"
},
'conditions': [
['NAPI_VERSION!=""', { 'defines': ['NAPI_VERSION=<@(NAPI_VERSION)'] } ],
['disable_deprecated=="true"', {
'defines': ['NODE_ADDON_API_DISABLE_DEPRECATED']
}],
['OS=="mac"', {
'cflags+': ['-fvisibility=hidden'],
'xcode_settings': {
'OTHER_CFLAGS': ['-fvisibility=hidden']
}
}]
],
'cflags': [ '-Werror', '-Wall', '-Wextra', '-Wpedantic', '-Wunused-parameter' ],
'cflags_cc': [ '-Werror', '-Wall', '-Wextra', '-Wpedantic', '-Wunused-parameter' ]
}


@@ -0,0 +1,25 @@
{
'defines': [ 'NAPI_CPP_EXCEPTIONS' ],
'cflags!': [ '-fno-exceptions' ],
'cflags_cc!': [ '-fno-exceptions' ],
'conditions': [
["OS=='win'", {
"defines": [
"_HAS_EXCEPTIONS=1"
],
"msvs_settings": {
"VCCLCompilerTool": {
"ExceptionHandling": 1,
'EnablePREfast': 'true',
},
},
}],
["OS=='mac'", {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
},
}],
],
}


@@ -0,0 +1,12 @@
const path = require('path');
const includeDir = path.relative('.', __dirname);
module.exports = {
include: `"${__dirname}"`, // deprecated, can be removed as part of 4.0.0
include_dir: includeDir,
gyp: path.join(includeDir, 'node_api.gyp:nothing'), // deprecated.
targets: path.join(includeDir, 'node_addon_api.gyp'),
isNodeApiBuiltin: true,
needsFlag: false
};


@@ -0,0 +1,186 @@
#ifndef SRC_NAPI_INL_DEPRECATED_H_
#define SRC_NAPI_INL_DEPRECATED_H_
////////////////////////////////////////////////////////////////////////////////
// PropertyDescriptor class
////////////////////////////////////////////////////////////////////////////////
template <typename Getter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
const char* utf8name,
Getter getter,
napi_property_attributes attributes,
void* /*data*/) {
using CbData = details::CallbackData<Getter, Napi::Value>;
// TODO: Delete when the function is destroyed
auto callbackData = new CbData({getter, nullptr});
return PropertyDescriptor({utf8name,
nullptr,
nullptr,
CbData::Wrapper,
nullptr,
nullptr,
attributes,
callbackData});
}
template <typename Getter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
const std::string& utf8name,
Getter getter,
napi_property_attributes attributes,
void* data) {
return Accessor(utf8name.c_str(), getter, attributes, data);
}
template <typename Getter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
napi_value name,
Getter getter,
napi_property_attributes attributes,
void* /*data*/) {
using CbData = details::CallbackData<Getter, Napi::Value>;
// TODO: Delete when the function is destroyed
auto callbackData = new CbData({getter, nullptr});
return PropertyDescriptor({nullptr,
name,
nullptr,
CbData::Wrapper,
nullptr,
nullptr,
attributes,
callbackData});
}
template <typename Getter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
Name name, Getter getter, napi_property_attributes attributes, void* data) {
napi_value nameValue = name;
return PropertyDescriptor::Accessor(nameValue, getter, attributes, data);
}
template <typename Getter, typename Setter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
const char* utf8name,
Getter getter,
Setter setter,
napi_property_attributes attributes,
void* /*data*/) {
using CbData = details::AccessorCallbackData<Getter, Setter>;
// TODO: Delete when the function is destroyed
auto callbackData = new CbData({getter, setter, nullptr});
return PropertyDescriptor({utf8name,
nullptr,
nullptr,
CbData::GetterWrapper,
CbData::SetterWrapper,
nullptr,
attributes,
callbackData});
}
template <typename Getter, typename Setter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
const std::string& utf8name,
Getter getter,
Setter setter,
napi_property_attributes attributes,
void* data) {
return Accessor(utf8name.c_str(), getter, setter, attributes, data);
}
template <typename Getter, typename Setter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
napi_value name,
Getter getter,
Setter setter,
napi_property_attributes attributes,
void* /*data*/) {
using CbData = details::AccessorCallbackData<Getter, Setter>;
// TODO: Delete when the function is destroyed
auto callbackData = new CbData({getter, setter, nullptr});
return PropertyDescriptor({nullptr,
name,
nullptr,
CbData::GetterWrapper,
CbData::SetterWrapper,
nullptr,
attributes,
callbackData});
}
template <typename Getter, typename Setter>
inline PropertyDescriptor PropertyDescriptor::Accessor(
Name name,
Getter getter,
Setter setter,
napi_property_attributes attributes,
void* data) {
napi_value nameValue = name;
return PropertyDescriptor::Accessor(
nameValue, getter, setter, attributes, data);
}
template <typename Callable>
inline PropertyDescriptor PropertyDescriptor::Function(
const char* utf8name,
Callable cb,
napi_property_attributes attributes,
void* /*data*/) {
using ReturnType = decltype(cb(CallbackInfo(nullptr, nullptr)));
using CbData = details::CallbackData<Callable, ReturnType>;
// TODO: Delete when the function is destroyed
auto callbackData = new CbData({cb, nullptr});
return PropertyDescriptor({utf8name,
nullptr,
CbData::Wrapper,
nullptr,
nullptr,
nullptr,
attributes,
callbackData});
}
template <typename Callable>
inline PropertyDescriptor PropertyDescriptor::Function(
const std::string& utf8name,
Callable cb,
napi_property_attributes attributes,
void* data) {
return Function(utf8name.c_str(), cb, attributes, data);
}
template <typename Callable>
inline PropertyDescriptor PropertyDescriptor::Function(
napi_value name,
Callable cb,
napi_property_attributes attributes,
void* /*data*/) {
using ReturnType = decltype(cb(CallbackInfo(nullptr, nullptr)));
using CbData = details::CallbackData<Callable, ReturnType>;
// TODO: Delete when the function is destroyed
auto callbackData = new CbData({cb, nullptr});
return PropertyDescriptor({nullptr,
name,
CbData::Wrapper,
nullptr,
nullptr,
nullptr,
attributes,
callbackData});
}
template <typename Callable>
inline PropertyDescriptor PropertyDescriptor::Function(
Name name, Callable cb, napi_property_attributes attributes, void* data) {
napi_value nameValue = name;
return PropertyDescriptor::Function(nameValue, cb, attributes, data);
}
#endif // !SRC_NAPI_INL_DEPRECATED_H_

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,32 @@
{
'targets': [
{
'target_name': 'node_addon_api',
'type': 'none',
'sources': [ 'napi.h', 'napi-inl.h' ],
'direct_dependent_settings': {
'include_dirs': [ '.' ],
'includes': ['noexcept.gypi'],
}
},
{
'target_name': 'node_addon_api_except',
'type': 'none',
'sources': [ 'napi.h', 'napi-inl.h' ],
'direct_dependent_settings': {
'include_dirs': [ '.' ],
'includes': ['except.gypi'],
}
},
{
'target_name': 'node_addon_api_maybe',
'type': 'none',
'sources': [ 'napi.h', 'napi-inl.h' ],
'direct_dependent_settings': {
'include_dirs': [ '.' ],
'includes': ['noexcept.gypi'],
'defines': ['NODE_ADDON_API_ENABLE_MAYBE']
}
},
]
}


@@ -0,0 +1,9 @@
{
'targets': [
{
'target_name': 'nothing',
'type': 'static_library',
'sources': [ 'nothing.c' ]
}
]
}


@@ -0,0 +1,26 @@
{
'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ],
'cflags': [ '-fno-exceptions' ],
'cflags_cc': [ '-fno-exceptions' ],
'conditions': [
["OS=='win'", {
# _HAS_EXCEPTIONS is already defined and set to 0 in common.gypi
#"defines": [
# "_HAS_EXCEPTIONS=0"
#],
"msvs_settings": {
"VCCLCompilerTool": {
'ExceptionHandling': 0,
'EnablePREfast': 'true',
},
},
}],
["OS=='mac'", {
'xcode_settings': {
'CLANG_CXX_LIBRARY': 'libc++',
'MACOSX_DEPLOYMENT_TARGET': '10.7',
'GCC_ENABLE_CPP_EXCEPTIONS': 'NO',
},
}],
],
}


@@ -0,0 +1,21 @@
{
"versions": [
{
"version": "*",
"target": {
"node": "active"
},
"response": {
"type": "time-permitting",
"paid": false,
"contact": {
"name": "node-addon-api team",
"url": "https://github.com/nodejs/node-addon-api/issues"
}
},
"backing": [ { "project": "https://github.com/nodejs" },
{ "foundation": "https://openjsf.org/" }
]
}
]
}


@@ -0,0 +1,480 @@
{
"bugs": {
"url": "https://github.com/nodejs/node-addon-api/issues"
},
"contributors": [
{
"name": "Abhishek Kumar Singh",
"url": "https://github.com/abhi11210646"
},
{
"name": "Alba Mendez",
"url": "https://github.com/jmendeth"
},
{
"name": "Alexander Floh",
"url": "https://github.com/alexanderfloh"
},
{
"name": "Ammar Faizi",
"url": "https://github.com/ammarfaizi2"
},
{
"name": "András Timár, Dr",
"url": "https://github.com/timarandras"
},
{
"name": "Andrew Petersen",
"url": "https://github.com/kirbysayshi"
},
{
"name": "Anisha Rohra",
"url": "https://github.com/anisha-rohra"
},
{
"name": "Anna Henningsen",
"url": "https://github.com/addaleax"
},
{
"name": "Arnaud Botella",
"url": "https://github.com/BotellaA"
},
{
"name": "Arunesh Chandra",
"url": "https://github.com/aruneshchandra"
},
{
"name": "Azlan Mukhtar",
"url": "https://github.com/azlan"
},
{
"name": "Ben Berman",
"url": "https://github.com/rivertam"
},
{
"name": "Benjamin Byholm",
"url": "https://github.com/kkoopa"
},
{
"name": "Bill Gallafent",
"url": "https://github.com/gallafent"
},
{
"name": "blagoev",
"url": "https://github.com/blagoev"
},
{
"name": "Bruce A. MacNaughton",
"url": "https://github.com/bmacnaughton"
},
{
"name": "Cory Mickelson",
"url": "https://github.com/corymickelson"
},
{
"name": "Daniel Bevenius",
"url": "https://github.com/danbev"
},
{
"name": "Dante Calderón",
"url": "https://github.com/dantehemerson"
},
{
"name": "Darshan Sen",
"url": "https://github.com/RaisinTen"
},
{
"name": "David Halls",
"url": "https://github.com/davedoesdev"
},
{
"name": "Deepak Rajamohan",
"url": "https://github.com/deepakrkris"
},
{
"name": "Dmitry Ashkadov",
"url": "https://github.com/dmitryash"
},
{
"name": "Dongjin Na",
"url": "https://github.com/nadongguri"
},
{
"name": "Doni Rubiagatra",
"url": "https://github.com/rubiagatra"
},
{
"name": "Eric Bickle",
"url": "https://github.com/ebickle"
},
{
"name": "extremeheat",
"url": "https://github.com/extremeheat"
},
{
"name": "Feng Yu",
"url": "https://github.com/F3n67u"
},
{
"name": "Ferdinand Holzer",
"url": "https://github.com/fholzer"
},
{
"name": "Gabriel Schulhof",
"url": "https://github.com/gabrielschulhof"
},
{
"name": "Guenter Sandner",
"url": "https://github.com/gms1"
},
{
"name": "Gus Caplan",
"url": "https://github.com/devsnek"
},
{
"name": "Helio Frota",
"url": "https://github.com/helio-frota"
},
{
"name": "Hitesh Kanwathirtha",
"url": "https://github.com/digitalinfinity"
},
{
"name": "ikokostya",
"url": "https://github.com/ikokostya"
},
{
"name": "Jack Xia",
"url": "https://github.com/JckXia"
},
{
"name": "Jake Barnes",
"url": "https://github.com/DuBistKomisch"
},
{
"name": "Jake Yoon",
"url": "https://github.com/yjaeseok"
},
{
"name": "Jason Ginchereau",
"url": "https://github.com/jasongin"
},
{
"name": "Jenny",
"url": "https://github.com/egg-bread"
},
{
"name": "Jeroen Janssen",
"url": "https://github.com/japj"
},
{
"name": "Jim Schlight",
"url": "https://github.com/jschlight"
},
{
"name": "Jinho Bang",
"url": "https://github.com/romandev"
},
{
"name": "José Expósito",
"url": "https://github.com/JoseExposito"
},
{
"name": "joshgarde",
"url": "https://github.com/joshgarde"
},
{
"name": "Julian Mesa",
"url": "https://github.com/julianmesa-gitkraken"
},
{
"name": "Kasumi Hanazuki",
"url": "https://github.com/hanazuki"
},
{
"name": "Kelvin",
"url": "https://github.com/kelvinhammond"
},
{
"name": "Kevin Eady",
"url": "https://github.com/KevinEady"
},
{
"name": "Kévin VOYER",
"url": "https://github.com/kecsou"
},
{
"name": "kidneysolo",
"url": "https://github.com/kidneysolo"
},
{
"name": "Koki Nishihara",
"url": "https://github.com/Nishikoh"
},
{
"name": "Konstantin Tarkus",
"url": "https://github.com/koistya"
},
{
"name": "Kyle Farnung",
"url": "https://github.com/kfarnung"
},
{
"name": "Kyle Kovacs",
"url": "https://github.com/nullromo"
},
{
"name": "legendecas",
"url": "https://github.com/legendecas"
},
{
"name": "LongYinan",
"url": "https://github.com/Brooooooklyn"
},
{
"name": "Lovell Fuller",
"url": "https://github.com/lovell"
},
{
"name": "Luciano Martorella",
"url": "https://github.com/lmartorella"
},
{
"name": "mastergberry",
"url": "https://github.com/mastergberry"
},
{
"name": "Mathias Küsel",
"url": "https://github.com/mathiask88"
},
{
"name": "Mathias Stearn",
"url": "https://github.com/RedBeard0531"
},
{
"name": "Matteo Collina",
"url": "https://github.com/mcollina"
},
{
"name": "Michael Dawson",
"url": "https://github.com/mhdawson"
},
{
"name": "Michael Price",
"url": "https://github.com/mikepricedev"
},
{
"name": "Michele Campus",
"url": "https://github.com/kYroL01"
},
{
"name": "Mikhail Cheshkov",
"url": "https://github.com/mcheshkov"
},
{
"name": "nempoBu4",
"url": "https://github.com/nempoBu4"
},
{
"name": "Nicola Del Gobbo",
"url": "https://github.com/NickNaso"
},
{
"name": "Nick Soggin",
"url": "https://github.com/iSkore"
},
{
"name": "Nikolai Vavilov",
"url": "https://github.com/seishun"
},
{
"name": "Nurbol Alpysbayev",
"url": "https://github.com/anurbol"
},
{
"name": "pacop",
"url": "https://github.com/pacop"
},
{
"name": "Peter Šándor",
"url": "https://github.com/petersandor"
},
{
"name": "Philipp Renoth",
"url": "https://github.com/DaAitch"
},
{
"name": "rgerd",
"url": "https://github.com/rgerd"
},
{
"name": "Richard Lau",
"url": "https://github.com/richardlau"
},
{
"name": "Rolf Timmermans",
"url": "https://github.com/rolftimmermans"
},
{
"name": "Ross Weir",
"url": "https://github.com/ross-weir"
},
{
"name": "Ryuichi Okumura",
"url": "https://github.com/okuryu"
},
{
"name": "Saint Gabriel",
"url": "https://github.com/chineduG"
},
{
"name": "Sampson Gao",
"url": "https://github.com/sampsongao"
},
{
"name": "Sam Roberts",
"url": "https://github.com/sam-github"
},
{
"name": "strager",
"url": "https://github.com/strager"
},
{
"name": "Taylor Woll",
"url": "https://github.com/boingoing"
},
{
"name": "Thomas Gentilhomme",
"url": "https://github.com/fraxken"
},
{
"name": "Tim Rach",
"url": "https://github.com/timrach"
},
{
"name": "Tobias Nießen",
"url": "https://github.com/tniessen"
},
{
"name": "todoroff",
"url": "https://github.com/todoroff"
},
{
"name": "Toyo Li",
"url": "https://github.com/toyobayashi"
},
{
"name": "Tux3",
"url": "https://github.com/tux3"
},
{
"name": "Vlad Velmisov",
"url": "https://github.com/Velmisov"
},
{
"name": "Vladimir Morozov",
"url": "https://github.com/vmoroz"
},
{
"name": "WenheLI",
"url": "https://github.com/WenheLI"
},
{
"name": "Xuguang Mei",
"url": "https://github.com/meixg"
},
{
"name": "Yohei Kishimoto",
"url": "https://github.com/morokosi"
},
{
"name": "Yulong Wang",
"url": "https://github.com/fs-eire"
},
{
"name": "Ziqiu Zhao",
"url": "https://github.com/ZzqiZQute"
},
{
"name": "wanlu wang",
"url": "https://github.com/wanlu"
},
{
"name": "Caleb Hearon",
"url": "https://github.com/chearon"
},
{
"name": "Marx",
"url": "https://github.com/MarxJiao"
},
{
"name": "Ömer AKGÜL",
"url": "https://github.com/tuhalf"
}
],
"description": "Node.js API (Node-API)",
"devDependencies": {
"benchmark": "^2.1.4",
"bindings": "^1.5.0",
"clang-format": "^1.4.0",
"eslint": "^7.32.0",
"eslint-config-semistandard": "^16.0.0",
"eslint-config-standard": "^16.0.3",
"eslint-plugin-import": "^2.24.2",
"eslint-plugin-node": "^11.1.0",
"eslint-plugin-promise": "^5.1.0",
"fs-extra": "^11.1.1",
"path": "^0.12.7",
"pre-commit": "^1.2.2",
"safe-buffer": "^5.1.1"
},
"directories": {},
"gypfile": false,
"homepage": "https://github.com/nodejs/node-addon-api",
"keywords": [
"n-api",
"napi",
"addon",
"native",
"bindings",
"c",
"c++",
"nan",
"node-addon-api"
],
"license": "MIT",
"main": "index.js",
"name": "node-addon-api",
"readme": "README.md",
"repository": {
"type": "git",
"url": "git://github.com/nodejs/node-addon-api.git"
},
"files": [
"*.{c,h,gyp,gypi}",
"package-support.json",
"tools/"
],
"scripts": {
"prebenchmark": "node-gyp rebuild -C benchmark",
"benchmark": "node benchmark",
"pretest": "node-gyp rebuild -C test",
"test": "node test",
"test:debug": "node-gyp rebuild -C test --debug && NODE_API_BUILD_CONFIG=Debug node ./test/index.js",
"predev": "node-gyp rebuild -C test --debug",
"dev": "node test",
"predev:incremental": "node-gyp configure build -C test --debug",
"dev:incremental": "node test",
"doc": "doxygen doc/Doxyfile",
"lint": "node tools/eslint-format && node tools/clang-format",
"lint:fix": "node tools/clang-format --fix && node tools/eslint-format --fix"
},
"pre-commit": "lint",
"version": "7.1.1",
"support": true
}

View File

@@ -0,0 +1,73 @@
# Tools
## clang-format
The clang-format checking tool is designed to check changed lines of code against the given git refs.
## Migration Script
The migration tool is designed to reduce repetitive work in the migration process. However, the script does not aim to convert everything for you; some small fixes and, in places, major reconstruction are usually still required.
### How To Use
To run the conversion script, first make sure you have the latest `node-addon-api` in your `node_modules` directory.
```
npm install node-addon-api
```
Then run the script, passing your project directory:
```
node ./node_modules/node-addon-api/tools/conversion.js ./
```
After it finishes, recompile and fix up anything the script missed.
### Quick Fixes
Here is a list of things that can be fixed easily.
1. Change a method's return type to `void` if it does not return a value to JavaScript.
2. Use `.` instead of `->` to access attributes or invoke member functions on a `Napi::Object`.
3. Change `Napi::New(env, value);` to `Napi::[Type]::New(env, value);` (see the sketch below).
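For illustration, here is a minimal before/after sketch of these quick fixes. The class `MyObject` and its `double value_` member are hypothetical names used only for this example:
```
// Before (NAN, hypothetical):
//   NAN_METHOD(MyObject::SetOptions) {
//     MyObject* self = Nan::ObjectWrap::Unwrap<MyObject>(info.This());
//     v8::Local<v8::Object> opts = info[0].As<v8::Object>();
//     self->value_ = Nan::To<double>(
//         Nan::Get(opts, Nan::New("value").ToLocalChecked()).ToLocalChecked()).FromJust();
//     info.GetReturnValue().SetUndefined();
//   }

// After (node-addon-api):
void MyObject::SetOptions(const Napi::CallbackInfo& info) {
  // Quick fix 1: return void, since nothing is returned to JavaScript.
  // Quick fix 2: use `.` on Napi::Object values instead of `->`.
  Napi::Object opts = info[0].As<Napi::Object>();
  this->value_ = opts.Get("value").As<Napi::Number>().DoubleValue();
  // Quick fix 3: Napi::[Type]::New(env, value) instead of Nan::New(value).
  opts.Set("applied", Napi::Boolean::New(info.Env(), true));
}
```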
### Major Reconstructions
The implementation of `Napi::ObjectWrap` differs significantly from NAN's. `Napi::ObjectWrap` takes a pointer to the wrapped object and creates a reference to it inside the `ObjectWrap` constructor. `Napi::ObjectWrap` also associates the wrapped object's instance methods with the JavaScript module, instead of the static methods that NAN uses.
So if you use `Nan::ObjectWrap` in your module, you will need to perform the following steps.
1. Convert your [ClassName]::New function to a constructor function that takes a `Napi::CallbackInfo`. Declare it as
```
[ClassName](const Napi::CallbackInfo& info);
```
and define it as
```
[ClassName]::[ClassName](const Napi::CallbackInfo& info) : Napi::ObjectWrap<[ClassName]>(info) {
...
}
```
This way, the `Napi::ObjectWrap` constructor will be invoked after the object has been instantiated and `Napi::ObjectWrap` can use the `this` pointer to create a reference to the wrapped object.
2. Move your original constructor code into the new constructor. Delete your original constructor.
3. In your class initialization function, associate native methods in the following way.
```
Napi::FunctionReference constructor;
void [ClassName]::Init(Napi::Env env, Napi::Object exports, Napi::Object module) {
Napi::HandleScope scope(env);
Napi::Function ctor = DefineClass(env, "[ClassName]", {
InstanceMethod<&[ClassName]::Func1>("Func1"),
InstanceMethod<&[ClassName]::Func2>("Func2"),
InstanceAccessor<&[ClassName]::ValueGetter>("Value"),
StaticMethod<&[ClassName]::StaticMethod>("MethodName"),
InstanceValue("Value", Napi::[Type]::New(env, value)),
});
constructor = Napi::Persistent(ctor);
constructor.SuppressDestruct();
exports.Set("[ClassName]", ctor);
}
```
4. In functions where you needed to unwrap the ObjectWrap in NAN, for example `[ClassName]* native = Nan::ObjectWrap::Unwrap<[ClassName]>(info.This());`, use the `this` pointer directly as the unwrapped object, since each ObjectWrap instance is associated with a unique object instance, as shown in the sketch below.
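A minimal sketch of an instance method after migration, again using the hypothetical `MyObject` class with a `double value_` member:
```
Napi::Value MyObject::GetValue(const Napi::CallbackInfo& info) {
  // No Unwrap call needed: in an instance method of Napi::ObjectWrap<MyObject>,
  // `this` already points at the native object wrapped by info.This().
  return Napi::Number::New(info.Env(), this->value_);
}
```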
If you still find issues after following this guide, please open an issue describing your problem and we will try to resolve it.

View File

@@ -0,0 +1,99 @@
'use strict';
// Descend into a directory structure and, for each file matching *.node, output
// based on the imports found in the file whether it's an N-API module or not.
const fs = require('fs');
const path = require('path');
// Read the output of the command, break it into lines, and use the reducer to
// decide whether the file is an N-API module or not.
function checkFile (file, command, argv, reducer) {
const child = require('child_process').spawn(command, argv, {
stdio: ['inherit', 'pipe', 'inherit']
});
let leftover = '';
let isNapi;
child.stdout.on('data', (chunk) => {
if (isNapi === undefined) {
chunk = (leftover + chunk.toString()).split(/[\r\n]+/);
leftover = chunk.pop();
isNapi = chunk.reduce(reducer, isNapi);
if (isNapi !== undefined) {
child.kill();
}
}
});
child.on('close', (code, signal) => {
if ((code === null && signal !== null) || (code !== 0)) {
console.log(
command + ' exited with code: ' + code + ' and signal: ' + signal);
} else {
// Green if it's a N-API module, red otherwise.
console.log(
'\x1b[' + (isNapi ? '42' : '41') + 'm' +
(isNapi ? ' N-API' : 'Not N-API') +
'\x1b[0m: ' + file);
}
});
}
// Use nm -a to list symbols.
function checkFileUNIX (file) {
checkFile(file, 'nm', ['-a', file], (soFar, line) => {
if (soFar === undefined) {
line = line.match(/([0-9a-f]*)? ([a-zA-Z]) (.*$)/);
if (line[2] === 'U') {
if (/^napi/.test(line[3])) {
soFar = true;
}
}
}
return soFar;
});
}
// Use dumpbin /imports to list symbols.
function checkFileWin32 (file) {
checkFile(file, 'dumpbin', ['/imports', file], (soFar, line) => {
if (soFar === undefined) {
line = line.match(/([0-9a-f]*)? +([a-zA-Z0-9]) (.*$)/);
if (line && /^napi/.test(line[line.length - 1])) {
soFar = true;
}
}
return soFar;
});
}
// Descend into a directory structure and pass each file ending in '.node' to
// one of the above checks, depending on the OS.
function recurse (top) {
fs.readdir(top, (error, items) => {
if (error) {
throw new Error('error reading directory ' + top + ': ' + error);
}
items.forEach((item) => {
item = path.join(top, item);
fs.stat(item, ((item) => (error, stats) => {
if (error) {
throw new Error('error about ' + item + ': ' + error);
}
if (stats.isDirectory()) {
recurse(item);
} else if (/[.]node$/.test(item) &&
// Explicitly ignore files called 'nothing.node' because they are
// artefacts of node-addon-api having identified a version of
// Node.js that ships with a correct implementation of N-API.
path.basename(item) !== 'nothing.node') {
process.platform === 'win32'
? checkFileWin32(item)
: checkFileUNIX(item);
}
})(item));
});
});
}
// Start with the directory given on the command line or the current directory
// if nothing was given.
recurse(process.argv.length > 2 ? process.argv[2] : '.');

View File

@@ -0,0 +1,71 @@
#!/usr/bin/env node
const spawn = require('child_process').spawnSync;
const path = require('path');
const filesToCheck = ['*.h', '*.cc'];
const FORMAT_START = process.env.FORMAT_START || 'main';
function main (args) {
let fix = false;
while (args.length > 0) {
switch (args[0]) {
case '-f':
case '--fix':
fix = true;
break;
default:
}
args.shift();
}
const clangFormatPath = path.dirname(require.resolve('clang-format'));
const binary = process.platform === 'win32'
? 'node_modules\\.bin\\clang-format.cmd'
: 'node_modules/.bin/clang-format';
const options = ['--binary=' + binary, '--style=file'];
if (fix) {
options.push(FORMAT_START);
} else {
options.push('--diff', FORMAT_START);
}
const gitClangFormatPath = path.join(clangFormatPath, 'bin/git-clang-format');
const result = spawn(
'python',
[gitClangFormatPath, ...options, '--', ...filesToCheck],
{ encoding: 'utf-8' }
);
if (result.stderr) {
console.error('Error running git-clang-format:', result.stderr);
return 2;
}
const clangFormatOutput = result.stdout.trim();
// Bail fast if in fix mode.
if (fix) {
console.log(clangFormatOutput);
return 0;
}
// Detect whether clang-format produced any complaints
if (
clangFormatOutput !== '' &&
clangFormatOutput !== 'no modified files to format' &&
clangFormatOutput !== 'clang-format did not modify any files'
) {
console.error(clangFormatOutput);
const fixCmd = 'npm run lint:fix';
console.error(`
ERROR: please run "${fixCmd}" to format changes in your commit
Note that when running the command locally, please keep your local
main branch and working branch up to date with nodejs/node-addon-api
to exclude unrelated complaints.
Or you can run "env FORMAT_START=upstream/main ${fixCmd}".`);
return 1;
}
}
if (require.main === module) {
process.exitCode = main(process.argv.slice(2));
}

View File

@@ -0,0 +1,301 @@
#! /usr/bin/env node
'use strict';
const fs = require('fs');
const path = require('path');
const args = process.argv.slice(2);
const dir = args[0];
if (!dir) {
console.log('Usage: node ' + path.basename(__filename) + ' <target-dir>');
process.exit(1);
}
const NodeApiVersion = require('../package.json').version;
const disable = args[1];
let ConfigFileOperations;
if (disable !== '--disable' && dir !== '--disable') {
ConfigFileOperations = {
'package.json': [
[/([ ]*)"dependencies": {/g, '$1"dependencies": {\n$1 "node-addon-api": "' + NodeApiVersion + '",'],
[/[ ]*"nan": *"[^"]+"(,|)[\n\r]/g, '']
],
'binding.gyp': [
[/([ ]*)'include_dirs': \[/g, '$1\'include_dirs\': [\n$1 \'<!(node -p "require(\\\'node-addon-api\\\').include_dir")\','],
[/([ ]*)"include_dirs": \[/g, '$1"include_dirs": [\n$1 "<!(node -p \\"require(\'node-addon-api\').include_dir\\")",'],
[/[ ]*("|')<!\(node -e ("|'|\\"|\\')require\(("|'|\\"|\\')nan("|'|\\"|\\')\)("|'|\\"|\\')\)("|')(,|)[\r\n]/g, ''],
[/([ ]*)("|')target_name("|'): ("|')(.+?)("|'),/g, '$1$2target_name$2: $4$5$6,\n $2cflags!$2: [ $2-fno-exceptions$2 ],\n $2cflags_cc!$2: [ $2-fno-exceptions$2 ],\n $2xcode_settings$2: { $2GCC_ENABLE_CPP_EXCEPTIONS$2: $2YES$2,\n $2CLANG_CXX_LIBRARY$2: $2libc++$2,\n $2MACOSX_DEPLOYMENT_TARGET$2: $210.7$2,\n },\n $2msvs_settings$2: {\n $2VCCLCompilerTool$2: { $2ExceptionHandling$2: 1 },\n },']
]
};
} else {
ConfigFileOperations = {
'package.json': [
[/([ ]*)"dependencies": {/g, '$1"dependencies": {\n$1 "node-addon-api": "' + NodeApiVersion + '",'],
[/[ ]*"nan": *"[^"]+"(,|)[\n\r]/g, '']
],
'binding.gyp': [
[/([ ]*)'include_dirs': \[/g, '$1\'include_dirs\': [\n$1 \'<!(node -p "require(\\\'node-addon-api\\\').include_dir")\','],
[/([ ]*)"include_dirs": \[/g, '$1"include_dirs": [\n$1 "<!(node -p \'require(\\"node-addon-api\\").include_dir\')",'],
[/[ ]*("|')<!\(node -e ("|'|\\"|\\')require\(("|'|\\"|\\')nan("|'|\\"|\\')\)("|'|\\"|\\')\)("|')(,|)[\r\n]/g, ''],
[/([ ]*)("|')target_name("|'): ("|')(.+?)("|'),/g, '$1$2target_name$2: $4$5$6,\n $2cflags!$2: [ $2-fno-exceptions$2 ],\n $2cflags_cc!$2: [ $2-fno-exceptions$2 ],\n $2defines$2: [ $2NAPI_DISABLE_CPP_EXCEPTIONS$2 ],\n $2conditions$2: [\n [\'OS=="win"\', { $2defines$2: [ $2_HAS_EXCEPTIONS=1$2 ] }]\n ]']
]
};
}
const SourceFileOperations = [
[/Nan::SetMethod\(target,[\s]*"(.*)"[\s]*,[\s]*([^)]+)\)/g, 'exports.Set(Napi::String::New(env, "$1"), Napi::Function::New(env, $2))'],
[/v8::Local<v8::FunctionTemplate>\s+(\w+)\s*=\s*Nan::New<FunctionTemplate>\([\w\d:]+\);(?:\w+->Reset\(\1\))?\s+\1->SetClassName\(Nan::String::New\("(\w+)"\)\);/g, 'Napi::Function $1 = DefineClass(env, "$2", {'],
[/Local<FunctionTemplate>\s+(\w+)\s*=\s*Nan::New<FunctionTemplate>\([\w\d:]+\);\s+(\w+)\.Reset\((\1)\);\s+\1->SetClassName\((Nan::String::New|Nan::New<(v8::)*String>)\("(.+?)"\)\);/g, 'Napi::Function $1 = DefineClass(env, "$6", {'],
[/Local<FunctionTemplate>\s+(\w+)\s*=\s*Nan::New<FunctionTemplate>\([\w\d:]+\);(?:\w+->Reset\(\1\))?\s+\1->SetClassName\(Nan::String::New\("(\w+)"\)\);/g, 'Napi::Function $1 = DefineClass(env, "$2", {'],
[/Nan::New<v8::FunctionTemplate>\(([\w\d:]+)\)->GetFunction\(\)/g, 'Napi::Function::New(env, $1)'],
[/Nan::New<FunctionTemplate>\(([\w\d:]+)\)->GetFunction()/g, 'Napi::Function::New(env, $1);'],
[/Nan::New<v8::FunctionTemplate>\(([\w\d:]+)\)/g, 'Napi::Function::New(env, $1)'],
[/Nan::New<FunctionTemplate>\(([\w\d:]+)\)/g, 'Napi::Function::New(env, $1)'],
// FunctionTemplate to FunctionReference
[/Nan::Persistent<(v8::)*FunctionTemplate>/g, 'Napi::FunctionReference'],
[/Nan::Persistent<(v8::)*Function>/g, 'Napi::FunctionReference'],
[/v8::Local<v8::FunctionTemplate>/g, 'Napi::FunctionReference'],
[/Local<FunctionTemplate>/g, 'Napi::FunctionReference'],
[/v8::FunctionTemplate/g, 'Napi::FunctionReference'],
[/FunctionTemplate/g, 'Napi::FunctionReference'],
[/([ ]*)Nan::SetPrototypeMethod\(\w+, "(\w+)", (\w+)\);/g, '$1InstanceMethod("$2", &$3),'],
[/([ ]*)(?:\w+\.Reset\(\w+\);\s+)?\(target\)\.Set\("(\w+)",\s*Nan::GetFunction\((\w+)\)\);/gm,
'});\n\n' +
'$1constructor = Napi::Persistent($3);\n' +
'$1constructor.SuppressDestruct();\n' +
'$1target.Set("$2", $3);'],
// TODO: Other attribute combinations
[/static_cast<PropertyAttribute>\(ReadOnly\s*\|\s*DontDelete\)/gm,
'static_cast<napi_property_attributes>(napi_enumerable | napi_configurable)'],
[/([\w\d:<>]+?)::Cast\((.+?)\)/g, '$2.As<$1>()'],
[/\*Nan::Utf8String\(([^)]+)\)/g, '$1->As<Napi::String>().Utf8Value().c_str()'],
[/Nan::Utf8String +(\w+)\(([^)]+)\)/g, 'std::string $1 = $2.As<Napi::String>()'],
[/Nan::Utf8String/g, 'std::string'],
[/v8::String::Utf8Value (.+?)\((.+?)\)/g, 'Napi::String $1(env, $2)'],
[/String::Utf8Value (.+?)\((.+?)\)/g, 'Napi::String $1(env, $2)'],
[/\.length\(\)/g, '.Length()'],
[/Nan::MakeCallback\(([^,]+),[\s\\]+([^,]+),/gm, '$2.MakeCallback($1,'],
[/class\s+(\w+)\s*:\s*public\s+Nan::ObjectWrap/g, 'class $1 : public Napi::ObjectWrap<$1>'],
[/(\w+)\(([^)]*)\)\s*:\s*Nan::ObjectWrap\(\)\s*(,)?/gm, '$1($2) : Napi::ObjectWrap<$1>()$3'],
// HandleOKCallback to OnOK
[/HandleOKCallback/g, 'OnOK'],
// HandleErrorCallback to OnError
[/HandleErrorCallback/g, 'OnError'],
// ex. .As<Function>() to .As<Napi::Object>()
[/\.As<v8::(Value|Boolean|String|Number|Object|Array|Symbol|External|Function)>\(\)/g, '.As<Napi::$1>()'],
[/\.As<(Value|Boolean|String|Number|Object|Array|Symbol|External|Function)>\(\)/g, '.As<Napi::$1>()'],
// ex. Nan::New<Number>(info[0]) to Napi::Number::New(info[0])
[/Nan::New<(v8::)*Integer>\((.+?)\)/g, 'Napi::Number::New(env, $2)'],
[/Nan::New\(([0-9.]+)\)/g, 'Napi::Number::New(env, $1)'],
[/Nan::New<(v8::)*String>\("(.+?)"\)/g, 'Napi::String::New(env, "$2")'],
[/Nan::New\("(.+?)"\)/g, 'Napi::String::New(env, "$1")'],
[/Nan::New<(v8::)*(.+?)>\(\)/g, 'Napi::$2::New(env)'],
[/Nan::New<(.+?)>\(\)/g, 'Napi::$1::New(env)'],
[/Nan::New<(v8::)*(.+?)>\(/g, 'Napi::$2::New(env, '],
[/Nan::New<(.+?)>\(/g, 'Napi::$1::New(env, '],
[/Nan::NewBuffer\(/g, 'Napi::Buffer<char>::New(env, '],
// TODO: Properly handle this
[/Nan::New\(/g, 'Napi::New(env, '],
[/\.IsInt32\(\)/g, '.IsNumber()'],
[/->IsInt32\(\)/g, '.IsNumber()'],
[/(.+?)->BooleanValue\(\)/g, '$1.As<Napi::Boolean>().Value()'],
[/(.+?)->Int32Value\(\)/g, '$1.As<Napi::Number>().Int32Value()'],
[/(.+?)->Uint32Value\(\)/g, '$1.As<Napi::Number>().Uint32Value()'],
[/(.+?)->IntegerValue\(\)/g, '$1.As<Napi::Number>().Int64Value()'],
[/(.+?)->NumberValue\(\)/g, '$1.As<Napi::Number>().DoubleValue()'],
// ex. Nan::To<bool>(info[0]) to info[0].Value()
[/Nan::To<v8::(Boolean|String|Number|Object|Array|Symbol|Function)>\((.+?)\)/g, '$2.To<Napi::$1>()'],
[/Nan::To<(Boolean|String|Number|Object|Array|Symbol|Function)>\((.+?)\)/g, '$2.To<Napi::$1>()'],
// ex. Nan::To<bool>(info[0]) to info[0].As<Napi::Boolean>().Value()
[/Nan::To<bool>\((.+?)\)/g, '$1.As<Napi::Boolean>().Value()'],
// ex. Nan::To<int>(info[0]) to info[0].As<Napi::Number>().Int32Value()
[/Nan::To<int>\((.+?)\)/g, '$1.As<Napi::Number>().Int32Value()'],
// ex. Nan::To<int32_t>(info[0]) to info[0].As<Napi::Number>().Int32Value()
[/Nan::To<int32_t>\((.+?)\)/g, '$1.As<Napi::Number>().Int32Value()'],
// ex. Nan::To<uint32_t>(info[0]) to info[0].As<Napi::Number>().Uint32Value()
[/Nan::To<uint32_t>\((.+?)\)/g, '$1.As<Napi::Number>().Uint32Value()'],
// ex. Nan::To<int64_t>(info[0]) to info[0].As<Napi::Number>().Int64Value()
[/Nan::To<int64_t>\((.+?)\)/g, '$1.As<Napi::Number>().Int64Value()'],
// ex. Nan::To<float>(info[0]) to info[0].As<Napi::Number>().FloatValue()
[/Nan::To<float>\((.+?)\)/g, '$1.As<Napi::Number>().FloatValue()'],
// ex. Nan::To<double>(info[0]) to info[0].As<Napi::Number>().DoubleValue()
[/Nan::To<double>\((.+?)\)/g, '$1.As<Napi::Number>().DoubleValue()'],
[/Nan::New\((\w+)\)->HasInstance\((\w+)\)/g, '$2.InstanceOf($1.Value())'],
[/Nan::Has\(([^,]+),\s*/gm, '($1).Has('],
[/\.Has\([\s|\\]*Nan::New<(v8::)*String>\(([^)]+)\)\)/gm, '.Has($1)'],
[/\.Has\([\s|\\]*Nan::New\(([^)]+)\)\)/gm, '.Has($1)'],
[/Nan::Get\(([^,]+),\s*/gm, '($1).Get('],
[/\.Get\([\s|\\]*Nan::New<(v8::)*String>\(([^)]+)\)\)/gm, '.Get($1)'],
[/\.Get\([\s|\\]*Nan::New\(([^)]+)\)\)/gm, '.Get($1)'],
[/Nan::Set\(([^,]+),\s*/gm, '($1).Set('],
[/\.Set\([\s|\\]*Nan::New<(v8::)*String>\(([^)]+)\)\s*,/gm, '.Set($1,'],
[/\.Set\([\s|\\]*Nan::New\(([^)]+)\)\s*,/gm, '.Set($1,'],
// ex. node::Buffer::HasInstance(info[0]) to info[0].IsBuffer()
[/node::Buffer::HasInstance\((.+?)\)/g, '$1.IsBuffer()'],
// ex. node::Buffer::Length(info[0]) to info[0].Length()
[/node::Buffer::Length\((.+?)\)/g, '$1.As<Napi::Buffer<char>>().Length()'],
// ex. node::Buffer::Data(info[0]) to info[0].Data()
[/node::Buffer::Data\((.+?)\)/g, '$1.As<Napi::Buffer<char>>().Data()'],
[/Nan::CopyBuffer\(/g, 'Napi::Buffer::Copy(env, '],
// Nan::AsyncQueueWorker(worker)
[/Nan::AsyncQueueWorker\((.+)\);/g, '$1.Queue();'],
[/Nan::(Undefined|Null|True|False)\(\)/g, 'env.$1()'],
// Nan::ThrowError(error) to Napi::Error::New(env, error).ThrowAsJavaScriptException()
[/([ ]*)return Nan::Throw(\w*?)Error\((.+?)\);/g, '$1Napi::$2Error::New(env, $3).ThrowAsJavaScriptException();\n$1return env.Null();'],
[/Nan::Throw(\w*?)Error\((.+?)\);\n(\s*)return;/g, 'Napi::$1Error::New(env, $2).ThrowAsJavaScriptException();\n$3return env.Null();'],
[/Nan::Throw(\w*?)Error\((.+?)\);/g, 'Napi::$1Error::New(env, $2).ThrowAsJavaScriptException();\n'],
// Nan::RangeError(error) to Napi::RangeError::New(env, error)
[/Nan::(\w*?)Error\((.+)\)/g, 'Napi::$1Error::New(env, $2)'],
[/Nan::Set\((.+?),\n* *(.+?),\n* *(.+?),\n* *(.+?)\)/g, '$1.Set($2, $3, $4)'],
[/Nan::(Escapable)?HandleScope\s+(\w+)\s*;/g, 'Napi::$1HandleScope $2(env);'],
[/Nan::(Escapable)?HandleScope/g, 'Napi::$1HandleScope'],
[/Nan::ForceSet\(([^,]+), ?/g, '$1->DefineProperty('],
[/\.ForceSet\(Napi::String::New\(env, "(\w+)"\),\s*?/g, '.DefineProperty("$1", '],
// [ /Nan::GetPropertyNames\(([^,]+)\)/, '$1->GetPropertyNames()' ],
[/Nan::Equals\(([^,]+),/g, '$1.StrictEquals('],
[/(.+)->Set\(/g, '$1.Set('],
[/Nan::Callback/g, 'Napi::FunctionReference'],
[/Nan::Persistent<Object>/g, 'Napi::ObjectReference'],
[/Nan::ADDON_REGISTER_FUNCTION_ARGS_TYPE target/g, 'Napi::Env& env, Napi::Object& target'],
[/(\w+)\*\s+(\w+)\s*=\s*Nan::ObjectWrap::Unwrap<\w+>\(info\.This\(\)\);/g, '$1* $2 = this;'],
[/Nan::ObjectWrap::Unwrap<(\w+)>\((.*)\);/g, '$2.Unwrap<$1>();'],
[/Nan::NAN_METHOD_RETURN_TYPE/g, 'void'],
[/NAN_INLINE/g, 'inline'],
[/Nan::NAN_METHOD_ARGS_TYPE/g, 'const Napi::CallbackInfo&'],
[/NAN_METHOD\(([\w\d:]+?)\)/g, 'Napi::Value $1(const Napi::CallbackInfo& info)'],
[/static\s*NAN_GETTER\(([\w\d:]+?)\)/g, 'Napi::Value $1(const Napi::CallbackInfo& info)'],
[/NAN_GETTER\(([\w\d:]+?)\)/g, 'Napi::Value $1(const Napi::CallbackInfo& info)'],
[/static\s*NAN_SETTER\(([\w\d:]+?)\)/g, 'void $1(const Napi::CallbackInfo& info, const Napi::Value& value)'],
[/NAN_SETTER\(([\w\d:]+?)\)/g, 'void $1(const Napi::CallbackInfo& info, const Napi::Value& value)'],
[/void Init\((v8::)*Local<(v8::)*Object> exports\)/g, 'Napi::Object Init(Napi::Env env, Napi::Object exports)'],
[/NAN_MODULE_INIT\(([\w\d:]+?)\);/g, 'Napi::Object $1(Napi::Env env, Napi::Object exports);'],
[/NAN_MODULE_INIT\(([\w\d:]+?)\)/g, 'Napi::Object $1(Napi::Env env, Napi::Object exports)'],
[/::(Init(?:ialize)?)\(target\)/g, '::$1(env, target, module)'],
[/constructor_template/g, 'constructor'],
[/Nan::FunctionCallbackInfo<(v8::)?Value>[ ]*& [ ]*info\)[ ]*{\n*([ ]*)/gm, 'Napi::CallbackInfo& info) {\n$2Napi::Env env = info.Env();\n$2'],
[/Nan::FunctionCallbackInfo<(v8::)*Value>\s*&\s*info\);/g, 'Napi::CallbackInfo& info);'],
[/Nan::FunctionCallbackInfo<(v8::)*Value>\s*&/g, 'Napi::CallbackInfo&'],
[/Buffer::HasInstance\(([^)]+)\)/g, '$1.IsBuffer()'],
[/info\[(\d+)\]->/g, 'info[$1].'],
[/info\[([\w\d]+)\]->/g, 'info[$1].'],
[/info\.This\(\)->/g, 'info.This().'],
[/->Is(Object|String|Int32|Number)\(\)/g, '.Is$1()'],
[/info.GetReturnValue\(\).SetUndefined\(\)/g, 'return env.Undefined()'],
[/info\.GetReturnValue\(\)\.Set\(((\n|.)+?)\);/g, 'return $1;'],
// ex. Local<Value> to Napi::Value
[/v8::Local<v8::(Value|Boolean|String|Number|Object|Array|Symbol|External|Function)>/g, 'Napi::$1'],
[/Local<(Value|Boolean|String|Number|Object|Array|Symbol|External|Function)>/g, 'Napi::$1'],
// Declare an env in helper functions that take a Napi::Value
[/(\w+)\(Napi::Value (\w+)(,\s*[^()]+)?\)\s*{\n*([ ]*)/gm, '$1(Napi::Value $2$3) {\n$4Napi::Env env = $2.Env();\n$4'],
// delete #include <node.h> and/or <v8.h>
[/#include +(<|")(?:node|nan).h("|>)/g, '#include $1napi.h$2\n#include $1uv.h$2'],
// NODE_MODULE to NODE_API_MODULE
[/NODE_MODULE/g, 'NODE_API_MODULE'],
[/Nan::/g, 'Napi::'],
[/nan.h/g, 'napi.h'],
// delete .FromJust()
[/\.FromJust\(\)/g, ''],
// delete .ToLocalCheck()
[/\.ToLocalChecked\(\)/g, ''],
[/^.*->SetInternalFieldCount\(.*$/gm, ''],
// replace using node; and/or using v8; to using Napi;
[/using (node|v8);/g, 'using Napi;'],
[/using namespace (node|Nan|v8);/g, 'using namespace Napi;'],
// delete using v8::Local;
[/using v8::Local;\n/g, ''],
// replace using v8::XXX; with using Napi::XXX
[/using v8::([A-Za-z]+);/g, 'using Napi::$1;']
];
const paths = listFiles(dir);
paths.forEach(function (dirEntry) {
const filename = dirEntry.split('\\').pop().split('/').pop();
// Check whether the file is a source file or a config file
// then execute function accordingly
const sourcePattern = /.+\.h|.+\.cc|.+\.cpp/;
if (sourcePattern.test(filename)) {
convertFile(dirEntry, SourceFileOperations);
} else if (ConfigFileOperations[filename] != null) {
convertFile(dirEntry, ConfigFileOperations[filename]);
}
});
function listFiles (dir, filelist) {
const files = fs.readdirSync(dir);
filelist = filelist || [];
files.forEach(function (file) {
if (file === 'node_modules') {
return;
}
if (fs.statSync(path.join(dir, file)).isDirectory()) {
filelist = listFiles(path.join(dir, file), filelist);
} else {
filelist.push(path.join(dir, file));
}
});
return filelist;
}
function convert (content, operations) {
for (let i = 0; i < operations.length; i++) {
const operation = operations[i];
content = content.replace(operation[0], operation[1]);
}
return content;
}
function convertFile (fileName, operations) {
fs.readFile(fileName, 'utf-8', function (err, file) {
if (err) throw err;
file = convert(file, operations);
fs.writeFile(fileName, file, function (err) {
if (err) throw err;
});
});
}

View File

@@ -0,0 +1,79 @@
#!/usr/bin/env node
const spawn = require('child_process').spawnSync;
const filesToCheck = '*.js';
const FORMAT_START = process.env.FORMAT_START || 'main';
const IS_WIN = process.platform === 'win32';
const ESLINT_PATH = IS_WIN ? 'node_modules\\.bin\\eslint.cmd' : 'node_modules/.bin/eslint';
function main (args) {
let fix = false;
while (args.length > 0) {
switch (args[0]) {
case '-f':
case '--fix':
fix = true;
break;
default:
}
args.shift();
}
// Check JS files changed among unstaged files
const fileUnStaged = spawn(
'git',
['diff', '--name-only', '--diff-filter=d', FORMAT_START, filesToCheck],
{
encoding: 'utf-8'
}
);
// Check JS files changed among staged files
const fileStaged = spawn(
'git',
['diff', '--name-only', '--cached', '--diff-filter=d', FORMAT_START, filesToCheck],
{
encoding: 'utf-8'
}
);
const options = [
...fileStaged.stdout.split('\n').filter((f) => f !== ''),
...fileUnStaged.stdout.split('\n').filter((f) => f !== '')
];
if (fix) {
options.push('--fix');
}
const result = spawn(ESLINT_PATH, [...options], {
encoding: 'utf-8'
});
if (result.error && result.error.errno === 'ENOENT') {
console.error('Eslint not found! Eslint is supposed to be found at ', ESLINT_PATH);
return 2;
}
if (result.status === 1) {
console.error('Eslint error:', result.stdout);
const fixCmd = 'npm run lint:fix';
console.error(`ERROR: please run "${fixCmd}" to format changes in your commit
Note that when running the command locally, please keep your local
main branch and working branch up to date with nodejs/node-addon-api
to exclude unrelated complaints.
Or you can run "env FORMAT_START=upstream/main ${fixCmd}".
Also fix JS files by yourself if necessary.`);
return 1;
}
if (result.stderr) {
console.error('Error running eslint:', result.stderr);
return 2;
}
}
if (require.main === module) {
process.exitCode = main(process.argv.slice(2));
}

88
node_modules/@parcel/watcher/package.json generated vendored Normal file
View File

@@ -0,0 +1,88 @@
{
"name": "@parcel/watcher",
"version": "2.5.1",
"main": "index.js",
"types": "index.d.ts",
"repository": {
"type": "git",
"url": "https://github.com/parcel-bundler/watcher.git"
},
"description": "A native C++ Node module for querying and subscribing to filesystem events. Used by Parcel 2.",
"license": "MIT",
"publishConfig": {
"access": "public"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/parcel"
},
"files": [
"index.js",
"index.js.flow",
"index.d.ts",
"wrapper.js",
"package.json",
"README.md",
"LICENSE",
"src",
"scripts/build-from-source.js",
"binding.gyp"
],
"scripts": {
"prebuild": "prebuildify --napi --strip --tag-libc",
"format": "prettier --write \"./**/*.{js,json,md}\"",
"build": "node-gyp rebuild",
"install": "node scripts/build-from-source.js",
"test": "mocha"
},
"engines": {
"node": ">= 10.0.0"
},
"husky": {
"hooks": {
"pre-commit": "lint-staged"
}
},
"lint-staged": {
"*.{js,json,md}": [
"prettier --write",
"git add"
]
},
"dependencies": {
"detect-libc": "^1.0.3",
"is-glob": "^4.0.3",
"micromatch": "^4.0.5",
"node-addon-api": "^7.0.0"
},
"devDependencies": {
"esbuild": "^0.19.8",
"fs-extra": "^10.0.0",
"husky": "^7.0.2",
"lint-staged": "^11.1.2",
"mocha": "^9.1.1",
"napi-wasm": "^1.1.0",
"prebuildify": "^6.0.1",
"prettier": "^2.3.2"
},
"binary": {
"napi_versions": [
3
]
},
"optionalDependencies": {
"@parcel/watcher-darwin-x64": "2.5.1",
"@parcel/watcher-darwin-arm64": "2.5.1",
"@parcel/watcher-win32-x64": "2.5.1",
"@parcel/watcher-win32-arm64": "2.5.1",
"@parcel/watcher-win32-ia32": "2.5.1",
"@parcel/watcher-linux-x64-glibc": "2.5.1",
"@parcel/watcher-linux-x64-musl": "2.5.1",
"@parcel/watcher-linux-arm64-glibc": "2.5.1",
"@parcel/watcher-linux-arm64-musl": "2.5.1",
"@parcel/watcher-linux-arm-glibc": "2.5.1",
"@parcel/watcher-linux-arm-musl": "2.5.1",
"@parcel/watcher-android-arm64": "2.5.1",
"@parcel/watcher-freebsd-x64": "2.5.1"
}
}

View File

@@ -0,0 +1,13 @@
#!/usr/bin/env node
const {spawn} = require('child_process');
if (process.env.npm_config_build_from_source === 'true') {
build();
}
function build() {
spawn('node-gyp', ['rebuild'], { stdio: 'inherit', shell: true }).on('exit', function (code) {
process.exit(code);
});
}

182
node_modules/@parcel/watcher/src/Backend.cc generated vendored Normal file
View File

@@ -0,0 +1,182 @@
#ifdef FS_EVENTS
#include "macos/FSEventsBackend.hh"
#endif
#ifdef WATCHMAN
#include "watchman/WatchmanBackend.hh"
#endif
#ifdef WINDOWS
#include "windows/WindowsBackend.hh"
#endif
#ifdef INOTIFY
#include "linux/InotifyBackend.hh"
#endif
#ifdef KQUEUE
#include "kqueue/KqueueBackend.hh"
#endif
#ifdef __wasm32__
#include "wasm/WasmBackend.hh"
#endif
#include "shared/BruteForceBackend.hh"
#include "Backend.hh"
#include <unordered_map>
static std::unordered_map<std::string, std::shared_ptr<Backend>> sharedBackends;
std::shared_ptr<Backend> getBackend(std::string backend) {
// Use FSEvents on macOS by default.
// Use watchman by default if available on other platforms.
// Fall back to brute force.
#ifdef FS_EVENTS
if (backend == "fs-events" || backend == "default") {
return std::make_shared<FSEventsBackend>();
}
#endif
#ifdef WATCHMAN
if ((backend == "watchman" || backend == "default") && WatchmanBackend::checkAvailable()) {
return std::make_shared<WatchmanBackend>();
}
#endif
#ifdef WINDOWS
if (backend == "windows" || backend == "default") {
return std::make_shared<WindowsBackend>();
}
#endif
#ifdef INOTIFY
if (backend == "inotify" || backend == "default") {
return std::make_shared<InotifyBackend>();
}
#endif
#ifdef KQUEUE
if (backend == "kqueue" || backend == "default") {
return std::make_shared<KqueueBackend>();
}
#endif
#ifdef __wasm32__
if (backend == "wasm" || backend == "default") {
return std::make_shared<WasmBackend>();
}
#endif
if (backend == "brute-force" || backend == "default") {
return std::make_shared<BruteForceBackend>();
}
return nullptr;
}
std::shared_ptr<Backend> Backend::getShared(std::string backend) {
auto found = sharedBackends.find(backend);
if (found != sharedBackends.end()) {
return found->second;
}
auto result = getBackend(backend);
if (!result) {
return getShared("default");
}
result->run();
sharedBackends.emplace(backend, result);
return result;
}
void removeShared(Backend *backend) {
for (auto it = sharedBackends.begin(); it != sharedBackends.end(); it++) {
if (it->second.get() == backend) {
sharedBackends.erase(it);
break;
}
}
// Free up memory.
if (sharedBackends.size() == 0) {
sharedBackends.rehash(0);
}
}
void Backend::run() {
#ifndef __wasm32__
mThread = std::thread([this] () {
try {
start();
} catch (std::exception &err) {
handleError(err);
}
});
if (mThread.joinable()) {
mStartedSignal.wait();
}
#else
try {
start();
} catch (std::exception &err) {
handleError(err);
}
#endif
}
void Backend::notifyStarted() {
mStartedSignal.notify();
}
void Backend::start() {
notifyStarted();
}
Backend::~Backend() {
#ifndef __wasm32__
// Wait for thread to stop
if (mThread.joinable()) {
// If the backend is being destroyed from the thread itself, detach, otherwise join.
if (mThread.get_id() == std::this_thread::get_id()) {
mThread.detach();
} else {
mThread.join();
}
}
#endif
}
void Backend::watch(WatcherRef watcher) {
std::unique_lock<std::mutex> lock(mMutex);
auto res = mSubscriptions.find(watcher);
if (res == mSubscriptions.end()) {
try {
this->subscribe(watcher);
mSubscriptions.insert(watcher);
} catch (std::exception &err) {
unref();
throw;
}
}
}
void Backend::unwatch(WatcherRef watcher) {
std::unique_lock<std::mutex> lock(mMutex);
size_t deleted = mSubscriptions.erase(watcher);
if (deleted > 0) {
this->unsubscribe(watcher);
unref();
}
}
void Backend::unref() {
if (mSubscriptions.size() == 0) {
removeShared(this);
}
}
void Backend::handleWatcherError(WatcherError &err) {
unwatch(err.mWatcher);
err.mWatcher->notifyError(err);
}
void Backend::handleError(std::exception &err) {
std::unique_lock<std::mutex> lock(mMutex);
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end(); it++) {
(*it)->notifyError(err);
}
removeShared(this);
}

37
node_modules/@parcel/watcher/src/Backend.hh generated vendored Normal file
View File

@@ -0,0 +1,37 @@
#ifndef BACKEND_H
#define BACKEND_H
#include "Event.hh"
#include "Watcher.hh"
#include "Signal.hh"
#include <thread>
class Backend {
public:
virtual ~Backend();
void run();
void notifyStarted();
virtual void start();
virtual void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) = 0;
virtual void getEventsSince(WatcherRef watcher, std::string *snapshotPath) = 0;
virtual void subscribe(WatcherRef watcher) = 0;
virtual void unsubscribe(WatcherRef watcher) = 0;
static std::shared_ptr<Backend> getShared(std::string backend);
void watch(WatcherRef watcher);
void unwatch(WatcherRef watcher);
void unref();
void handleWatcherError(WatcherError &err);
std::mutex mMutex;
std::thread mThread;
private:
std::unordered_set<WatcherRef> mSubscriptions;
Signal mStartedSignal;
void handleError(std::exception &err);
};
#endif

113
node_modules/@parcel/watcher/src/Debounce.cc generated vendored Normal file
View File

@@ -0,0 +1,113 @@
#include "Debounce.hh"
#ifdef __wasm32__
extern "C" void on_timeout(void *ctx) {
Debounce *debounce = (Debounce *)ctx;
debounce->notify();
}
#endif
std::shared_ptr<Debounce> Debounce::getShared() {
static std::weak_ptr<Debounce> sharedInstance;
std::shared_ptr<Debounce> shared = sharedInstance.lock();
if (!shared) {
shared = std::make_shared<Debounce>();
sharedInstance = shared;
}
return shared;
}
Debounce::Debounce() {
mRunning = true;
#ifndef __wasm32__
mThread = std::thread([this] () {
loop();
});
#endif
}
Debounce::~Debounce() {
mRunning = false;
#ifndef __wasm32__
mWaitSignal.notify();
mThread.join();
#endif
}
void Debounce::add(void *key, std::function<void()> cb) {
std::unique_lock<std::mutex> lock(mMutex);
mCallbacks.emplace(key, cb);
}
void Debounce::remove(void *key) {
std::unique_lock<std::mutex> lock(mMutex);
mCallbacks.erase(key);
}
void Debounce::trigger() {
std::unique_lock<std::mutex> lock(mMutex);
#ifdef __wasm32__
notifyIfReady();
#else
mWaitSignal.notify();
#endif
}
#ifndef __wasm32__
void Debounce::loop() {
while (mRunning) {
mWaitSignal.wait();
if (!mRunning) {
break;
}
notifyIfReady();
}
}
#endif
void Debounce::notifyIfReady() {
if (!mRunning) {
return;
}
// If we haven't seen an event in more than the maximum wait time, notify callbacks immediately
// to ensure that we don't wait forever. Otherwise, wait for the minimum wait time and batch
// subsequent fast changes. This also means the first file change in a batch is notified immediately,
// separately from the rest of the batch. This seems like an acceptable tradeoff if the common case
// is that only a single file was updated at a time.
auto time = std::chrono::steady_clock::now();
if ((time - mLastTime) > std::chrono::milliseconds(MAX_WAIT_TIME)) {
mLastTime = time;
notify();
} else {
wait();
}
}
void Debounce::wait() {
#ifdef __wasm32__
clear_timeout(mTimeout);
mTimeout = set_timeout(MIN_WAIT_TIME, this);
#else
auto status = mWaitSignal.waitFor(std::chrono::milliseconds(MIN_WAIT_TIME));
if (mRunning && (status == std::cv_status::timeout)) {
notify();
}
#endif
}
void Debounce::notify() {
std::unique_lock<std::mutex> lock(mMutex);
mLastTime = std::chrono::steady_clock::now();
for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
auto cb = it->second;
cb();
}
#ifndef __wasm32__
mWaitSignal.reset();
#endif
}

49
node_modules/@parcel/watcher/src/Debounce.hh generated vendored Normal file
View File

@@ -0,0 +1,49 @@
#ifndef DEBOUNCE_H
#define DEBOUNCE_H
#include <thread>
#include <unordered_map>
#include <functional>
#include "Signal.hh"
#define MIN_WAIT_TIME 50
#define MAX_WAIT_TIME 500
#ifdef __wasm32__
extern "C" {
int set_timeout(int ms, void *ctx);
void clear_timeout(int timeout);
void on_timeout(void *ctx);
};
#endif
class Debounce {
public:
static std::shared_ptr<Debounce> getShared();
Debounce();
~Debounce();
void add(void *key, std::function<void()> cb);
void remove(void *key);
void trigger();
void notify();
private:
bool mRunning;
std::mutex mMutex;
#ifdef __wasm32__
int mTimeout;
#else
Signal mWaitSignal;
std::thread mThread;
#endif
std::unordered_map<void *, std::function<void()>> mCallbacks;
std::chrono::time_point<std::chrono::steady_clock> mLastTime;
void loop();
void notifyIfReady();
void wait();
};
#endif

152
node_modules/@parcel/watcher/src/DirTree.cc generated vendored Normal file
View File

@@ -0,0 +1,152 @@
#include "DirTree.hh"
#include <inttypes.h>
static std::mutex mDirCacheMutex;
static std::unordered_map<std::string, std::weak_ptr<DirTree>> dirTreeCache;
struct DirTreeDeleter {
void operator()(DirTree *tree) {
std::lock_guard<std::mutex> lock(mDirCacheMutex);
dirTreeCache.erase(tree->root);
delete tree;
// Free up memory.
if (dirTreeCache.size() == 0) {
dirTreeCache.rehash(0);
}
}
};
std::shared_ptr<DirTree> DirTree::getCached(std::string root) {
std::lock_guard<std::mutex> lock(mDirCacheMutex);
auto found = dirTreeCache.find(root);
std::shared_ptr<DirTree> tree;
// Use cached tree, or create an empty one.
if (found != dirTreeCache.end()) {
tree = found->second.lock();
} else {
tree = std::shared_ptr<DirTree>(new DirTree(root), DirTreeDeleter());
dirTreeCache.emplace(root, tree);
}
return tree;
}
DirTree::DirTree(std::string root, FILE *f) : root(root), isComplete(true) {
size_t size;
if (fscanf(f, "%zu", &size)) {
for (size_t i = 0; i < size; i++) {
DirEntry entry(f);
entries.emplace(entry.path, entry);
}
}
}
// Internal find method that has no lock
DirEntry *DirTree::_find(std::string path) {
auto found = entries.find(path);
if (found == entries.end()) {
return NULL;
}
return &found->second;
}
DirEntry *DirTree::add(std::string path, uint64_t mtime, bool isDir) {
std::lock_guard<std::mutex> lock(mMutex);
DirEntry entry(path, mtime, isDir);
auto it = entries.emplace(entry.path, entry);
return &it.first->second;
}
DirEntry *DirTree::find(std::string path) {
std::lock_guard<std::mutex> lock(mMutex);
return _find(path);
}
DirEntry *DirTree::update(std::string path, uint64_t mtime) {
std::lock_guard<std::mutex> lock(mMutex);
DirEntry *found = _find(path);
if (found) {
found->mtime = mtime;
}
return found;
}
void DirTree::remove(std::string path) {
std::lock_guard<std::mutex> lock(mMutex);
DirEntry *found = _find(path);
// Remove all sub-entries if this is a directory
if (found && found->isDir) {
std::string pathStart = path + DIR_SEP;
for (auto it = entries.begin(); it != entries.end();) {
if (it->first.rfind(pathStart, 0) == 0) {
it = entries.erase(it);
} else {
it++;
}
}
}
entries.erase(path);
}
void DirTree::write(FILE *f) {
std::lock_guard<std::mutex> lock(mMutex);
fprintf(f, "%zu\n", entries.size());
for (auto it = entries.begin(); it != entries.end(); it++) {
it->second.write(f);
}
}
void DirTree::getChanges(DirTree *snapshot, EventList &events) {
std::lock_guard<std::mutex> lock(mMutex);
std::lock_guard<std::mutex> snapshotLock(snapshot->mMutex);
for (auto it = entries.begin(); it != entries.end(); it++) {
auto found = snapshot->entries.find(it->first);
if (found == snapshot->entries.end()) {
events.create(it->second.path);
} else if (found->second.mtime != it->second.mtime && !found->second.isDir && !it->second.isDir) {
events.update(it->second.path);
}
}
for (auto it = snapshot->entries.begin(); it != snapshot->entries.end(); it++) {
size_t count = entries.count(it->first);
if (count == 0) {
events.remove(it->second.path);
}
}
}
DirEntry::DirEntry(std::string p, uint64_t t, bool d) {
path = p;
mtime = t;
isDir = d;
state = NULL;
}
DirEntry::DirEntry(FILE *f) {
size_t size;
if (fscanf(f, "%zu", &size)) {
path.resize(size);
if (fread(&path[0], sizeof(char), size, f)) {
int d = 0;
fscanf(f, "%" PRIu64 " %d\n", &mtime, &d);
isDir = d == 1;
}
}
}
void DirEntry::write(FILE *f) const {
fprintf(f, "%zu%s%" PRIu64 " %d\n", path.size(), path.c_str(), mtime, isDir);
}

50
node_modules/@parcel/watcher/src/DirTree.hh generated vendored Normal file
View File

@@ -0,0 +1,50 @@
#ifndef DIR_TREE_H
#define DIR_TREE_H
#include <string>
#include <unordered_map>
#include <memory>
#include "Event.hh"
#ifdef _WIN32
#define DIR_SEP "\\"
#else
#define DIR_SEP "/"
#endif
struct DirEntry {
std::string path;
uint64_t mtime;
bool isDir;
mutable void *state;
DirEntry(std::string p, uint64_t t, bool d);
DirEntry(FILE *f);
void write(FILE *f) const;
bool operator==(const DirEntry &other) const {
return path == other.path;
}
};
class DirTree {
public:
static std::shared_ptr<DirTree> getCached(std::string root);
DirTree(std::string root) : root(root), isComplete(false) {}
DirTree(std::string root, FILE *f);
DirEntry *add(std::string path, uint64_t mtime, bool isDir);
DirEntry *find(std::string path);
DirEntry *update(std::string path, uint64_t mtime);
void remove(std::string path);
void write(FILE *f);
void getChanges(DirTree *snapshot, EventList &events);
std::mutex mMutex;
std::string root;
bool isComplete;
std::unordered_map<std::string, DirEntry> entries;
private:
DirEntry *_find(std::string path);
};
#endif

109
node_modules/@parcel/watcher/src/Event.hh generated vendored Normal file
View File

@@ -0,0 +1,109 @@
#ifndef EVENT_H
#define EVENT_H
#include <string>
#include <node_api.h>
#include "wasm/include.h"
#include <napi.h>
#include <mutex>
#include <map>
#include <optional>
using namespace Napi;
struct Event {
std::string path;
bool isCreated;
bool isDeleted;
Event(std::string path) : path(path), isCreated(false), isDeleted(false) {}
Value toJS(const Env& env) {
EscapableHandleScope scope(env);
Object res = Object::New(env);
std::string type = isCreated ? "create" : isDeleted ? "delete" : "update";
res.Set(String::New(env, "path"), String::New(env, path.c_str()));
res.Set(String::New(env, "type"), String::New(env, type.c_str()));
return scope.Escape(res);
}
};
class EventList {
public:
void create(std::string path) {
std::lock_guard<std::mutex> l(mMutex);
Event *event = internalUpdate(path);
if (event->isDeleted) {
// Assume update event when rapidly removed and created
// https://github.com/parcel-bundler/watcher/issues/72
event->isDeleted = false;
} else {
event->isCreated = true;
}
}
Event *update(std::string path) {
std::lock_guard<std::mutex> l(mMutex);
return internalUpdate(path);
}
void remove(std::string path) {
std::lock_guard<std::mutex> l(mMutex);
Event *event = internalUpdate(path);
event->isDeleted = true;
}
size_t size() {
std::lock_guard<std::mutex> l(mMutex);
return mEvents.size();
}
std::vector<Event> getEvents() {
std::lock_guard<std::mutex> l(mMutex);
std::vector<Event> eventsCloneVector;
for(auto it = mEvents.begin(); it != mEvents.end(); ++it) {
if (!(it->second.isCreated && it->second.isDeleted)) {
eventsCloneVector.push_back(it->second);
}
}
return eventsCloneVector;
}
void clear() {
std::lock_guard<std::mutex> l(mMutex);
mEvents.clear();
mError.reset();
}
void error(std::string err) {
std::lock_guard<std::mutex> l(mMutex);
if (!mError.has_value()) {
mError.emplace(err);
}
}
bool hasError() {
std::lock_guard<std::mutex> l(mMutex);
return mError.has_value();
}
std::string getError() {
std::lock_guard<std::mutex> l(mMutex);
return mError.value_or("");
}
private:
mutable std::mutex mMutex;
std::map<std::string, Event> mEvents;
std::optional<std::string> mError;
Event *internalUpdate(std::string path) {
auto found = mEvents.find(path);
if (found == mEvents.end()) {
auto it = mEvents.emplace(path, Event(path));
return &it.first->second;
}
return &found->second;
}
};
#endif

22
node_modules/@parcel/watcher/src/Glob.cc generated vendored Normal file
View File

@@ -0,0 +1,22 @@
#include "Glob.hh"
#ifdef __wasm32__
extern "C" bool wasm_regex_match(const char *s, const char *regex);
#endif
Glob::Glob(std::string raw) {
mRaw = raw;
mHash = std::hash<std::string>()(raw);
#ifndef __wasm32__
mRegex = std::regex(raw);
#endif
}
bool Glob::isIgnored(std::string relative_path) const {
// Use native JS regex engine for wasm to reduce binary size.
#ifdef __wasm32__
return wasm_regex_match(relative_path.c_str(), mRaw.c_str());
#else
return std::regex_match(relative_path, mRegex);
#endif
}

34
node_modules/@parcel/watcher/src/Glob.hh generated vendored Normal file
View File

@@ -0,0 +1,34 @@
#ifndef GLOB_H
#define GLOB_H
#include <unordered_set>
#include <regex>
struct Glob {
std::size_t mHash;
std::string mRaw;
#ifndef __wasm32__
std::regex mRegex;
#endif
Glob(std::string raw);
bool operator==(const Glob &other) const {
return mHash == other.mHash;
}
bool isIgnored(std::string relative_path) const;
};
namespace std
{
template <>
struct hash<Glob>
{
size_t operator()(const Glob& g) const {
return g.mHash;
}
};
}
#endif

101
node_modules/@parcel/watcher/src/PromiseRunner.hh generated vendored Normal file
View File

@@ -0,0 +1,101 @@
#ifndef PROMISE_RUNNER_H
#define PROMISE_RUNNER_H
#include <node_api.h>
#include "wasm/include.h"
#include <napi.h>
using namespace Napi;
class PromiseRunner {
public:
const Env env;
Promise::Deferred deferred;
PromiseRunner(Env env) : env(env), deferred(Promise::Deferred::New(env)) {
napi_status status = napi_create_async_work(env, nullptr, env.Undefined(),
onExecute, onWorkComplete, this, &work);
if (status != napi_ok) {
work = nullptr;
const napi_extended_error_info *error_info = 0;
napi_get_last_error_info(env, &error_info);
if (error_info->error_message) {
Error::New(env, error_info->error_message).ThrowAsJavaScriptException();
} else {
Error::New(env).ThrowAsJavaScriptException();
}
}
}
virtual ~PromiseRunner() {}
Value queue() {
if (work) {
napi_status status = napi_queue_async_work(env, work);
if (status != napi_ok) {
onError(Error::New(env));
}
}
return deferred.Promise();
}
private:
napi_async_work work;
std::string error;
static void onExecute(napi_env env, void *this_pointer) {
PromiseRunner* self = (PromiseRunner*) this_pointer;
try {
self->execute();
} catch (std::exception &err) {
self->error = err.what();
}
}
static void onWorkComplete(napi_env env, napi_status status, void *this_pointer) {
PromiseRunner* self = (PromiseRunner*) this_pointer;
if (status != napi_cancelled) {
HandleScope scope(self->env);
if (status == napi_ok) {
status = napi_delete_async_work(self->env, self->work);
if (status == napi_ok) {
if (self->error.size() == 0) {
self->onOK();
} else {
self->onError(Error::New(self->env, self->error));
}
delete self;
return;
}
}
}
// fallthrough for error handling
const napi_extended_error_info *error_info = 0;
napi_get_last_error_info(env, &error_info);
if (error_info->error_message){
self->onError(Error::New(env, error_info->error_message));
} else {
self->onError(Error::New(env));
}
delete self;
}
virtual void execute() {}
virtual Value getResult() {
return env.Null();
}
void onOK() {
HandleScope scope(env);
Value result = getResult();
deferred.Resolve(result);
}
void onError(const Error &e) {
deferred.Reject(e.Value());
}
};
#endif

46
node_modules/@parcel/watcher/src/Signal.hh generated vendored Normal file
View File

@@ -0,0 +1,46 @@
#ifndef SIGNAL_H
#define SIGNAL_H
#include <mutex>
#include <condition_variable>
class Signal {
public:
Signal() : mFlag(false), mWaiting(false) {}
void wait() {
std::unique_lock<std::mutex> lock(mMutex);
while (!mFlag) {
mWaiting = true;
mCond.wait(lock);
}
}
std::cv_status waitFor(std::chrono::milliseconds ms) {
std::unique_lock<std::mutex> lock(mMutex);
return mCond.wait_for(lock, ms);
}
void notify() {
std::unique_lock<std::mutex> lock(mMutex);
mFlag = true;
mCond.notify_all();
}
void reset() {
std::unique_lock<std::mutex> lock(mMutex);
mFlag = false;
mWaiting = false;
}
bool isWaiting() {
return mWaiting;
}
private:
bool mFlag;
bool mWaiting;
std::mutex mMutex;
std::condition_variable mCond;
};
#endif

237
node_modules/@parcel/watcher/src/Watcher.cc generated vendored Normal file
View File

@@ -0,0 +1,237 @@
#include "Watcher.hh"
#include <unordered_set>
using namespace Napi;
struct WatcherHash {
std::size_t operator() (WatcherRef const &k) const {
return std::hash<std::string>()(k->mDir);
}
};
struct WatcherCompare {
size_t operator() (WatcherRef const &a, WatcherRef const &b) const {
return *a == *b;
}
};
static std::unordered_set<WatcherRef , WatcherHash, WatcherCompare> sharedWatchers;
WatcherRef Watcher::getShared(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs) {
WatcherRef watcher = std::make_shared<Watcher>(dir, ignorePaths, ignoreGlobs);
auto found = sharedWatchers.find(watcher);
if (found != sharedWatchers.end()) {
return *found;
}
sharedWatchers.insert(watcher);
return watcher;
}
void removeShared(Watcher *watcher) {
for (auto it = sharedWatchers.begin(); it != sharedWatchers.end(); it++) {
if (it->get() == watcher) {
sharedWatchers.erase(it);
break;
}
}
// Free up memory.
if (sharedWatchers.size() == 0) {
sharedWatchers.rehash(0);
}
}
Watcher::Watcher(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs)
: mDir(dir),
mIgnorePaths(ignorePaths),
mIgnoreGlobs(ignoreGlobs) {
mDebounce = Debounce::getShared();
mDebounce->add(this, [this] () {
triggerCallbacks();
});
}
Watcher::~Watcher() {
mDebounce->remove(this);
}
void Watcher::wait() {
std::unique_lock<std::mutex> lk(mMutex);
mCond.wait(lk);
}
void Watcher::notify() {
std::unique_lock<std::mutex> lk(mMutex);
mCond.notify_all();
if (mCallbacks.size() > 0 && mEvents.size() > 0) {
// We must release our lock before calling into the debouncer
// to avoid a deadlock: the debouncer thread itself will require
// our lock from its thread when calling into `triggerCallbacks`
// while holding its own debouncer lock.
lk.unlock();
mDebounce->trigger();
}
}
struct CallbackData {
std::string error;
std::vector<Event> events;
CallbackData(std::string error, std::vector<Event> events) : error(error), events(events) {}
};
Value callbackEventsToJS(const Env &env, std::vector<Event> &events) {
EscapableHandleScope scope(env);
Array arr = Array::New(env, events.size());
size_t currentEventIndex = 0;
for (auto eventIterator = events.begin(); eventIterator != events.end(); eventIterator++) {
arr.Set(currentEventIndex++, eventIterator->toJS(env));
}
return scope.Escape(arr);
}
void callJSFunction(Napi::Env env, Function jsCallback, CallbackData *data) {
HandleScope scope(env);
auto err = data->error.size() > 0 ? Error::New(env, data->error).Value() : env.Null();
auto events = callbackEventsToJS(env, data->events);
jsCallback.Call({err, events});
delete data;
// Throw errors from the callback as fatal exceptions.
// If we don't handle these, Node segfaults...
if (env.IsExceptionPending()) {
Napi::Error err = env.GetAndClearPendingException();
napi_fatal_exception(env, err.Value());
}
}
void Watcher::notifyError(std::exception &err) {
std::unique_lock<std::mutex> lk(mMutex);
for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
CallbackData *data = new CallbackData(err.what(), {});
it->tsfn.BlockingCall(data, callJSFunction);
}
clearCallbacks();
}
// This function is called from the debounce thread.
void Watcher::triggerCallbacks() {
std::unique_lock<std::mutex> lk(mMutex);
if (mCallbacks.size() > 0 && (mEvents.size() > 0 || mEvents.hasError())) {
auto error = mEvents.getError();
auto events = mEvents.getEvents();
mEvents.clear();
for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
it->tsfn.BlockingCall(new CallbackData(error, events), callJSFunction);
}
}
}
// This should be called from the JavaScript thread.
bool Watcher::watch(Function callback) {
std::unique_lock<std::mutex> lk(mMutex);
auto it = findCallback(callback);
if (it != mCallbacks.end()) {
return false;
}
auto tsfn = ThreadSafeFunction::New(
callback.Env(),
callback,
"Watcher callback",
0, // Unlimited queue
1 // Initial thread count
);
mCallbacks.push_back(Callback {
tsfn,
Napi::Persistent(callback),
std::this_thread::get_id()
});
return true;
}
// This should be called from the JavaScript thread.
std::vector<Callback>::iterator Watcher::findCallback(Function callback) {
for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
// Only consider callbacks created by the same thread, or V8 will panic.
if (it->threadId == std::this_thread::get_id() && it->ref.Value() == callback) {
return it;
}
}
return mCallbacks.end();
}
// This should be called from the JavaScript thread.
bool Watcher::unwatch(Function callback) {
std::unique_lock<std::mutex> lk(mMutex);
bool removed = false;
auto it = findCallback(callback);
if (it != mCallbacks.end()) {
it->tsfn.Release();
it->ref.Unref();
mCallbacks.erase(it);
removed = true;
}
if (removed && mCallbacks.size() == 0) {
unref();
return true;
}
return false;
}
void Watcher::unref() {
if (mCallbacks.size() == 0) {
removeShared(this);
}
}
void Watcher::destroy() {
std::unique_lock<std::mutex> lk(mMutex);
clearCallbacks();
}
// Private because it doesn't lock.
void Watcher::clearCallbacks() {
for (auto it = mCallbacks.begin(); it != mCallbacks.end(); it++) {
it->tsfn.Release();
it->ref.Unref();
}
mCallbacks.clear();
unref();
}
bool Watcher::isIgnored(std::string path) {
for (auto it = mIgnorePaths.begin(); it != mIgnorePaths.end(); it++) {
auto dir = *it + DIR_SEP;
if (*it == path || path.compare(0, dir.size(), dir) == 0) {
return true;
}
}
auto basePath = mDir + DIR_SEP;
if (path.rfind(basePath, 0) != 0) {
return false;
}
auto relativePath = path.substr(basePath.size());
for (auto it = mIgnoreGlobs.begin(); it != mIgnoreGlobs.end(); it++) {
if (it->isIgnored(relativePath)) {
return true;
}
}
return false;
}
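
As a side note, the `getShared`/`sharedWatchers` pattern above deduplicates shared_ptr-owned objects by value rather than by pointer identity, via custom hash and equality functors. A small self-contained sketch of just that idea (the `Key` type and names here are illustrative, not part of this package):

```cpp
#include <iostream>
#include <memory>
#include <string>
#include <unordered_set>

// Key stands in for Watcher: two instances compare equal when their
// contents match, even though they are separate allocations.
struct Key {
  std::string dir;
  bool operator==(const Key &other) const { return dir == other.dir; }
};
using KeyRef = std::shared_ptr<Key>;

// Hash and equality look through the shared_ptr at the payload, so the set
// deduplicates by value rather than by pointer.
struct KeyHash {
  std::size_t operator()(const KeyRef &k) const { return std::hash<std::string>()(k->dir); }
};
struct KeyEqual {
  bool operator()(const KeyRef &a, const KeyRef &b) const { return *a == *b; }
};

static std::unordered_set<KeyRef, KeyHash, KeyEqual> shared;

KeyRef getShared(const std::string &dir) {
  auto candidate = std::make_shared<Key>();
  candidate->dir = dir;
  auto found = shared.find(candidate);
  if (found != shared.end()) return *found;  // reuse the existing instance
  shared.insert(candidate);
  return candidate;
}

int main() {
  auto a = getShared("/tmp/project");
  auto b = getShared("/tmp/project");
  std::cout << std::boolalpha << (a == b) << "\n";  // true: same underlying object
}
```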

73
node_modules/@parcel/watcher/src/Watcher.hh generated vendored Normal file
View File

@@ -0,0 +1,73 @@
#ifndef WATCHER_H
#define WATCHER_H
#include <condition_variable>
#include <unordered_set>
#include <set>
#include <node_api.h>
#include "Glob.hh"
#include "Event.hh"
#include "Debounce.hh"
#include "DirTree.hh"
#include "Signal.hh"
using namespace Napi;
struct Watcher;
using WatcherRef = std::shared_ptr<Watcher>;
struct Callback {
Napi::ThreadSafeFunction tsfn;
Napi::FunctionReference ref;
std::thread::id threadId;
};
class WatcherState {
public:
virtual ~WatcherState() = default;
};
struct Watcher {
std::string mDir;
std::unordered_set<std::string> mIgnorePaths;
std::unordered_set<Glob> mIgnoreGlobs;
EventList mEvents;
std::shared_ptr<WatcherState> state;
Watcher(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs);
~Watcher();
bool operator==(const Watcher &other) const {
return mDir == other.mDir && mIgnorePaths == other.mIgnorePaths && mIgnoreGlobs == other.mIgnoreGlobs;
}
void wait();
void notify();
void notifyError(std::exception &err);
bool watch(Function callback);
bool unwatch(Function callback);
void unref();
bool isIgnored(std::string path);
void destroy();
static WatcherRef getShared(std::string dir, std::unordered_set<std::string> ignorePaths, std::unordered_set<Glob> ignoreGlobs);
private:
std::mutex mMutex;
std::condition_variable mCond;
std::vector<Callback> mCallbacks;
std::shared_ptr<Debounce> mDebounce;
std::vector<Callback>::iterator findCallback(Function callback);
void clearCallbacks();
void triggerCallbacks();
};
class WatcherError : public std::runtime_error {
public:
WatcherRef mWatcher;
WatcherError(std::string msg, WatcherRef watcher) : std::runtime_error(msg), mWatcher(watcher) {}
WatcherError(const char *msg, WatcherRef watcher) : std::runtime_error(msg), mWatcher(watcher) {}
};
#endif

268
node_modules/@parcel/watcher/src/binding.cc generated vendored Normal file
View File

@@ -0,0 +1,268 @@
#include <unordered_set>
#include <node_api.h>
#include "wasm/include.h"
#include <napi.h>
#include "Glob.hh"
#include "Event.hh"
#include "Backend.hh"
#include "Watcher.hh"
#include "PromiseRunner.hh"
using namespace Napi;
std::unordered_set<std::string> getIgnorePaths(Env env, Value opts) {
std::unordered_set<std::string> result;
if (opts.IsObject()) {
Value v = opts.As<Object>().Get(String::New(env, "ignorePaths"));
if (v.IsArray()) {
Array items = v.As<Array>();
for (size_t i = 0; i < items.Length(); i++) {
Value item = items.Get(Number::New(env, i));
if (item.IsString()) {
result.insert(std::string(item.As<String>().Utf8Value().c_str()));
}
}
}
}
return result;
}
std::unordered_set<Glob> getIgnoreGlobs(Env env, Value opts) {
std::unordered_set<Glob> result;
if (opts.IsObject()) {
Value v = opts.As<Object>().Get(String::New(env, "ignoreGlobs"));
if (v.IsArray()) {
Array items = v.As<Array>();
for (size_t i = 0; i < items.Length(); i++) {
Value item = items.Get(Number::New(env, i));
if (item.IsString()) {
auto key = item.As<String>().Utf8Value();
try {
result.emplace(key);
} catch (const std::regex_error& e) {
Error::New(env, e.what()).ThrowAsJavaScriptException();
}
}
}
}
}
return result;
}
std::shared_ptr<Backend> getBackend(Env env, Value opts) {
Value b = opts.As<Object>().Get(String::New(env, "backend"));
std::string backendName;
if (b.IsString()) {
backendName = std::string(b.As<String>().Utf8Value().c_str());
}
return Backend::getShared(backendName);
}
class WriteSnapshotRunner : public PromiseRunner {
public:
WriteSnapshotRunner(Env env, Value dir, Value snap, Value opts)
: PromiseRunner(env),
snapshotPath(std::string(snap.As<String>().Utf8Value().c_str())) {
watcher = Watcher::getShared(
std::string(dir.As<String>().Utf8Value().c_str()),
getIgnorePaths(env, opts),
getIgnoreGlobs(env, opts)
);
backend = getBackend(env, opts);
}
~WriteSnapshotRunner() {
watcher->unref();
backend->unref();
}
private:
std::shared_ptr<Backend> backend;
WatcherRef watcher;
std::string snapshotPath;
void execute() override {
backend->writeSnapshot(watcher, &snapshotPath);
}
};
class GetEventsSinceRunner : public PromiseRunner {
public:
GetEventsSinceRunner(Env env, Value dir, Value snap, Value opts)
: PromiseRunner(env),
snapshotPath(std::string(snap.As<String>().Utf8Value().c_str())) {
watcher = std::make_shared<Watcher>(
std::string(dir.As<String>().Utf8Value().c_str()),
getIgnorePaths(env, opts),
getIgnoreGlobs(env, opts)
);
backend = getBackend(env, opts);
}
~GetEventsSinceRunner() {
watcher->unref();
backend->unref();
}
private:
std::shared_ptr<Backend> backend;
WatcherRef watcher;
std::string snapshotPath;
void execute() override {
backend->getEventsSince(watcher, &snapshotPath);
if (watcher->mEvents.hasError()) {
throw std::runtime_error(watcher->mEvents.getError());
}
}
Value getResult() override {
std::vector<Event> events = watcher->mEvents.getEvents();
Array eventsArray = Array::New(env, events.size());
size_t i = 0;
for (auto it = events.begin(); it != events.end(); it++) {
eventsArray.Set(i++, it->toJS(env));
}
return eventsArray;
}
};
template<class Runner>
Value queueSnapshotWork(const CallbackInfo& info) {
Env env = info.Env();
if (info.Length() < 1 || !info[0].IsString()) {
TypeError::New(env, "Expected a string").ThrowAsJavaScriptException();
return env.Null();
}
if (info.Length() < 2 || !info[1].IsString()) {
TypeError::New(env, "Expected a string").ThrowAsJavaScriptException();
return env.Null();
}
if (info.Length() >= 3 && !info[2].IsObject()) {
TypeError::New(env, "Expected an object").ThrowAsJavaScriptException();
return env.Null();
}
Runner *runner = new Runner(info.Env(), info[0], info[1], info[2]);
return runner->queue();
}
Value writeSnapshot(const CallbackInfo& info) {
return queueSnapshotWork<WriteSnapshotRunner>(info);
}
Value getEventsSince(const CallbackInfo& info) {
return queueSnapshotWork<GetEventsSinceRunner>(info);
}
class SubscribeRunner : public PromiseRunner {
public:
SubscribeRunner(Env env, Value dir, Value fn, Value opts) : PromiseRunner(env) {
watcher = Watcher::getShared(
std::string(dir.As<String>().Utf8Value().c_str()),
getIgnorePaths(env, opts),
getIgnoreGlobs(env, opts)
);
backend = getBackend(env, opts);
watcher->watch(fn.As<Function>());
}
private:
WatcherRef watcher;
std::shared_ptr<Backend> backend;
FunctionReference callback;
void execute() override {
try {
backend->watch(watcher);
} catch (std::exception &err) {
watcher->destroy();
throw;
}
}
};
class UnsubscribeRunner : public PromiseRunner {
public:
UnsubscribeRunner(Env env, Value dir, Value fn, Value opts) : PromiseRunner(env) {
watcher = Watcher::getShared(
std::string(dir.As<String>().Utf8Value().c_str()),
getIgnorePaths(env, opts),
getIgnoreGlobs(env, opts)
);
backend = getBackend(env, opts);
shouldUnwatch = watcher->unwatch(fn.As<Function>());
}
private:
WatcherRef watcher;
std::shared_ptr<Backend> backend;
bool shouldUnwatch;
void execute() override {
if (shouldUnwatch) {
backend->unwatch(watcher);
}
}
};
template<class Runner>
Value queueSubscriptionWork(const CallbackInfo& info) {
Env env = info.Env();
if (info.Length() < 1 || !info[0].IsString()) {
TypeError::New(env, "Expected a string").ThrowAsJavaScriptException();
return env.Null();
}
if (info.Length() < 2 || !info[1].IsFunction()) {
TypeError::New(env, "Expected a function").ThrowAsJavaScriptException();
return env.Null();
}
if (info.Length() >= 3 && !info[2].IsObject()) {
TypeError::New(env, "Expected an object").ThrowAsJavaScriptException();
return env.Null();
}
Runner *runner = new Runner(info.Env(), info[0], info[1], info[2]);
return runner->queue();
}
Value subscribe(const CallbackInfo& info) {
return queueSubscriptionWork<SubscribeRunner>(info);
}
Value unsubscribe(const CallbackInfo& info) {
return queueSubscriptionWork<UnsubscribeRunner>(info);
}
Object Init(Env env, Object exports) {
exports.Set(
String::New(env, "writeSnapshot"),
Function::New(env, writeSnapshot)
);
exports.Set(
String::New(env, "getEventsSince"),
Function::New(env, getEventsSince)
);
exports.Set(
String::New(env, "subscribe"),
Function::New(env, subscribe)
);
exports.Set(
String::New(env, "unsubscribe"),
Function::New(env, unsubscribe)
);
return exports;
}
NODE_API_MODULE(watcher, Init)
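
The two `queue*Work` templates above share one shape: validate the common arguments once, then hand them to whichever runner type the template names. A reduced, hypothetical sketch of that dispatch pattern without N-API (the `Request`, `WriteSnapshotTask`, and `GetEventsSinceTask` names are illustrative stand-ins, not part of this package):

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

struct Request {
  std::string dir;
  std::string snapshotPath;
};

struct WriteSnapshotTask {
  explicit WriteSnapshotTask(const Request &r) : req(r) {}
  void run() { std::cout << "write snapshot for " << req.dir << "\n"; }
  Request req;
};

struct GetEventsSinceTask {
  explicit GetEventsSinceTask(const Request &r) : req(r) {}
  void run() { std::cout << "diff " << req.dir << " against " << req.snapshotPath << "\n"; }
  Request req;
};

// Shared validation, then construct whichever runner the caller asked for,
// mirroring queueSnapshotWork / queueSubscriptionWork above.
template <class Runner>
void queueWork(const Request &req) {
  if (req.dir.empty()) throw std::invalid_argument("Expected a directory");
  if (req.snapshotPath.empty()) throw std::invalid_argument("Expected a snapshot path");
  Runner runner(req);
  runner.run();  // the real code heap-allocates a PromiseRunner and queues it instead
}

int main() {
  queueWork<WriteSnapshotTask>({"/tmp/project", "/tmp/snapshot.txt"});
  queueWork<GetEventsSinceTask>({"/tmp/project", "/tmp/snapshot.txt"});
}
```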

View File

@@ -0,0 +1,306 @@
#include <memory>
#include <poll.h>
#include <unistd.h>
#include <libgen.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/stat.h>
#include "KqueueBackend.hh"
#if __APPLE__
#define st_mtim st_mtimespec
#endif
#if !defined(O_EVTONLY)
#define O_EVTONLY O_RDONLY
#endif
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
void KqueueBackend::start() {
if ((mKqueue = kqueue()) < 0) {
throw std::runtime_error(std::string("Unable to open kqueue: ") + strerror(errno));
}
// Create a pipe that we will write to when we want to end the thread.
int err = pipe(mPipe);
if (err == -1) {
throw std::runtime_error(std::string("Unable to open pipe: ") + strerror(errno));
}
// Subscribe kqueue to this pipe.
struct kevent ev;
EV_SET(
&ev,
mPipe[0],
EVFILT_READ,
EV_ADD | EV_CLEAR,
0,
0,
0
);
if (kevent(mKqueue, &ev, 1, NULL, 0, 0)) {
close(mPipe[0]);
close(mPipe[1]);
throw std::runtime_error(std::string("Unable to watch pipe: ") + strerror(errno));
}
notifyStarted();
struct kevent events[128];
while (true) {
int event_count = kevent(mKqueue, NULL, 0, events, 128, 0);
if (event_count < 0 || events[0].flags == EV_ERROR) {
throw std::runtime_error(std::string("kevent error: ") + strerror(errno));
}
// Track all of the watchers that are touched so we can notify them at the end of the events.
std::unordered_set<WatcherRef> watchers;
for (int i = 0; i < event_count; i++) {
int flags = events[i].fflags;
int fd = events[i].ident;
if (fd == mPipe[0]) {
// pipe was written to. break out of the loop.
goto done;
}
auto it = mFdToEntry.find(fd);
if (it == mFdToEntry.end()) {
// If fd wasn't in our map, we may have already stopped watching it. Ignore the event.
continue;
}
DirEntry *entry = it->second;
if (flags & NOTE_WRITE && entry && entry->isDir) {
// If a write occurred on a directory, we have to diff the contents of that
// directory to determine what file was added/deleted.
compareDir(fd, entry->path, watchers);
} else {
std::vector<KqueueSubscription *> subs = findSubscriptions(entry->path);
for (auto it = subs.begin(); it != subs.end(); it++) {
KqueueSubscription *sub = *it;
watchers.insert(sub->watcher);
if (flags & (NOTE_DELETE | NOTE_RENAME | NOTE_REVOKE)) {
sub->watcher->mEvents.remove(sub->path);
sub->tree->remove(sub->path);
mFdToEntry.erase((int)(size_t)entry->state);
mSubscriptions.erase(sub->path);
} else if (flags & (NOTE_WRITE | NOTE_ATTRIB | NOTE_EXTEND)) {
struct stat st;
lstat(sub->path.c_str(), &st);
if (entry->mtime != CONVERT_TIME(st.st_mtim)) {
entry->mtime = CONVERT_TIME(st.st_mtim);
sub->watcher->mEvents.update(sub->path);
}
}
}
}
}
for (auto it = watchers.begin(); it != watchers.end(); it++) {
(*it)->notify();
}
}
done:
close(mPipe[0]);
close(mPipe[1]);
mEndedSignal.notify();
}
KqueueBackend::~KqueueBackend() {
write(mPipe[1], "X", 1);
mEndedSignal.wait();
}
void KqueueBackend::subscribe(WatcherRef watcher) {
// Build a full directory tree recursively, and watch each directory.
std::shared_ptr<DirTree> tree = getTree(watcher);
for (auto it = tree->entries.begin(); it != tree->entries.end(); it++) {
bool success = watchDir(watcher, it->second.path, tree);
if (!success) {
throw WatcherError(std::string("error watching " + watcher->mDir + ": " + strerror(errno)), watcher);
}
}
}
bool KqueueBackend::watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree) {
if (watcher->isIgnored(path)) {
return false;
}
DirEntry *entry = tree->find(path);
if (!entry) {
return false;
}
KqueueSubscription sub = {
.watcher = watcher,
.path = path,
.tree = tree
};
if (!entry->state) {
int fd = open(path.c_str(), O_EVTONLY);
if (fd <= 0) {
return false;
}
struct kevent event;
EV_SET(
&event,
fd,
EVFILT_VNODE,
EV_ADD | EV_CLEAR | EV_ENABLE,
NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB | NOTE_RENAME | NOTE_REVOKE,
0,
0
);
if (kevent(mKqueue, &event, 1, NULL, 0, 0)) {
close(fd);
return false;
}
entry->state = (void *)(size_t)fd;
mFdToEntry.emplace(fd, entry);
}
sub.fd = (int)(size_t)entry->state;
mSubscriptions.emplace(path, sub);
return true;
}
std::vector<KqueueSubscription *> KqueueBackend::findSubscriptions(std::string &path) {
// Find the subscriptions affected by this path.
// Copy pointers to them into a vector so that modifying mSubscriptions doesn't invalidate the iterator.
auto range = mSubscriptions.equal_range(path);
std::vector<KqueueSubscription *> subs;
for (auto it = range.first; it != range.second; it++) {
subs.push_back(&it->second);
}
return subs;
}
bool KqueueBackend::compareDir(int fd, std::string &path, std::unordered_set<WatcherRef> &watchers) {
// macOS doesn't support fdclosedir, so we have to duplicate the file descriptor
// to ensure the closedir doesn't also stop watching.
#if __APPLE__
fd = dup(fd);
#endif
DIR *dir = fdopendir(fd);
if (dir == NULL) {
return false;
}
// fdopendir doesn't rewind to the beginning.
rewinddir(dir);
std::vector<KqueueSubscription *> subs = findSubscriptions(path);
std::string dirStart = path + DIR_SEP;
std::unordered_set<std::shared_ptr<DirTree>> trees;
for (auto it = subs.begin(); it != subs.end(); it++) {
trees.emplace((*it)->tree);
}
std::unordered_set<std::string> entries;
struct dirent *entry;
while ((entry = readdir(dir))) {
if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0) {
continue;
}
std::string fullpath = dirStart + entry->d_name;
entries.emplace(fullpath);
for (auto it = trees.begin(); it != trees.end(); it++) {
std::shared_ptr<DirTree> tree = *it;
if (!tree->find(fullpath)) {
struct stat st;
fstatat(fd, entry->d_name, &st, AT_SYMLINK_NOFOLLOW);
tree->add(fullpath, CONVERT_TIME(st.st_mtim), S_ISDIR(st.st_mode));
// Notify all watchers with the same tree.
for (auto i = subs.begin(); i != subs.end(); i++) {
KqueueSubscription *sub = *i;
if (sub->tree == tree) {
if (sub->watcher->isIgnored(fullpath)) {
continue;
}
sub->watcher->mEvents.create(fullpath);
watchers.emplace(sub->watcher);
bool success = watchDir(sub->watcher, fullpath, sub->tree);
if (!success) {
sub->tree->remove(fullpath);
return false;
}
}
}
}
}
}
for (auto it = trees.begin(); it != trees.end(); it++) {
std::shared_ptr<DirTree> tree = *it;
for (auto entry = tree->entries.begin(); entry != tree->entries.end();) {
if (
entry->first.rfind(dirStart, 0) == 0 &&
entry->first.find(DIR_SEP, dirStart.length()) == std::string::npos &&
entries.count(entry->first) == 0
) {
// Notify all watchers with the same tree.
for (auto i = subs.begin(); i != subs.end(); i++) {
if ((*i)->tree == tree) {
KqueueSubscription *sub = *i;
if (!sub->watcher->isIgnored(entry->first)) {
sub->watcher->mEvents.remove(entry->first);
watchers.emplace(sub->watcher);
}
}
}
mFdToEntry.erase((int)(size_t)entry->second.state);
mSubscriptions.erase(entry->first);
entry = tree->entries.erase(entry);
} else {
entry++;
}
}
}
#if __APPLE__
closedir(dir);
#else
fdclosedir(dir);
#endif
return true;
}
void KqueueBackend::unsubscribe(WatcherRef watcher) {
// Find any subscriptions pointing to this watcher, and remove them.
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
if (it->second.watcher.get() == watcher.get()) {
if (mSubscriptions.count(it->first) == 1) {
// Closing the file descriptor automatically unwatches it in the kqueue.
close(it->second.fd);
mFdToEntry.erase(it->second.fd);
}
it = mSubscriptions.erase(it);
} else {
it++;
}
}
}
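
The core of `compareDir` above is a set difference: what the watched directory contains now versus what the cached tree last saw. A reduced, portable sketch of that diff using `std::filesystem` in place of the kqueue-driven `DirTree` (the cache here is just an in-memory set; names are illustrative):

```cpp
#include <filesystem>
#include <iostream>
#include <string>
#include <unordered_set>

namespace fs = std::filesystem;

// Compare a directory's current entries against a previously cached listing
// and report what was created or deleted, like KqueueBackend::compareDir
// does against its DirTree.
void compareDir(const fs::path &dir, std::unordered_set<std::string> &cached) {
  std::unordered_set<std::string> current;
  for (const auto &entry : fs::directory_iterator(dir)) {
    current.insert(entry.path().string());
  }
  for (const auto &path : current) {
    if (!cached.count(path)) std::cout << "create " << path << "\n";
  }
  for (const auto &path : cached) {
    if (!current.count(path)) std::cout << "delete " << path << "\n";
  }
  cached = std::move(current);  // the new listing becomes the cache
}

int main() {
  std::unordered_set<std::string> cached;
  compareDir(fs::temp_directory_path(), cached);  // first run reports everything as created
  compareDir(fs::temp_directory_path(), cached);  // second run reports nothing new
}
```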

View File

@@ -0,0 +1,35 @@
#ifndef KQUEUE_H
#define KQUEUE_H
#include <unordered_map>
#include <sys/event.h>
#include "../shared/BruteForceBackend.hh"
#include "../DirTree.hh"
#include "../Signal.hh"
struct KqueueSubscription {
WatcherRef watcher;
std::string path;
std::shared_ptr<DirTree> tree;
int fd;
};
class KqueueBackend : public BruteForceBackend {
public:
void start() override;
~KqueueBackend();
void subscribe(WatcherRef watcher) override;
void unsubscribe(WatcherRef watcher) override;
private:
int mKqueue;
int mPipe[2];
std::unordered_multimap<std::string, KqueueSubscription> mSubscriptions;
std::unordered_map<int, DirEntry *> mFdToEntry;
Signal mEndedSignal;
bool watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree);
bool compareDir(int fd, std::string &dir, std::unordered_set<WatcherRef> &watchers);
std::vector<KqueueSubscription *> findSubscriptions(std::string &path);
};
#endif

View File

@@ -0,0 +1,232 @@
#include <memory>
#include <poll.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
#include "InotifyBackend.hh"
#define INOTIFY_MASK \
IN_ATTRIB | IN_CREATE | IN_DELETE | \
IN_DELETE_SELF | IN_MODIFY | IN_MOVE_SELF | IN_MOVED_FROM | \
IN_MOVED_TO | IN_DONT_FOLLOW | IN_ONLYDIR | IN_EXCL_UNLINK
#define BUFFER_SIZE 8192
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
void InotifyBackend::start() {
// Create a pipe that we will write to when we want to end the thread.
int err = pipe2(mPipe, O_CLOEXEC | O_NONBLOCK);
if (err == -1) {
throw std::runtime_error(std::string("Unable to open pipe: ") + strerror(errno));
}
// Init inotify file descriptor.
mInotify = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
if (mInotify == -1) {
throw std::runtime_error(std::string("Unable to initialize inotify: ") + strerror(errno));
}
pollfd pollfds[2];
pollfds[0].fd = mPipe[0];
pollfds[0].events = POLLIN;
pollfds[0].revents = 0;
pollfds[1].fd = mInotify;
pollfds[1].events = POLLIN;
pollfds[1].revents = 0;
notifyStarted();
// Loop until we get an event from the pipe.
while (true) {
int result = poll(pollfds, 2, 500);
if (result < 0) {
throw std::runtime_error(std::string("Unable to poll: ") + strerror(errno));
}
if (pollfds[0].revents) {
break;
}
if (pollfds[1].revents) {
handleEvents();
}
}
close(mPipe[0]);
close(mPipe[1]);
close(mInotify);
mEndedSignal.notify();
}
InotifyBackend::~InotifyBackend() {
write(mPipe[1], "X", 1);
mEndedSignal.wait();
}
// This function is called by Backend::watch which takes a lock on mMutex
void InotifyBackend::subscribe(WatcherRef watcher) {
// Build a full directory tree recursively, and watch each directory.
std::shared_ptr<DirTree> tree = getTree(watcher);
for (auto it = tree->entries.begin(); it != tree->entries.end(); it++) {
if (it->second.isDir) {
bool success = watchDir(watcher, it->second.path, tree);
if (!success) {
throw WatcherError(std::string("inotify_add_watch on '") + it->second.path + std::string("' failed: ") + strerror(errno), watcher);
}
}
}
}
bool InotifyBackend::watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree) {
int wd = inotify_add_watch(mInotify, path.c_str(), INOTIFY_MASK);
if (wd == -1) {
return false;
}
std::shared_ptr<InotifySubscription> sub = std::make_shared<InotifySubscription>();
sub->tree = tree;
sub->path = path;
sub->watcher = watcher;
mSubscriptions.emplace(wd, sub);
return true;
}
void InotifyBackend::handleEvents() {
char buf[BUFFER_SIZE] __attribute__ ((aligned(__alignof__(struct inotify_event))));
struct inotify_event *event;
// Track all of the watchers that are touched so we can notify them at the end of the events.
std::unordered_set<WatcherRef> watchers;
while (true) {
int n = read(mInotify, &buf, BUFFER_SIZE);
if (n < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
break;
}
throw std::runtime_error(std::string("Error reading from inotify: ") + strerror(errno));
}
if (n == 0) {
break;
}
for (char *ptr = buf; ptr < buf + n; ptr += sizeof(*event) + event->len) {
event = (struct inotify_event *)ptr;
if ((event->mask & IN_Q_OVERFLOW) == IN_Q_OVERFLOW) {
// overflow
continue;
}
handleEvent(event, watchers);
}
}
for (auto it = watchers.begin(); it != watchers.end(); it++) {
(*it)->notify();
}
}
void InotifyBackend::handleEvent(struct inotify_event *event, std::unordered_set<WatcherRef> &watchers) {
std::unique_lock<std::mutex> lock(mMutex);
// Find the subscriptions for this watch descriptor
auto range = mSubscriptions.equal_range(event->wd);
std::unordered_set<std::shared_ptr<InotifySubscription>> set;
for (auto it = range.first; it != range.second; it++) {
set.insert(it->second);
}
for (auto it = set.begin(); it != set.end(); it++) {
if (handleSubscription(event, *it)) {
watchers.insert((*it)->watcher);
}
}
}
bool InotifyBackend::handleSubscription(struct inotify_event *event, std::shared_ptr<InotifySubscription> sub) {
// Build the full path and check if it's in our ignore list.
std::shared_ptr<Watcher> watcher = sub->watcher;
std::string path = std::string(sub->path);
bool isDir = event->mask & IN_ISDIR;
if (event->len > 0) {
path += "/" + std::string(event->name);
}
if (watcher->isIgnored(path)) {
return false;
}
// If this is a create, check if it's a directory and start watching if it is.
// In any case, keep the directory tree up to date.
if (event->mask & (IN_CREATE | IN_MOVED_TO)) {
watcher->mEvents.create(path);
struct stat st;
// Use lstat to avoid resolving symbolic links that we cannot watch anyway
// https://github.com/parcel-bundler/watcher/issues/76
lstat(path.c_str(), &st);
DirEntry *entry = sub->tree->add(path, CONVERT_TIME(st.st_mtim), S_ISDIR(st.st_mode));
if (entry->isDir) {
bool success = watchDir(watcher, path, sub->tree);
if (!success) {
sub->tree->remove(path);
return false;
}
}
} else if (event->mask & (IN_MODIFY | IN_ATTRIB)) {
watcher->mEvents.update(path);
struct stat st;
stat(path.c_str(), &st);
sub->tree->update(path, CONVERT_TIME(st.st_mtim));
} else if (event->mask & (IN_DELETE | IN_DELETE_SELF | IN_MOVED_FROM | IN_MOVE_SELF)) {
bool isSelfEvent = (event->mask & (IN_DELETE_SELF | IN_MOVE_SELF));
// Ignore delete/move self events unless this is the recursive watch root
if (isSelfEvent && path != watcher->mDir) {
return false;
}
// If the entry being deleted/moved is a directory, remove it from the list of subscriptions
// XXX: self events don't have the IN_ISDIR mask
if (isSelfEvent || isDir) {
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
if (it->second->path == path) {
it = mSubscriptions.erase(it);
} else {
++it;
}
}
}
watcher->mEvents.remove(path);
sub->tree->remove(path);
}
return true;
}
// This function is called by Backend::unwatch which takes a lock on mMutex
void InotifyBackend::unsubscribe(WatcherRef watcher) {
// Find any subscriptions pointing to this watcher, and remove them.
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
if (it->second->watcher.get() == watcher.get()) {
if (mSubscriptions.count(it->first) == 1) {
int err = inotify_rm_watch(mInotify, it->first);
if (err == -1) {
throw WatcherError(std::string("Unable to remove watcher: ") + strerror(errno), watcher);
}
}
it = mSubscriptions.erase(it);
} else {
it++;
}
}
}
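
The shutdown handshake between `start()` and `~InotifyBackend()` above is the classic self-pipe trick: the destructor writes one byte into a pipe that the event loop is polling, which wakes `poll()` and lets the loop exit cleanly. A minimal POSIX sketch of just that mechanism, with no inotify involved:

```cpp
#include <poll.h>
#include <unistd.h>
#include <cstdio>
#include <thread>

int main() {
  int pipefd[2];
  if (pipe(pipefd) == -1) return 1;

  std::thread loop([&] {
    pollfd pfd{pipefd[0], POLLIN, 0};
    while (true) {
      int n = poll(&pfd, 1, 500);   // wake up periodically, like the 500ms timeout above
      if (n < 0) break;             // poll error
      if (pfd.revents & POLLIN) {
        std::puts("loop: shutdown byte received");
        break;                      // the writer asked us to stop
      }
      // timeout: a real backend would drain inotify events here
    }
  });

  (void)write(pipefd[1], "X", 1);   // what ~InotifyBackend() does to end the thread
  loop.join();
  close(pipefd[0]);
  close(pipefd[1]);
}
```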

View File

@@ -0,0 +1,34 @@
#ifndef INOTIFY_H
#define INOTIFY_H
#include <unordered_map>
#include <sys/inotify.h>
#include "../shared/BruteForceBackend.hh"
#include "../DirTree.hh"
#include "../Signal.hh"
struct InotifySubscription {
std::shared_ptr<DirTree> tree;
std::string path;
WatcherRef watcher;
};
class InotifyBackend : public BruteForceBackend {
public:
void start() override;
~InotifyBackend();
void subscribe(WatcherRef watcher) override;
void unsubscribe(WatcherRef watcher) override;
private:
int mPipe[2];
int mInotify;
std::unordered_multimap<int, std::shared_ptr<InotifySubscription>> mSubscriptions;
Signal mEndedSignal;
bool watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree);
void handleEvents();
void handleEvent(struct inotify_event *event, std::unordered_set<WatcherRef> &watchers);
bool handleSubscription(struct inotify_event *event, std::shared_ptr<InotifySubscription> sub);
};
#endif

View File

@@ -0,0 +1,338 @@
#include <CoreServices/CoreServices.h>
#include <sys/stat.h>
#include <string>
#include <fstream>
#include <unordered_set>
#include "../Event.hh"
#include "../Backend.hh"
#include "./FSEventsBackend.hh"
#include "../Watcher.hh"
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
#define IGNORED_FLAGS (kFSEventStreamEventFlagItemIsHardlink | kFSEventStreamEventFlagItemIsLastHardlink | kFSEventStreamEventFlagItemIsSymlink | kFSEventStreamEventFlagItemIsDir | kFSEventStreamEventFlagItemIsFile)
void stopStream(FSEventStreamRef stream, CFRunLoopRef runLoop) {
FSEventStreamStop(stream);
FSEventStreamUnscheduleFromRunLoop(stream, runLoop, kCFRunLoopDefaultMode);
FSEventStreamInvalidate(stream);
FSEventStreamRelease(stream);
}
// macOS has a case-insensitive file system by default. In order to detect
// file renames that only affect case, we need to get the canonical path
// and compare it with the input path to determine whether a file was created or deleted.
bool pathExists(char *path) {
int fd = open(path, O_RDONLY | O_SYMLINK);
if (fd == -1) {
return false;
}
char buf[PATH_MAX];
if (fcntl(fd, F_GETPATH, buf) == -1) {
close(fd);
return false;
}
bool res = strncmp(path, buf, PATH_MAX) == 0;
close(fd);
return res;
}
class State: public WatcherState {
public:
FSEventStreamRef stream;
std::shared_ptr<DirTree> tree;
uint64_t since;
};
void FSEventsCallback(
ConstFSEventStreamRef streamRef,
void *clientCallBackInfo,
size_t numEvents,
void *eventPaths,
const FSEventStreamEventFlags eventFlags[],
const FSEventStreamEventId eventIds[]
) {
char **paths = (char **)eventPaths;
std::shared_ptr<Watcher>& watcher = *static_cast<std::shared_ptr<Watcher> *>(clientCallBackInfo);
EventList& list = watcher->mEvents;
if (watcher->state == nullptr) {
return;
}
auto stateGuard = watcher->state;
auto* state = static_cast<State*>(stateGuard.get());
uint64_t since = state->since;
bool deletedRoot = false;
for (size_t i = 0; i < numEvents; ++i) {
bool isCreated = (eventFlags[i] & kFSEventStreamEventFlagItemCreated) == kFSEventStreamEventFlagItemCreated;
bool isRemoved = (eventFlags[i] & kFSEventStreamEventFlagItemRemoved) == kFSEventStreamEventFlagItemRemoved;
bool isModified = (eventFlags[i] & kFSEventStreamEventFlagItemModified) == kFSEventStreamEventFlagItemModified ||
(eventFlags[i] & kFSEventStreamEventFlagItemInodeMetaMod) == kFSEventStreamEventFlagItemInodeMetaMod ||
(eventFlags[i] & kFSEventStreamEventFlagItemFinderInfoMod) == kFSEventStreamEventFlagItemFinderInfoMod ||
(eventFlags[i] & kFSEventStreamEventFlagItemChangeOwner) == kFSEventStreamEventFlagItemChangeOwner ||
(eventFlags[i] & kFSEventStreamEventFlagItemXattrMod) == kFSEventStreamEventFlagItemXattrMod;
bool isRenamed = (eventFlags[i] & kFSEventStreamEventFlagItemRenamed) == kFSEventStreamEventFlagItemRenamed;
bool isDone = (eventFlags[i] & kFSEventStreamEventFlagHistoryDone) == kFSEventStreamEventFlagHistoryDone;
bool isDir = (eventFlags[i] & kFSEventStreamEventFlagItemIsDir) == kFSEventStreamEventFlagItemIsDir;
if (eventFlags[i] & kFSEventStreamEventFlagMustScanSubDirs) {
if (eventFlags[i] & kFSEventStreamEventFlagUserDropped) {
list.error("Events were dropped by the FSEvents client. File system must be re-scanned.");
} else if (eventFlags[i] & kFSEventStreamEventFlagKernelDropped) {
list.error("Events were dropped by the kernel. File system must be re-scanned.");
} else {
list.error("Too many events. File system must be re-scanned.");
}
}
if (isDone) {
watcher->notify();
break;
}
auto ignoredFlags = IGNORED_FLAGS;
if (__builtin_available(macOS 10.13, *)) {
ignoredFlags |= kFSEventStreamEventFlagItemCloned;
}
// If we don't care about any of the flags that are set, ignore this event.
if ((eventFlags[i] & ~ignoredFlags) == 0) {
continue;
}
// FSEvents exclusion paths only apply to files, not directories.
if (watcher->isIgnored(paths[i])) {
continue;
}
// Handle unambiguous events first
if (isCreated && !(isRemoved || isModified || isRenamed)) {
state->tree->add(paths[i], 0, isDir);
list.create(paths[i]);
} else if (isRemoved && !(isCreated || isModified || isRenamed)) {
state->tree->remove(paths[i]);
list.remove(paths[i]);
if (paths[i] == watcher->mDir) {
deletedRoot = true;
}
} else if (isModified && !(isCreated || isRemoved || isRenamed)) {
struct stat file;
if (stat(paths[i], &file)) {
continue;
}
// Ignore if mtime is the same as the last event.
// This prevents duplicate events from being emitted.
// If tv_nsec is zero, the file system probably only has second-level
// granularity, so allow the event through in that case.
uint64_t mtime = CONVERT_TIME(file.st_mtimespec);
DirEntry *entry = state->tree->find(paths[i]);
if (entry && mtime == entry->mtime && file.st_mtimespec.tv_nsec != 0) {
continue;
}
if (entry) {
// Update mtime.
entry->mtime = mtime;
} else {
// Add to tree if this path has not been discovered yet.
state->tree->add(paths[i], mtime, S_ISDIR(file.st_mode));
}
list.update(paths[i]);
} else {
// If multiple flags were set, then we need to call `stat` to determine if the file really exists.
// This helps disambiguate creates, updates, and deletes.
struct stat file;
if (stat(paths[i], &file) || !pathExists(paths[i])) {
// File does not exist, so we have to assume it was removed. This is not exact since the
// flags set by fsevents get coalesced together (e.g. created & deleted), so there is no way to
// know whether the create and delete both happened since our snapshot (in which case
// we'd rather ignore this event completely). This will result in some extra delete events
// being emitted for files we don't know about, but that is the best we can do.
state->tree->remove(paths[i]);
list.remove(paths[i]);
if (paths[i] == watcher->mDir) {
deletedRoot = true;
}
continue;
}
// If the file was modified, and existed before, then this is an update, otherwise a create.
uint64_t ctime = CONVERT_TIME(file.st_birthtimespec);
uint64_t mtime = CONVERT_TIME(file.st_mtimespec);
DirEntry *entry = !since ? state->tree->find(paths[i]) : NULL;
if (entry && entry->mtime == mtime && file.st_mtimespec.tv_nsec != 0) {
continue;
}
// Some mounted file systems report a creation time of 0 (the Unix epoch), which we special-case.
if (isModified && (entry || (ctime <= since && ctime != 0))) {
state->tree->update(paths[i], mtime);
list.update(paths[i]);
} else {
state->tree->add(paths[i], mtime, S_ISDIR(file.st_mode));
list.create(paths[i]);
}
}
}
if (!since) {
watcher->notify();
}
// Stop watching if the root directory was deleted.
if (deletedRoot) {
stopStream((FSEventStreamRef)streamRef, CFRunLoopGetCurrent());
watcher->state = nullptr;
}
}
void checkWatcher(WatcherRef watcher) {
struct stat file;
if (stat(watcher->mDir.c_str(), &file)) {
throw WatcherError(strerror(errno), watcher);
}
if (!S_ISDIR(file.st_mode)) {
throw WatcherError(strerror(ENOTDIR), watcher);
}
}
void FSEventsBackend::startStream(WatcherRef watcher, FSEventStreamEventId id) {
checkWatcher(watcher);
CFAbsoluteTime latency = 0.001;
CFStringRef fileWatchPath = CFStringCreateWithCString(
NULL,
watcher->mDir.c_str(),
kCFStringEncodingUTF8
);
CFArrayRef pathsToWatch = CFArrayCreate(
NULL,
(const void **)&fileWatchPath,
1,
NULL
);
// Make a watcher reference we can pass into the callback. This keeps the watcher's reference count bumped while the stream holds it.
std::shared_ptr<Watcher>* callbackWatcher = new std::shared_ptr<Watcher> (watcher);
FSEventStreamContext callbackInfo {0, static_cast<void*> (callbackWatcher), nullptr, nullptr, nullptr};
FSEventStreamRef stream = FSEventStreamCreate(
NULL,
&FSEventsCallback,
&callbackInfo,
pathsToWatch,
id,
latency,
kFSEventStreamCreateFlagFileEvents
);
CFMutableArrayRef exclusions = CFArrayCreateMutable(NULL, watcher->mIgnorePaths.size(), NULL);
for (auto it = watcher->mIgnorePaths.begin(); it != watcher->mIgnorePaths.end(); it++) {
CFStringRef path = CFStringCreateWithCString(
NULL,
it->c_str(),
kCFStringEncodingUTF8
);
CFArrayAppendValue(exclusions, (const void *)path);
}
FSEventStreamSetExclusionPaths(stream, exclusions);
FSEventStreamScheduleWithRunLoop(stream, mRunLoop, kCFRunLoopDefaultMode);
bool started = FSEventStreamStart(stream);
CFRelease(pathsToWatch);
CFRelease(fileWatchPath);
if (!started) {
FSEventStreamRelease(stream);
throw WatcherError("Error starting FSEvents stream", watcher);
}
auto stateGuard = watcher->state;
State* s = static_cast<State*>(stateGuard.get());
s->tree = std::make_shared<DirTree>(watcher->mDir);
s->stream = stream;
}
void FSEventsBackend::start() {
mRunLoop = CFRunLoopGetCurrent();
CFRetain(mRunLoop);
// Signal the waiting thread once the run loop has started.
CFRunLoopPerformBlock(mRunLoop, kCFRunLoopDefaultMode, ^ {
notifyStarted();
});
CFRunLoopWakeUp(mRunLoop);
CFRunLoopRun();
}
FSEventsBackend::~FSEventsBackend() {
std::unique_lock<std::mutex> lock(mMutex);
CFRunLoopStop(mRunLoop);
CFRelease(mRunLoop);
}
void FSEventsBackend::writeSnapshot(WatcherRef watcher, std::string *snapshotPath) {
std::unique_lock<std::mutex> lock(mMutex);
checkWatcher(watcher);
FSEventStreamEventId id = FSEventsGetCurrentEventId();
std::ofstream ofs(*snapshotPath);
ofs << id;
ofs << "\n";
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
ofs << CONVERT_TIME(now);
}
void FSEventsBackend::getEventsSince(WatcherRef watcher, std::string *snapshotPath) {
std::unique_lock<std::mutex> lock(mMutex);
std::ifstream ifs(*snapshotPath);
if (ifs.fail()) {
return;
}
FSEventStreamEventId id;
uint64_t since;
ifs >> id;
ifs >> since;
auto s = std::make_shared<State>();
s->since = since;
watcher->state = s;
startStream(watcher, id);
watcher->wait();
stopStream(s->stream, mRunLoop);
watcher->state = nullptr;
}
// This function is called by Backend::watch which takes a lock on mMutex
void FSEventsBackend::subscribe(WatcherRef watcher) {
auto s = std::make_shared<State>();
s->since = 0;
watcher->state = s;
startStream(watcher, kFSEventStreamEventIdSinceNow);
}
// This function is called by Backend::unwatch which takes a lock on mMutex
void FSEventsBackend::unsubscribe(WatcherRef watcher) {
auto stateGuard = watcher->state;
State* s = static_cast<State*>(stateGuard.get());
if (s != nullptr) {
stopStream(s->stream, mRunLoop);
watcher->state = nullptr;
}
}
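
The duplicate-suppression logic above hinges on collapsing `struct timespec` into a single nanosecond counter (the `CONVERT_TIME` macro used throughout these backends) and skipping events whose mtime matches the cached value. A small POSIX sketch of that comparison, with the cache reduced to a local variable:

```cpp
#include <sys/stat.h>
#include <cstdint>
#include <cstdio>

#if __APPLE__
#define st_mtim st_mtimespec
#endif

// Same idea as the CONVERT_TIME macro: fold seconds and nanoseconds into one value.
static uint64_t toNanos(const struct timespec &ts) {
  return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main() {
  const char *path = "/tmp";  // any existing path works for this demo
  struct stat st;
  if (stat(path, &st)) return 1;
  uint64_t cached = toNanos(st.st_mtim);   // what the DirTree would remember

  if (stat(path, &st)) return 1;
  uint64_t current = toNanos(st.st_mtim);

  // An event whose mtime equals the cached value is dropped as a duplicate,
  // unless the file system only has second-level precision (tv_nsec == 0).
  std::puts(current == cached ? "duplicate, skip" : "changed, emit update");
}
```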

View File

@@ -0,0 +1,20 @@
#ifndef FS_EVENTS_H
#define FS_EVENTS_H
#include <CoreServices/CoreServices.h>
#include "../Backend.hh"
class FSEventsBackend : public Backend {
public:
void start() override;
~FSEventsBackend();
void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) override;
void getEventsSince(WatcherRef watcher, std::string *snapshotPath) override;
void subscribe(WatcherRef watcher) override;
void unsubscribe(WatcherRef watcher) override;
private:
void startStream(WatcherRef watcher, FSEventStreamEventId id);
CFRunLoopRef mRunLoop;
};
#endif

View File

@@ -0,0 +1,41 @@
#include <string>
#include "../DirTree.hh"
#include "../Event.hh"
#include "./BruteForceBackend.hh"
std::shared_ptr<DirTree> BruteForceBackend::getTree(WatcherRef watcher, bool shouldRead) {
auto tree = DirTree::getCached(watcher->mDir);
// If the tree is not complete, read it if needed.
if (!tree->isComplete && shouldRead) {
readTree(watcher, tree);
tree->isComplete = true;
}
return tree;
}
void BruteForceBackend::writeSnapshot(WatcherRef watcher, std::string *snapshotPath) {
std::unique_lock<std::mutex> lock(mMutex);
auto tree = getTree(watcher);
FILE *f = fopen(snapshotPath->c_str(), "w");
if (!f) {
throw std::runtime_error(std::string("Unable to open snapshot file: ") + strerror(errno));
}
tree->write(f);
fclose(f);
}
void BruteForceBackend::getEventsSince(WatcherRef watcher, std::string *snapshotPath) {
std::unique_lock<std::mutex> lock(mMutex);
FILE *f = fopen(snapshotPath->c_str(), "r");
if (!f) {
throw std::runtime_error(std::string("Unable to open snapshot file: ") + strerror(errno));
}
DirTree snapshot{watcher->mDir, f};
auto now = getTree(watcher);
now->getChanges(&snapshot, watcher->mEvents);
fclose(f);
}
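
What `writeSnapshot` and `getEventsSince` amount to is: persist a path-to-mtime map, then later diff the current map against it. A self-contained sketch of that diff, standing in for `DirTree::getChanges` (which is defined elsewhere in this package); the data here is made up:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using Snapshot = std::map<std::string, uint64_t>;  // path -> mtime (nanoseconds)

// Emit create/update/delete events by comparing a saved snapshot against the
// current state, in the spirit of DirTree::getChanges.
void getChanges(const Snapshot &then, const Snapshot &now) {
  for (const auto &entry : now) {
    auto it = then.find(entry.first);
    if (it == then.end())
      std::cout << "create " << entry.first << "\n";
    else if (it->second != entry.second)
      std::cout << "update " << entry.first << "\n";
  }
  for (const auto &entry : then) {
    if (!now.count(entry.first))
      std::cout << "delete " << entry.first << "\n";
  }
}

int main() {
  Snapshot then{{"/p/a.txt", 100}, {"/p/b.txt", 200}};
  Snapshot now{{"/p/a.txt", 100}, {"/p/b.txt", 250}, {"/p/c.txt", 300}};
  getChanges(then, now);  // prints: update /p/b.txt, then create /p/c.txt
}
```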

View File

@@ -0,0 +1,25 @@
#ifndef BRUTE_FORCE_H
#define BRUTE_FORCE_H
#include "../Backend.hh"
#include "../DirTree.hh"
#include "../Watcher.hh"
class BruteForceBackend : public Backend {
public:
void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) override;
void getEventsSince(WatcherRef watcher, std::string *snapshotPath) override;
void subscribe(WatcherRef watcher) override {
throw "Brute force backend doesn't support subscriptions.";
}
void unsubscribe(WatcherRef watcher) override {
throw "Brute force backend doesn't support subscriptions.";
}
std::shared_ptr<DirTree> getTree(WatcherRef watcher, bool shouldRead = true);
private:
void readTree(WatcherRef watcher, std::shared_ptr<DirTree> tree);
};
#endif

50
node_modules/@parcel/watcher/src/unix/fts.cc generated vendored Normal file
View File

@@ -0,0 +1,50 @@
#include <string>
// weird error on linux
#ifdef __THROW
#undef __THROW
#endif
#define __THROW
#include <fts.h>
#include <sys/stat.h>
#include "../DirTree.hh"
#include "../shared/BruteForceBackend.hh"
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
#if __APPLE__
#define st_mtim st_mtimespec
#endif
void BruteForceBackend::readTree(WatcherRef watcher, std::shared_ptr<DirTree> tree) {
char *paths[2] {(char *)watcher->mDir.c_str(), NULL};
FTS *fts = fts_open(paths, FTS_NOCHDIR | FTS_PHYSICAL, NULL);
if (!fts) {
throw WatcherError(strerror(errno), watcher);
}
FTSENT *node;
bool isRoot = true;
while ((node = fts_read(fts)) != NULL) {
if (node->fts_errno) {
fts_close(fts);
throw WatcherError(strerror(node->fts_errno), watcher);
}
if (isRoot && !(node->fts_info & FTS_D)) {
fts_close(fts);
throw WatcherError(strerror(ENOTDIR), watcher);
}
if (watcher->isIgnored(std::string(node->fts_path))) {
fts_set(fts, node, FTS_SKIP);
continue;
}
tree->add(node->fts_path, CONVERT_TIME(node->fts_statp->st_mtim), (node->fts_info & FTS_D) == FTS_D);
isRoot = false;
}
fts_close(fts);
}
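
`readTree` above is a depth-first scan that records every path, its mtime, and whether it is a directory, skipping ignored subtrees via `fts_set(..., FTS_SKIP)`. A portable sketch of the same brute-force scan using `std::filesystem` instead of `fts`, with the ignore check reduced to one hard-coded directory name for illustration:

```cpp
#include <filesystem>
#include <iostream>
#include <map>
#include <string>

namespace fs = std::filesystem;

int main() {
  std::map<std::string, bool> tree;  // path -> isDir (mtime omitted for brevity)
  fs::path root = fs::temp_directory_path();

  for (auto it = fs::recursive_directory_iterator(
           root, fs::directory_options::skip_permission_denied);
       it != fs::recursive_directory_iterator(); ++it) {
    // Equivalent of fts_set(..., FTS_SKIP) for an ignored subtree.
    if (it->is_directory() && it->path().filename() == "node_modules") {
      it.disable_recursion_pending();
      continue;
    }
    tree.emplace(it->path().string(), it->is_directory());
  }
  std::cout << "scanned " << tree.size() << " entries under " << root << "\n";
}
```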

77
node_modules/@parcel/watcher/src/unix/legacy.cc generated vendored Normal file
View File

@@ -0,0 +1,77 @@
#include <string>
// weird error on linux
#ifdef __THROW
#undef __THROW
#endif
#define __THROW
#ifdef _LIBC
# include <include/sys/stat.h>
#else
# include <sys/stat.h>
#endif
#include <dirent.h>
#include <unistd.h>
#include <fcntl.h>
#include "../DirTree.hh"
#include "../shared/BruteForceBackend.hh"
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
#if __APPLE__
#define st_mtim st_mtimespec
#endif
#define ISDOT(a) (a[0] == '.' && (!a[1] || (a[1] == '.' && !a[2])))
void iterateDir(WatcherRef watcher, const std::shared_ptr <DirTree> tree, const char *relative, int parent_fd, const std::string &dirname) {
int open_flags = (O_RDONLY | O_CLOEXEC | O_DIRECTORY | O_NOCTTY | O_NONBLOCK | O_NOFOLLOW);
int new_fd = openat(parent_fd, relative, open_flags);
if (new_fd == -1) {
if (errno == EACCES) {
return; // ignore insufficient permissions
}
throw WatcherError(strerror(errno), watcher);
}
struct stat rootAttributes;
fstatat(new_fd, ".", &rootAttributes, AT_SYMLINK_NOFOLLOW);
tree->add(dirname, CONVERT_TIME(rootAttributes.st_mtim), true);
if (DIR *dir = fdopendir(new_fd)) {
while (struct dirent *ent = (errno = 0, readdir(dir))) {
if (ISDOT(ent->d_name)) continue;
std::string fullPath = dirname + "/" + ent->d_name;
if (!watcher->isIgnored(fullPath)) {
struct stat attrib;
fstatat(new_fd, ent->d_name, &attrib, AT_SYMLINK_NOFOLLOW);
bool isDir = ent->d_type == DT_DIR;
if (isDir) {
iterateDir(watcher, tree, ent->d_name, new_fd, fullPath);
} else {
tree->add(fullPath, CONVERT_TIME(attrib.st_mtim), isDir);
}
}
}
closedir(dir);
} else {
close(new_fd);
}
if (errno) {
throw WatcherError(strerror(errno), watcher);
}
}
void BruteForceBackend::readTree(WatcherRef watcher, std::shared_ptr <DirTree> tree) {
int fd = open(watcher->mDir.c_str(), O_RDONLY);
if (fd) {
iterateDir(watcher, tree, ".", fd, watcher->mDir);
close(fd);
}
}

132
node_modules/@parcel/watcher/src/wasm/WasmBackend.cc generated vendored Normal file
View File

@@ -0,0 +1,132 @@
#include <sys/stat.h>
#include "WasmBackend.hh"
#define CONVERT_TIME(ts) ((uint64_t)ts.tv_sec * 1000000000 + ts.tv_nsec)
void WasmBackend::start() {
notifyStarted();
}
void WasmBackend::subscribe(WatcherRef watcher) {
// Build a full directory tree recursively, and watch each directory.
std::shared_ptr<DirTree> tree = getTree(watcher);
for (auto it = tree->entries.begin(); it != tree->entries.end(); it++) {
if (it->second.isDir) {
watchDir(watcher, it->second.path, tree);
}
}
}
void WasmBackend::watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree) {
int wd = wasm_backend_add_watch(path.c_str(), (void *)this);
std::shared_ptr<WasmSubscription> sub = std::make_shared<WasmSubscription>();
sub->tree = tree;
sub->path = path;
sub->watcher = watcher;
mSubscriptions.emplace(wd, sub);
}
extern "C" void wasm_backend_event_handler(void *backend, int wd, int type, char *filename) {
WasmBackend *b = (WasmBackend *)(backend);
b->handleEvent(wd, type, filename);
}
void WasmBackend::handleEvent(int wd, int type, char *filename) {
// Find the subscriptions for this watch descriptor
auto range = mSubscriptions.equal_range(wd);
std::unordered_set<std::shared_ptr<WasmSubscription>> set;
for (auto it = range.first; it != range.second; it++) {
set.insert(it->second);
}
for (auto it = set.begin(); it != set.end(); it++) {
if (handleSubscription(type, filename, *it)) {
(*it)->watcher->notify();
}
}
}
bool WasmBackend::handleSubscription(int type, char *filename, std::shared_ptr<WasmSubscription> sub) {
// Build the full path and check if it's in our ignore list.
WatcherRef watcher = sub->watcher;
std::string path = std::string(sub->path);
if (filename[0] != '\0') {
path += "/" + std::string(filename);
}
if (watcher->isIgnored(path)) {
return false;
}
if (type == 1) {
struct stat st;
stat(path.c_str(), &st);
sub->tree->update(path, CONVERT_TIME(st.st_mtim));
watcher->mEvents.update(path);
} else if (type == 2) {
// Determine whether this is a create or a delete depending on whether the file still exists.
struct stat st;
if (lstat(path.c_str(), &st)) {
// If the entry being deleted/moved is a directory, remove it from the list of subscriptions
DirEntry *entry = sub->tree->find(path);
if (!entry) {
return false;
}
if (entry->isDir) {
std::string pathStart = path + DIR_SEP;
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
if (it->second->path == path || it->second->path.rfind(pathStart, 0) == 0) {
wasm_backend_remove_watch(it->first);
it = mSubscriptions.erase(it);
} else {
++it;
}
}
// Remove all sub-entries
for (auto it = sub->tree->entries.begin(); it != sub->tree->entries.end();) {
if (it->first.rfind(pathStart, 0) == 0) {
watcher->mEvents.remove(it->first);
it = sub->tree->entries.erase(it);
} else {
it++;
}
}
}
watcher->mEvents.remove(path);
sub->tree->remove(path);
} else if (sub->tree->find(path)) {
sub->tree->update(path, CONVERT_TIME(st.st_mtim));
watcher->mEvents.update(path);
} else {
watcher->mEvents.create(path);
// If this is a create, check if it's a directory and start watching if it is.
DirEntry *entry = sub->tree->add(path, CONVERT_TIME(st.st_mtim), S_ISDIR(st.st_mode));
if (entry->isDir) {
watchDir(watcher, path, sub->tree);
}
}
}
return true;
}
void WasmBackend::unsubscribe(WatcherRef watcher) {
// Find any subscriptions pointing to this watcher, and remove them.
for (auto it = mSubscriptions.begin(); it != mSubscriptions.end();) {
if (it->second->watcher.get() == watcher.get()) {
if (mSubscriptions.count(it->first) == 1) {
wasm_backend_remove_watch(it->first);
}
it = mSubscriptions.erase(it);
} else {
it++;
}
}
}

34
node_modules/@parcel/watcher/src/wasm/WasmBackend.hh generated vendored Normal file
View File

@@ -0,0 +1,34 @@
#ifndef WASM_H
#define WASM_H
#include <unordered_map>
#include "../shared/BruteForceBackend.hh"
#include "../DirTree.hh"
extern "C" {
int wasm_backend_add_watch(const char *filename, void *backend);
void wasm_backend_remove_watch(int wd);
void wasm_backend_event_handler(void *backend, int wd, int type, char *filename);
};
struct WasmSubscription {
std::shared_ptr<DirTree> tree;
std::string path;
WatcherRef watcher;
};
class WasmBackend : public BruteForceBackend {
public:
void start() override;
void subscribe(WatcherRef watcher) override;
void unsubscribe(WatcherRef watcher) override;
void handleEvent(int wd, int type, char *filename);
private:
int mWasm;
std::unordered_multimap<int, std::shared_ptr<WasmSubscription>> mSubscriptions;
void watchDir(WatcherRef watcher, std::string path, std::shared_ptr<DirTree> tree);
bool handleSubscription(int type, char *filename, std::shared_ptr<WasmSubscription> sub);
};
#endif

74
node_modules/@parcel/watcher/src/wasm/include.h generated vendored Normal file
View File

@@ -0,0 +1,74 @@
/*
Copyright Node.js contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*/
// Node does not include the headers for these functions when compiling for WASM, so add them here.
#ifdef __wasm32__
extern "C" {
NAPI_EXTERN napi_status NAPI_CDECL
napi_create_threadsafe_function(napi_env env,
napi_value func,
napi_value async_resource,
napi_value async_resource_name,
size_t max_queue_size,
size_t initial_thread_count,
void* thread_finalize_data,
napi_finalize thread_finalize_cb,
void* context,
napi_threadsafe_function_call_js call_js_cb,
napi_threadsafe_function* result);
NAPI_EXTERN napi_status NAPI_CDECL napi_get_threadsafe_function_context(
napi_threadsafe_function func, void** result);
NAPI_EXTERN napi_status NAPI_CDECL
napi_call_threadsafe_function(napi_threadsafe_function func,
void* data,
napi_threadsafe_function_call_mode is_blocking);
NAPI_EXTERN napi_status NAPI_CDECL
napi_acquire_threadsafe_function(napi_threadsafe_function func);
NAPI_EXTERN napi_status NAPI_CDECL napi_release_threadsafe_function(
napi_threadsafe_function func, napi_threadsafe_function_release_mode mode);
NAPI_EXTERN napi_status NAPI_CDECL
napi_unref_threadsafe_function(napi_env env, napi_threadsafe_function func);
NAPI_EXTERN napi_status NAPI_CDECL
napi_ref_threadsafe_function(napi_env env, napi_threadsafe_function func);
NAPI_EXTERN napi_status NAPI_CDECL
napi_create_async_work(napi_env env,
napi_value async_resource,
napi_value async_resource_name,
napi_async_execute_callback execute,
napi_async_complete_callback complete,
void* data,
napi_async_work* result);
NAPI_EXTERN napi_status NAPI_CDECL napi_delete_async_work(napi_env env,
napi_async_work work);
NAPI_EXTERN napi_status NAPI_CDECL napi_queue_async_work(napi_env env,
napi_async_work work);
NAPI_EXTERN napi_status NAPI_CDECL napi_cancel_async_work(napi_env env,
napi_async_work work);
}
#endif

302
node_modules/@parcel/watcher/src/watchman/BSER.cc generated vendored Normal file
View File

@@ -0,0 +1,302 @@
#include <stdint.h>
#include "./BSER.hh"
BSERType decodeType(std::istream &iss) {
int8_t type;
iss.read(reinterpret_cast<char*>(&type), sizeof(type));
return (BSERType) type;
}
void expectType(std::istream &iss, BSERType expected) {
BSERType got = decodeType(iss);
if (got != expected) {
throw std::runtime_error("Unexpected BSER type");
}
}
void encodeType(std::ostream &oss, BSERType type) {
int8_t t = (int8_t)type;
oss.write(reinterpret_cast<char*>(&t), sizeof(t));
}
template<typename T>
class Value : public BSERValue {
public:
T value;
Value(T val) {
value = val;
}
Value() {}
};
class BSERInteger : public Value<int64_t> {
public:
BSERInteger(int64_t value) : Value(value) {}
BSERInteger(std::istream &iss) {
int8_t int8;
int16_t int16;
int32_t int32;
int64_t int64;
BSERType type = decodeType(iss);
switch (type) {
case BSER_INT8:
iss.read(reinterpret_cast<char*>(&int8), sizeof(int8));
value = int8;
break;
case BSER_INT16:
iss.read(reinterpret_cast<char*>(&int16), sizeof(int16));
value = int16;
break;
case BSER_INT32:
iss.read(reinterpret_cast<char*>(&int32), sizeof(int32));
value = int32;
break;
case BSER_INT64:
iss.read(reinterpret_cast<char*>(&int64), sizeof(int64));
value = int64;
break;
default:
throw std::runtime_error("Invalid BSER int type");
}
}
int64_t intValue() override {
return value;
}
void encode(std::ostream &oss) override {
if (value <= INT8_MAX) {
encodeType(oss, BSER_INT8);
int8_t v = (int8_t)value;
oss.write(reinterpret_cast<char*>(&v), sizeof(v));
} else if (value <= INT16_MAX) {
encodeType(oss, BSER_INT16);
int16_t v = (int16_t)value;
oss.write(reinterpret_cast<char*>(&v), sizeof(v));
} else if (value <= INT32_MAX) {
encodeType(oss, BSER_INT32);
int32_t v = (int32_t)value;
oss.write(reinterpret_cast<char*>(&v), sizeof(v));
} else {
encodeType(oss, BSER_INT64);
oss.write(reinterpret_cast<char*>(&value), sizeof(value));
}
}
};
class BSERArray : public Value<BSER::Array> {
public:
BSERArray() : Value() {}
BSERArray(BSER::Array value) : Value(value) {}
BSERArray(std::istream &iss) {
expectType(iss, BSER_ARRAY);
int64_t len = BSERInteger(iss).intValue();
for (int64_t i = 0; i < len; i++) {
value.push_back(BSER(iss));
}
}
BSER::Array arrayValue() override {
return value;
}
void encode(std::ostream &oss) override {
encodeType(oss, BSER_ARRAY);
BSERInteger(value.size()).encode(oss);
for (auto it = value.begin(); it != value.end(); it++) {
it->encode(oss);
}
}
};
class BSERString : public Value<std::string> {
public:
BSERString(std::string value) : Value(value) {}
BSERString(std::istream &iss) {
expectType(iss, BSER_STRING);
int64_t len = BSERInteger(iss).intValue();
value.resize(len);
iss.read(&value[0], len);
}
std::string stringValue() override {
return value;
}
void encode(std::ostream &oss) override {
encodeType(oss, BSER_STRING);
BSERInteger(value.size()).encode(oss);
oss << value;
}
};
class BSERObject : public Value<BSER::Object> {
public:
BSERObject() : Value() {}
BSERObject(BSER::Object value) : Value(value) {}
BSERObject(std::istream &iss) {
expectType(iss, BSER_OBJECT);
int64_t len = BSERInteger(iss).intValue();
for (int64_t i = 0; i < len; i++) {
auto key = BSERString(iss).stringValue();
auto val = BSER(iss);
value.emplace(key, val);
}
}
BSER::Object objectValue() override {
return value;
}
void encode(std::ostream &oss) override {
encodeType(oss, BSER_OBJECT);
BSERInteger(value.size()).encode(oss);
for (auto it = value.begin(); it != value.end(); it++) {
BSERString(it->first).encode(oss);
it->second.encode(oss);
}
}
};
class BSERDouble : public Value<double> {
public:
BSERDouble(double value) : Value(value) {}
BSERDouble(std::istream &iss) {
expectType(iss, BSER_REAL);
iss.read(reinterpret_cast<char*>(&value), sizeof(value));
}
double doubleValue() override {
return value;
}
void encode(std::ostream &oss) override {
encodeType(oss, BSER_REAL);
oss.write(reinterpret_cast<char*>(&value), sizeof(value));
}
};
class BSERBoolean : public Value<bool> {
public:
BSERBoolean(bool value) : Value(value) {}
bool boolValue() override { return value; }
void encode(std::ostream &oss) override {
int8_t t = value == true ? BSER_BOOL_TRUE : BSER_BOOL_FALSE;
oss.write(reinterpret_cast<char*>(&t), sizeof(t));
}
};
class BSERNull : public Value<bool> {
public:
BSERNull() : Value(false) {}
void encode(std::ostream &oss) override {
encodeType(oss, BSER_NULL);
}
};
std::shared_ptr<BSERArray> decodeTemplate(std::istream &iss) {
expectType(iss, BSER_TEMPLATE);
auto keys = BSERArray(iss).arrayValue();
auto len = BSERInteger(iss).intValue();
std::shared_ptr<BSERArray> arr = std::make_shared<BSERArray>();
for (int64_t i = 0; i < len; i++) {
BSER::Object obj;
for (auto it = keys.begin(); it != keys.end(); it++) {
if (iss.peek() == 0x0c) {
iss.ignore(1);
continue;
}
auto val = BSER(iss);
obj.emplace(it->stringValue(), val);
}
arr->value.push_back(obj);
}
return arr;
}
BSER::BSER(std::istream &iss) {
BSERType type = decodeType(iss);
iss.unget();
switch (type) {
case BSER_ARRAY:
m_ptr = std::make_shared<BSERArray>(iss);
break;
case BSER_OBJECT:
m_ptr = std::make_shared<BSERObject>(iss);
break;
case BSER_STRING:
m_ptr = std::make_shared<BSERString>(iss);
break;
case BSER_INT8:
case BSER_INT16:
case BSER_INT32:
case BSER_INT64:
m_ptr = std::make_shared<BSERInteger>(iss);
break;
case BSER_REAL:
m_ptr = std::make_shared<BSERDouble>(iss);
break;
case BSER_BOOL_TRUE:
iss.ignore(1);
m_ptr = std::make_shared<BSERBoolean>(true);
break;
case BSER_BOOL_FALSE:
iss.ignore(1);
m_ptr = std::make_shared<BSERBoolean>(false);
break;
case BSER_NULL:
iss.ignore(1);
m_ptr = std::make_shared<BSERNull>();
break;
case BSER_TEMPLATE:
m_ptr = decodeTemplate(iss);
break;
default:
throw std::runtime_error("unknown BSER type");
}
}
BSER::BSER() : m_ptr(std::make_shared<BSERNull>()) {}
BSER::BSER(BSER::Array value) : m_ptr(std::make_shared<BSERArray>(value)) {}
BSER::BSER(BSER::Object value) : m_ptr(std::make_shared<BSERObject>(value)) {}
BSER::BSER(const char *value) : m_ptr(std::make_shared<BSERString>(value)) {}
BSER::BSER(std::string value) : m_ptr(std::make_shared<BSERString>(value)) {}
BSER::BSER(int64_t value) : m_ptr(std::make_shared<BSERInteger>(value)) {}
BSER::BSER(double value) : m_ptr(std::make_shared<BSERDouble>(value)) {}
BSER::BSER(bool value) : m_ptr(std::make_shared<BSERBoolean>(value)) {}
BSER::Array BSER::arrayValue() { return m_ptr->arrayValue(); }
BSER::Object BSER::objectValue() { return m_ptr->objectValue(); }
std::string BSER::stringValue() { return m_ptr->stringValue(); }
int64_t BSER::intValue() { return m_ptr->intValue(); }
double BSER::doubleValue() { return m_ptr->doubleValue(); }
bool BSER::boolValue() { return m_ptr->boolValue(); }
void BSER::encode(std::ostream &oss) {
m_ptr->encode(oss);
}
int64_t BSER::decodeLength(std::istream &iss) {
char pdu[2];
if (!iss.read(pdu, 2) || pdu[0] != 0 || pdu[1] != 1) {
throw std::runtime_error("Invalid BSER");
}
return BSERInteger(iss).intValue();
}
std::string BSER::encode() {
std::ostringstream oss(std::ios_base::binary);
encode(oss);
std::ostringstream res(std::ios_base::binary);
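  // PDU header: the magic bytes 0x00 0x01 followed by the encoded body length,
  // matching what decodeLength() expects on the reading side.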
res.write("\x00\x01", 2);
BSERInteger(oss.str().size()).encode(res);
res << oss.str();
return res.str();
}

69
node_modules/@parcel/watcher/src/watchman/BSER.hh generated vendored Normal file
View File

@@ -0,0 +1,69 @@
#ifndef BSER_H
#define BSER_H
#include <string>
#include <sstream>
#include <vector>
#include <unordered_map>
#include <memory>
enum BSERType {
BSER_ARRAY = 0x00,
BSER_OBJECT = 0x01,
BSER_STRING = 0x02,
BSER_INT8 = 0x03,
BSER_INT16 = 0x04,
BSER_INT32 = 0x05,
BSER_INT64 = 0x06,
BSER_REAL = 0x07,
BSER_BOOL_TRUE = 0x08,
BSER_BOOL_FALSE = 0x09,
BSER_NULL = 0x0a,
BSER_TEMPLATE = 0x0b
};
class BSERValue;
class BSER {
public:
typedef std::vector<BSER> Array;
typedef std::unordered_map<std::string, BSER> Object;
BSER();
BSER(BSER::Array value);
BSER(BSER::Object value);
BSER(std::string value);
BSER(const char *value);
BSER(int64_t value);
BSER(double value);
BSER(bool value);
BSER(std::istream &iss);
BSER::Array arrayValue();
BSER::Object objectValue();
std::string stringValue();
int64_t intValue();
double doubleValue();
bool boolValue();
void encode(std::ostream &oss);
static int64_t decodeLength(std::istream &iss);
std::string encode();
private:
std::shared_ptr<BSERValue> m_ptr;
};
class BSERValue {
protected:
friend class BSER;
virtual BSER::Array arrayValue() { return BSER::Array(); }
virtual BSER::Object objectValue() { return BSER::Object(); }
virtual std::string stringValue() { return std::string(); }
virtual int64_t intValue() { return 0; }
virtual double doubleValue() { return 0; }
virtual bool boolValue() { return false; }
virtual void encode(std::ostream &oss) {}
virtual ~BSERValue() {}
};
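// Illustrative sketch (not part of the upstream header): building a nested
// value with the BSER constructors declared above and serializing it. The
// field names and the clock string are hypothetical.
inline std::string bserEncodeExample() {
  BSER::Array fields;
  fields.push_back("name");
  fields.push_back("exists");
  BSER::Object query;
  query.emplace("fields", fields);
  query.emplace("since", std::string("c:1234:5"));
  return BSER(query).encode(); // "\x00\x01" + length prefix + encoded object
}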
#endif

175
node_modules/@parcel/watcher/src/watchman/IPC.hh generated vendored Normal file
View File

@@ -0,0 +1,175 @@
#ifndef IPC_H
#define IPC_H
#include <string>
#include <stdexcept>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#else
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#endif
class IPC {
public:
IPC(std::string path) {
mStopped = false;
#ifdef _WIN32
while (true) {
mPipe = CreateFile(
path.data(), // pipe name
GENERIC_READ | GENERIC_WRITE, // read and write access
0, // no sharing
NULL, // default security attributes
OPEN_EXISTING, // opens existing pipe
FILE_FLAG_OVERLAPPED, // attributes
NULL // no template file
);
if (mPipe != INVALID_HANDLE_VALUE) {
break;
}
if (GetLastError() != ERROR_PIPE_BUSY) {
throw std::runtime_error("Could not open pipe");
}
// Wait for pipe to become available if it is busy
if (!WaitNamedPipe(path.data(), 30000)) {
throw std::runtime_error("Error waiting for pipe");
}
}
mReader = CreateEvent(NULL, true, false, NULL);
mWriter = CreateEvent(NULL, true, false, NULL);
#else
struct sockaddr_un addr;
memset(&addr, 0, sizeof(addr));
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path, path.c_str(), sizeof(addr.sun_path) - 1);
mSock = socket(AF_UNIX, SOCK_STREAM, 0);
if (connect(mSock, (struct sockaddr *) &addr, sizeof(struct sockaddr_un))) {
throw std::runtime_error("Error connecting to socket");
}
#endif
}
~IPC() {
mStopped = true;
#ifdef _WIN32
CancelIo(mPipe);
CloseHandle(mPipe);
CloseHandle(mReader);
CloseHandle(mWriter);
#else
shutdown(mSock, SHUT_RDWR);
#endif
}
void write(std::string buf) {
#ifdef _WIN32
OVERLAPPED overlapped;
overlapped.hEvent = mWriter;
bool success = WriteFile(
mPipe, // pipe handle
buf.data(), // message
buf.size(), // message length
NULL, // bytes written
&overlapped // overlapped
);
if (mStopped) {
return;
}
if (!success) {
if (GetLastError() != ERROR_IO_PENDING) {
throw std::runtime_error("Write error");
}
}
DWORD written;
success = GetOverlappedResult(mPipe, &overlapped, &written, true);
if (!success) {
throw std::runtime_error("GetOverlappedResult failed");
}
if (written != buf.size()) {
throw std::runtime_error("Wrong number of bytes written");
}
#else
int r = 0;
for (unsigned int i = 0; i != buf.size(); i += r) {
r = ::write(mSock, &buf[i], buf.size() - i);
if (r == -1) {
if (errno == EAGAIN) {
r = 0;
} else if (mStopped) {
return;
} else {
throw std::runtime_error("Write error");
}
}
}
#endif
}
int read(char *buf, size_t len) {
#ifdef _WIN32
OVERLAPPED overlapped;
overlapped.hEvent = mReader;
bool success = ReadFile(
mPipe, // pipe handle
buf, // buffer to receive reply
len, // size of buffer
NULL, // number of bytes read
&overlapped // overlapped
);
if (!success && !mStopped) {
if (GetLastError() != ERROR_IO_PENDING) {
throw std::runtime_error("Read error");
}
}
DWORD read = 0;
success = GetOverlappedResult(mPipe, &overlapped, &read, true);
if (!success && !mStopped) {
throw std::runtime_error("GetOverlappedResult failed");
}
return read;
#else
int r = ::read(mSock, buf, len);
if (r == 0 && !mStopped) {
throw std::runtime_error("Socket ended unexpectedly");
}
if (r < 0) {
if (mStopped) {
return 0;
}
throw std::runtime_error(strerror(errno));
}
return r;
#endif
}
private:
bool mStopped;
#ifdef _WIN32
HANDLE mPipe;
HANDLE mReader;
HANDLE mWriter;
#else
int mSock;
#endif
};
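// Illustrative sketch (not part of the upstream header): a blocking round trip
// over the transport above. The socket path and request bytes are whatever the
// caller supplies; watchman message framing is handled elsewhere (see BSER.hh).
inline std::string ipcRoundTripExample(const std::string &sockPath,
                                       const std::string &request) {
  IPC ipc(sockPath);  // named pipe on Windows, unix domain socket elsewhere
  ipc.write(request);
  char buf[256];
  int n = ipc.read(buf, sizeof(buf));
  return std::string(buf, n);
}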
#endif

338
node_modules/@parcel/watcher/src/watchman/WatchmanBackend.cc generated vendored Normal file
View File

@@ -0,0 +1,338 @@
#include <string>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <algorithm>
#include "../DirTree.hh"
#include "../Event.hh"
#include "./BSER.hh"
#include "./WatchmanBackend.hh"
#ifdef _WIN32
#include "../windows/win_utils.hh"
#define S_ISDIR(mode) ((mode & _S_IFDIR) == _S_IFDIR)
#define popen _popen
#define pclose _pclose
#else
#include <sys/stat.h>
#define normalizePath(dir) dir
#endif
template<typename T>
BSER readBSER(T &&do_read) {
std::stringstream oss;
char buffer[256];
int r;
int64_t len = -1;
do {
// Start by reading a minimal amount of data in order to decode the length.
// After that, attempt to read the remaining length, up to the buffer size.
r = do_read(buffer, len == -1 ? 20 : (len < 256 ? len : 256));
oss << std::string(buffer, r);
if (len == -1) {
uint64_t l = BSER::decodeLength(oss);
len = l + oss.tellg();
}
len -= r;
} while (len > 0);
return BSER(oss);
}
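// Illustrative sketch (not part of the upstream file): the framing readBSER()
// decodes above. BSER::encode() prepends the "\x00\x01" magic plus an integer
// length; decodeLength() consumes that header so the body can be parsed with
// the BSER(std::istream &) constructor. The key and value here are hypothetical.
void bserFramingExample() {
  BSER::Object obj;
  obj.emplace("sockname", std::string("/tmp/watchman.sock"));
  std::string pdu = BSER(obj).encode();
  std::istringstream iss(pdu, std::ios_base::binary);
  BSER::decodeLength(iss); // length of the body that follows the header
  BSER decoded(iss);
  // decoded.objectValue().at("sockname").stringValue() == "/tmp/watchman.sock"
}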
std::string getSockPath() {
auto var = getenv("WATCHMAN_SOCK");
if (var && *var) {
return std::string(var);
}
FILE *fp = popen("watchman --output-encoding=bser get-sockname", "r");
if (fp == NULL || errno == ECHILD) {
throw std::runtime_error("Failed to execute watchman");
}
BSER b = readBSER([fp] (char *buf, size_t len) {
return fread(buf, sizeof(char), len, fp);
});
pclose(fp);
auto objValue = b.objectValue();
auto foundSockname = objValue.find("sockname");
if (foundSockname == objValue.end()) {
throw std::runtime_error("sockname not found");
}
return foundSockname->second.stringValue();
}
std::unique_ptr<IPC> watchmanConnect() {
std::string path = getSockPath();
return std::unique_ptr<IPC>(new IPC(path));
}
BSER watchmanRead(IPC *ipc) {
return readBSER([ipc] (char *buf, size_t len) {
return ipc->read(buf, len);
});
}
BSER::Object WatchmanBackend::watchmanRequest(BSER b) {
std::string cmd = b.encode();
mIPC->write(cmd);
mRequestSignal.notify();
mResponseSignal.wait();
mResponseSignal.reset();
if (!mError.empty()) {
std::runtime_error err = std::runtime_error(mError);
mError = std::string();
throw err;
}
return mResponse;
}
void WatchmanBackend::watchmanWatch(std::string dir) {
std::vector<BSER> cmd;
cmd.push_back("watch");
cmd.push_back(normalizePath(dir));
watchmanRequest(cmd);
}
bool WatchmanBackend::checkAvailable() {
try {
watchmanConnect();
return true;
} catch (std::exception &err) {
return false;
}
}
void handleFiles(WatcherRef watcher, BSER::Object obj) {
auto found = obj.find("files");
if (found == obj.end()) {
throw WatcherError("Error reading changes from watchman", watcher);
}
auto files = found->second.arrayValue();
for (auto it = files.begin(); it != files.end(); it++) {
auto file = it->objectValue();
auto name = file.find("name")->second.stringValue();
#ifdef _WIN32
std::replace(name.begin(), name.end(), '/', '\\');
#endif
auto mode = file.find("mode")->second.intValue();
auto isNew = file.find("new")->second.boolValue();
auto exists = file.find("exists")->second.boolValue();
auto path = watcher->mDir + DIR_SEP + name;
if (watcher->isIgnored(path)) {
continue;
}
if (isNew && exists) {
watcher->mEvents.create(path);
} else if (exists && !S_ISDIR(mode)) {
watcher->mEvents.update(path);
} else if (!isNew && !exists) {
watcher->mEvents.remove(path);
}
}
}
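// For reference (not part of the upstream file): each entry in the "files"
// array above looks roughly like
//   {"name": "src/app.js", "mode": 33188, "new": true, "exists": true}
// (values hypothetical). new && exists maps to a create event, exists on a
// non-directory maps to update, !new && !exists maps to remove, and ignored
// paths are skipped entirely.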
void WatchmanBackend::handleSubscription(BSER::Object obj) {
std::unique_lock<std::mutex> lock(mMutex);
auto subscription = obj.find("subscription")->second.stringValue();
auto it = mSubscriptions.find(subscription);
if (it == mSubscriptions.end()) {
return;
}
auto watcher = it->second;
try {
handleFiles(watcher, obj);
watcher->notify();
} catch (WatcherError &err) {
handleWatcherError(err);
}
}
void WatchmanBackend::start() {
mIPC = watchmanConnect();
notifyStarted();
while (true) {
    // If there are no active subscriptions to read from, wait for a request.
if (mSubscriptions.size() == 0) {
mRequestSignal.wait();
mRequestSignal.reset();
}
// Break out of loop if we are stopped.
if (mStopped) {
break;
}
// Attempt to read from the socket.
// If there is an error and we are stopped, break.
BSER b;
try {
b = watchmanRead(&*mIPC);
} catch (std::exception &err) {
if (mStopped) {
break;
} else if (mResponseSignal.isWaiting()) {
mError = err.what();
mResponseSignal.notify();
} else {
        // Throwing destroys the backend, so the notification at the bottom of
        // this loop would never run; notify the ended signal before rethrowing.
mEndedSignal.notify();
throw;
}
}
auto obj = b.objectValue();
auto error = obj.find("error");
if (error != obj.end()) {
mError = error->second.stringValue();
mResponseSignal.notify();
continue;
}
// If this message is for a subscription, handle it, otherwise notify the request.
auto subscription = obj.find("subscription");
if (subscription != obj.end()) {
handleSubscription(obj);
} else {
mResponse = obj;
mResponseSignal.notify();
}
}
mEndedSignal.notify();
}
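// Illustrative sketch (not part of the upstream file): watchmanRequest() above
// relies on this loop to read its response. A hypothetical one-shot query that
// talks to watchman directly, outside the signal handshake, would look like:
void watchmanVersionExample() {
  std::unique_ptr<IPC> ipc = watchmanConnect();
  BSER::Array cmd;
  cmd.push_back("version");
  ipc->write(BSER(cmd).encode());
  BSER::Object reply = watchmanRead(ipc.get()).objectValue();
  (void)reply; // e.g. reply.find("version")->second.stringValue()
}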
WatchmanBackend::~WatchmanBackend() {
  // Mark the backend as stopped, close the socket, and notify the request signal.
  // This breaks the read loop so the thread can exit.
mStopped = true;
mIPC.reset();
mRequestSignal.notify();
// If not ended yet, wait.
mEndedSignal.wait();
}
std::string WatchmanBackend::clock(WatcherRef watcher) {
BSER::Array cmd;
cmd.push_back("clock");
cmd.push_back(normalizePath(watcher->mDir));
BSER::Object obj = watchmanRequest(cmd);
auto found = obj.find("clock");
if (found == obj.end()) {
throw WatcherError("Error reading clock from watchman", watcher);
}
return found->second.stringValue();
}
void WatchmanBackend::writeSnapshot(WatcherRef watcher, std::string *snapshotPath) {
std::unique_lock<std::mutex> lock(mMutex);
watchmanWatch(watcher->mDir);
std::ofstream ofs(*snapshotPath);
ofs << clock(watcher);
}
void WatchmanBackend::getEventsSince(WatcherRef watcher, std::string *snapshotPath) {
std::unique_lock<std::mutex> lock(mMutex);
std::ifstream ifs(*snapshotPath);
if (ifs.fail()) {
return;
}
watchmanWatch(watcher->mDir);
std::string clock;
ifs >> clock;
BSER::Array cmd;
cmd.push_back("since");
cmd.push_back(normalizePath(watcher->mDir));
cmd.push_back(clock);
BSER::Object obj = watchmanRequest(cmd);
handleFiles(watcher, obj);
}
std::string getId(WatcherRef watcher) {
std::ostringstream id;
id << "parcel-";
id << static_cast<void*>(watcher.get());
return id.str();
}
// This function is called by Backend::watch which takes a lock on mMutex
void WatchmanBackend::subscribe(WatcherRef watcher) {
watchmanWatch(watcher->mDir);
std::string id = getId(watcher);
BSER::Array cmd;
cmd.push_back("subscribe");
cmd.push_back(normalizePath(watcher->mDir));
cmd.push_back(id);
BSER::Array fields;
fields.push_back("name");
fields.push_back("mode");
fields.push_back("exists");
fields.push_back("new");
BSER::Object opts;
opts.emplace("fields", fields);
opts.emplace("since", clock(watcher));
if (watcher->mIgnorePaths.size() > 0) {
BSER::Array ignore;
BSER::Array anyOf;
anyOf.push_back("anyof");
for (auto it = watcher->mIgnorePaths.begin(); it != watcher->mIgnorePaths.end(); it++) {
std::string pathStart = watcher->mDir + DIR_SEP;
if (it->rfind(pathStart, 0) == 0) {
auto relative = it->substr(pathStart.size());
BSER::Array dirname;
dirname.push_back("dirname");
dirname.push_back(relative);
anyOf.push_back(dirname);
}
}
ignore.push_back("not");
ignore.push_back(anyOf);
opts.emplace("expression", ignore);
}
cmd.push_back(opts);
watchmanRequest(cmd);
mSubscriptions.emplace(id, watcher);
mRequestSignal.notify();
}
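// For reference (not part of the upstream file): the request assembled above
// corresponds to this watchman command, shown as JSON with hypothetical values:
//   ["subscribe", "/project", "parcel-0x7f...",
//    {"fields": ["name", "mode", "exists", "new"],
//     "since": "c:1234:5",
//     "expression": ["not", ["anyof", ["dirname", "node_modules"]]]}]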
// This function is called by Backend::unwatch which takes a lock on mMutex
void WatchmanBackend::unsubscribe(WatcherRef watcher) {
std::string id = getId(watcher);
auto erased = mSubscriptions.erase(id);
if (erased) {
BSER::Array cmd;
cmd.push_back("unsubscribe");
cmd.push_back(normalizePath(watcher->mDir));
cmd.push_back(id);
watchmanRequest(cmd);
}
}

35
node_modules/@parcel/watcher/src/watchman/WatchmanBackend.hh generated vendored Normal file
View File

@@ -0,0 +1,35 @@
#ifndef WATCHMAN_H
#define WATCHMAN_H
#include "../Backend.hh"
#include "./BSER.hh"
#include "../Signal.hh"
#include "./IPC.hh"
class WatchmanBackend : public Backend {
public:
static bool checkAvailable();
void start() override;
  WatchmanBackend() : mStopped(false) {}
~WatchmanBackend();
void writeSnapshot(WatcherRef watcher, std::string *snapshotPath) override;
void getEventsSince(WatcherRef watcher, std::string *snapshotPath) override;
void subscribe(WatcherRef watcher) override;
void unsubscribe(WatcherRef watcher) override;
private:
std::unique_ptr<IPC> mIPC;
Signal mRequestSignal;
Signal mResponseSignal;
BSER::Object mResponse;
std::string mError;
std::unordered_map<std::string, WatcherRef> mSubscriptions;
bool mStopped;
Signal mEndedSignal;
std::string clock(WatcherRef watcher);
void watchmanWatch(std::string dir);
BSER::Object watchmanRequest(BSER cmd);
void handleSubscription(BSER::Object obj);
};
#endif

282
node_modules/@parcel/watcher/src/windows/WindowsBackend.cc generated vendored Normal file
View File

@@ -0,0 +1,282 @@
#include <string>
#include <stack>
#include "../DirTree.hh"
#include "../shared/BruteForceBackend.hh"
#include "./WindowsBackend.hh"
#include "./win_utils.hh"
#define DEFAULT_BUF_SIZE 1024 * 1024
#define NETWORK_BUF_SIZE 64 * 1024
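// Packs the two 32-bit halves of a FILETIME into one 64-bit value (100ns
// intervals since 1601-01-01), used as the mtime stored in the DirTree.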
#define CONVERT_TIME(ft) ULARGE_INTEGER{ft.dwLowDateTime, ft.dwHighDateTime}.QuadPart
void BruteForceBackend::readTree(WatcherRef watcher, std::shared_ptr<DirTree> tree) {
std::stack<std::string> directories;
directories.push(watcher->mDir);
while (!directories.empty()) {
HANDLE hFind = INVALID_HANDLE_VALUE;
std::string path = directories.top();
std::string spec = path + "\\*";
directories.pop();
WIN32_FIND_DATA ffd;
hFind = FindFirstFile(spec.c_str(), &ffd);
if (hFind == INVALID_HANDLE_VALUE) {
if (path == watcher->mDir) {
FindClose(hFind);
throw WatcherError("Error opening directory", watcher);
}
tree->remove(path);
continue;
}
do {
if (strcmp(ffd.cFileName, ".") != 0 && strcmp(ffd.cFileName, "..") != 0) {
std::string fullPath = path + "\\" + ffd.cFileName;
if (watcher->isIgnored(fullPath)) {
continue;
}
tree->add(fullPath, CONVERT_TIME(ffd.ftLastWriteTime), ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
directories.push(fullPath);
}
}
} while (FindNextFile(hFind, &ffd) != 0);
FindClose(hFind);
}
}
void WindowsBackend::start() {
mRunning = true;
notifyStarted();
while (mRunning) {
SleepEx(INFINITE, true);
}
}
WindowsBackend::~WindowsBackend() {
// Mark as stopped, and queue a noop function in the thread to break the loop
mRunning = false;
QueueUserAPC([](__in ULONG_PTR) {}, mThread.native_handle(), (ULONG_PTR)this);
}
class Subscription: public WatcherState {
public:
Subscription(WindowsBackend *backend, WatcherRef watcher, std::shared_ptr<DirTree> tree) {
mRunning = true;
mBackend = backend;
mWatcher = watcher;
mTree = tree;
ZeroMemory(&mOverlapped, sizeof(OVERLAPPED));
mOverlapped.hEvent = this;
mReadBuffer.resize(DEFAULT_BUF_SIZE);
mWriteBuffer.resize(DEFAULT_BUF_SIZE);
mDirectoryHandle = CreateFileW(
utf8ToUtf16(watcher->mDir).data(),
FILE_LIST_DIRECTORY,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL,
OPEN_EXISTING,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED,
NULL
);
if (mDirectoryHandle == INVALID_HANDLE_VALUE) {
throw WatcherError("Invalid handle", mWatcher);
}
// Ensure that the path is a directory
BY_HANDLE_FILE_INFORMATION info;
bool success = GetFileInformationByHandle(
mDirectoryHandle,
&info
);
if (!success) {
throw WatcherError("Could not get file information", mWatcher);
}
if (!(info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
throw WatcherError("Not a directory", mWatcher);
}
}
virtual ~Subscription() {
stop();
}
void run() {
try {
poll();
} catch (WatcherError &err) {
mBackend->handleWatcherError(err);
}
}
void stop() {
if (mRunning) {
mRunning = false;
CancelIo(mDirectoryHandle);
CloseHandle(mDirectoryHandle);
}
}
void poll() {
if (!mRunning) {
return;
}
// Asynchronously wait for changes.
int success = ReadDirectoryChangesW(
mDirectoryHandle,
mWriteBuffer.data(),
static_cast<DWORD>(mWriteBuffer.size()),
TRUE, // recursive
FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_ATTRIBUTES
| FILE_NOTIFY_CHANGE_SIZE | FILE_NOTIFY_CHANGE_LAST_WRITE,
NULL,
&mOverlapped,
[](DWORD errorCode, DWORD numBytes, LPOVERLAPPED overlapped) {
auto subscription = reinterpret_cast<Subscription *>(overlapped->hEvent);
try {
subscription->processEvents(errorCode);
} catch (WatcherError &err) {
subscription->mBackend->handleWatcherError(err);
}
}
);
if (!success) {
throw WatcherError("Failed to read changes", mWatcher);
}
}
void processEvents(DWORD errorCode) {
if (!mRunning) {
return;
}
switch (errorCode) {
case ERROR_OPERATION_ABORTED:
return;
case ERROR_INVALID_PARAMETER:
// resize buffers to network size (64kb), and try again
mReadBuffer.resize(NETWORK_BUF_SIZE);
mWriteBuffer.resize(NETWORK_BUF_SIZE);
poll();
return;
case ERROR_NOTIFY_ENUM_DIR:
throw WatcherError("Buffer overflow. Some events may have been lost.", mWatcher);
case ERROR_ACCESS_DENIED: {
// This can happen if the watched directory is deleted. Check if that is the case,
// and if so emit a delete event. Otherwise, fall through to default error case.
DWORD attrs = GetFileAttributesW(utf8ToUtf16(mWatcher->mDir).data());
bool isDir = attrs != INVALID_FILE_ATTRIBUTES && (attrs & FILE_ATTRIBUTE_DIRECTORY);
if (!isDir) {
mWatcher->mEvents.remove(mWatcher->mDir);
mTree->remove(mWatcher->mDir);
mWatcher->notify();
stop();
return;
}
}
default:
if (errorCode != ERROR_SUCCESS) {
throw WatcherError("Unknown error", mWatcher);
}
}
    // Swap the read and write buffers and poll again, so the next
    // ReadDirectoryChangesW call fills mWriteBuffer while the completed
    // events are parsed out of mReadBuffer below.
std::swap(mWriteBuffer, mReadBuffer);
poll();
// Read change events
BYTE *base = mReadBuffer.data();
while (true) {
PFILE_NOTIFY_INFORMATION info = (PFILE_NOTIFY_INFORMATION)base;
processEvent(info);
if (info->NextEntryOffset == 0) {
break;
}
base += info->NextEntryOffset;
}
mWatcher->notify();
}
void processEvent(PFILE_NOTIFY_INFORMATION info) {
std::string path = mWatcher->mDir + "\\" + utf16ToUtf8(info->FileName, info->FileNameLength / sizeof(WCHAR));
if (mWatcher->isIgnored(path)) {
return;
}
switch (info->Action) {
case FILE_ACTION_ADDED:
case FILE_ACTION_RENAMED_NEW_NAME: {
WIN32_FILE_ATTRIBUTE_DATA data;
if (GetFileAttributesExW(utf8ToUtf16(path).data(), GetFileExInfoStandard, &data)) {
mWatcher->mEvents.create(path);
mTree->add(path, CONVERT_TIME(data.ftLastWriteTime), data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
}
break;
}
case FILE_ACTION_MODIFIED: {
WIN32_FILE_ATTRIBUTE_DATA data;
if (GetFileAttributesExW(utf8ToUtf16(path).data(), GetFileExInfoStandard, &data)) {
mTree->update(path, CONVERT_TIME(data.ftLastWriteTime));
if (!(data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY)) {
mWatcher->mEvents.update(path);
}
}
break;
}
case FILE_ACTION_REMOVED:
case FILE_ACTION_RENAMED_OLD_NAME:
mWatcher->mEvents.remove(path);
mTree->remove(path);
break;
}
}
private:
WindowsBackend *mBackend;
std::shared_ptr<Watcher> mWatcher;
std::shared_ptr<DirTree> mTree;
bool mRunning;
HANDLE mDirectoryHandle;
std::vector<BYTE> mReadBuffer;
std::vector<BYTE> mWriteBuffer;
OVERLAPPED mOverlapped;
};
// This function is called by Backend::watch which takes a lock on mMutex
void WindowsBackend::subscribe(WatcherRef watcher) {
// Create a subscription for this watcher
auto sub = std::make_shared<Subscription>(this, watcher, getTree(watcher, false));
watcher->state = sub;
// Queue polling for this subscription in the correct thread.
bool success = QueueUserAPC([](__in ULONG_PTR ptr) {
Subscription *sub = (Subscription *)ptr;
sub->run();
}, mThread.native_handle(), (ULONG_PTR)sub.get());
if (!success) {
throw std::runtime_error("Unable to queue APC");
}
}
// This function is called by Backend::unwatch which takes a lock on mMutex
void WindowsBackend::unsubscribe(WatcherRef watcher) {
watcher->state = nullptr;
}

18
node_modules/@parcel/watcher/src/windows/WindowsBackend.hh generated vendored Normal file
View File

@@ -0,0 +1,18 @@
#ifndef WINDOWS_H
#define WINDOWS_H
#include <winsock2.h>
#include <windows.h>
#include "../shared/BruteForceBackend.hh"
class WindowsBackend : public BruteForceBackend {
public:
void start() override;
~WindowsBackend();
void subscribe(WatcherRef watcher) override;
void unsubscribe(WatcherRef watcher) override;
private:
bool mRunning;
};
#endif

44
node_modules/@parcel/watcher/src/windows/win_utils.cc generated vendored Normal file
View File

@@ -0,0 +1,44 @@
#include "./win_utils.hh"
std::wstring utf8ToUtf16(std::string input) {
unsigned int len = MultiByteToWideChar(CP_UTF8, 0, input.c_str(), -1, NULL, 0);
WCHAR *output = new WCHAR[len];
MultiByteToWideChar(CP_UTF8, 0, input.c_str(), -1, output, len);
std::wstring res(output);
  delete[] output;
return res;
}
std::string utf16ToUtf8(const WCHAR *input, size_t length) {
unsigned int len = WideCharToMultiByte(CP_UTF8, 0, input, length, NULL, 0, NULL, NULL);
char *output = new char[len + 1];
WideCharToMultiByte(CP_UTF8, 0, input, length, output, len, NULL, NULL);
output[len] = '\0';
std::string res(output);
  delete[] output;
return res;
}
std::string normalizePath(std::string path) {
// Prevent truncation to MAX_PATH characters by adding the \\?\ prefix
std::wstring p = utf8ToUtf16("\\\\?\\" + path);
// Get the required length for the output
unsigned int len = GetLongPathNameW(p.data(), NULL, 0);
if (!len) {
return path;
}
// Allocate output array and get long path
WCHAR *output = new WCHAR[len];
len = GetLongPathNameW(p.data(), output, len);
if (!len) {
    delete[] output;
return path;
}
// Convert back to utf8
std::string res = utf16ToUtf8(output + 4, len - 4);
  delete[] output;
return res;
}
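// Illustrative only (not part of the upstream file): normalizePath() expands
// 8.3 short names via GetLongPathNameW, e.g. (hypothetical paths)
//   normalizePath("C:\\PROGRA~1\\parcel")  ->  "C:\\Program Files\\parcel"
// and falls back to returning the input unchanged when expansion fails.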

11
node_modules/@parcel/watcher/src/windows/win_utils.hh generated vendored Normal file
View File

@@ -0,0 +1,11 @@
#ifndef WIN_UTILS_H
#define WIN_UTILS_H
#include <string>
#include <windows.h>
std::wstring utf8ToUtf16(std::string input);
std::string utf16ToUtf8(const WCHAR *input, size_t length);
std::string normalizePath(std::string path);
#endif

77
node_modules/@parcel/watcher/wrapper.js generated vendored Normal file
View File

@@ -0,0 +1,77 @@
const path = require('path');
const micromatch = require('micromatch');
const isGlob = require('is-glob');
function normalizeOptions(dir, opts = {}) {
const { ignore, ...rest } = opts;
if (Array.isArray(ignore)) {
opts = { ...rest };
for (const value of ignore) {
if (isGlob(value)) {
if (!opts.ignoreGlobs) {
opts.ignoreGlobs = [];
}
const regex = micromatch.makeRe(value, {
        // We set `dot: true` to work around an issue with the
// regular expression on Linux where the resulting
// negative lookahead `(?!(\\/|^)` was never matching
// in some cases. See also https://bit.ly/3UZlQDm
dot: true,
// C++ does not support lookbehind regex patterns, they
// were only added later to JavaScript engines
// (https://bit.ly/3V7S6UL)
lookbehinds: false
});
opts.ignoreGlobs.push(regex.source);
} else {
if (!opts.ignorePaths) {
opts.ignorePaths = [];
}
opts.ignorePaths.push(path.resolve(dir, value));
}
}
}
return opts;
}
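// Illustrative only (not part of the upstream file): normalizeOptions() above
// splits the `ignore` option into regex sources (for globs) and absolute
// paths, e.g. (hypothetical values)
//   normalizeOptions('/app', { ignore: ['**/*.log', 'dist'] })
//   // => { ignoreGlobs: ['<source of the compiled **/*.log regex>'],
//   //      ignorePaths: ['/app/dist'] }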
exports.createWrapper = (binding) => {
return {
writeSnapshot(dir, snapshot, opts) {
return binding.writeSnapshot(
path.resolve(dir),
path.resolve(snapshot),
normalizeOptions(dir, opts),
);
},
getEventsSince(dir, snapshot, opts) {
return binding.getEventsSince(
path.resolve(dir),
path.resolve(snapshot),
normalizeOptions(dir, opts),
);
},
async subscribe(dir, fn, opts) {
dir = path.resolve(dir);
opts = normalizeOptions(dir, opts);
await binding.subscribe(dir, fn, opts);
return {
unsubscribe() {
return binding.unsubscribe(dir, fn, opts);
},
};
},
unsubscribe(dir, fn, opts) {
return binding.unsubscribe(
path.resolve(dir),
fn,
normalizeOptions(dir, opts),
);
}
};
};