diff --git a/src/instance-app/jsconfig.json b/src/instance-app/jsconfig.json deleted file mode 100644 index c3886633..00000000 --- a/src/instance-app/jsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "compilerOptions": { - "baseUrl": ".", - "typeRoots": ["./types"] - } -} diff --git a/src/instance-app/pb_hooks/_ph_admin_sync.pb.js b/src/instance-app/pb_hooks/_ph_admin_sync.pb.js new file mode 100644 index 00000000..3e761df1 --- /dev/null +++ b/src/instance-app/pb_hooks/_ph_admin_sync.pb.js @@ -0,0 +1,55 @@ +onAfterBootstrap((e) => { + const dao = $app.dao() + const { mkLog } = /** @type {Lib} */ (require(`${__hooks}/_ph_lib.js`)) + + const log = mkLog(`admin-sync`) + + const { email, tokenKey, passwordHash } = JSON.parse($os.getenv(`ADMIN_SYNC`)) + + if (!email) { + log(`Not active - skipped`) + } + + const result = new DynamicModel({ + // describe the shape of the data (used also as initial values) + id: '', + }) + + try { + dao + .db() + .newQuery('SELECT * from _admins where email = {:email}') + .bind({ email }) + .one(result) + log( + `Existing admin record matching PocketHost login found - updating with latest credentials`, + ) + try { + dao + .db() + .newQuery( + 'update _admins set tokenKey={:tokenKey}, passwordHash={:passwordHash} where email={:email}', + ) + .bind({ email, tokenKey, passwordHash }) + .execute() + log(`Success`) + } catch (e) { + log(`Failed to update admin credentials: ${e}`) + } + } catch (e) { + log(`No admin record matching PocketHost credentials - creating`) + + try { + dao + .db() + .newQuery( + 'insert into _admins (email, tokenKey, passwordHash) VALUES ({:email}, {:tokenKey}, {:passwordHash})', + ) + .bind({ email, tokenKey, passwordHash }) + .execute() + log(`Success`) + } catch (e) { + log(`Failed to insert admin credentials: ${e}`) + } + } +}) diff --git a/src/instance-app/pb_hooks/_ph_lib.js b/src/instance-app/pb_hooks/_ph_lib.js new file mode 100644 index 00000000..cd68fff8 --- /dev/null +++ 
b/src/instance-app/pb_hooks/_ph_lib.js @@ -0,0 +1,19 @@ +/** @type {Lib['mkLog']} */ +const mkLog = + (namespace) => + /** + * @param {...any} s + * @returns + */ + (...s) => + console.log( + `[${namespace}]`, + ...s.map((p) => { + if (typeof p === 'object') return JSON.stringify(p, null, 2) + return p + }), + ) + +module.exports = { + mkLog, +} diff --git a/src/instance-app/tsconfig.json b/src/instance-app/tsconfig.json new file mode 100644 index 00000000..8b5486ba --- /dev/null +++ b/src/instance-app/tsconfig.json @@ -0,0 +1,8 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "noImplicitAny": false, + "strictNullChecks": false + }, + "include": ["pb_hooks", "types"] +} diff --git a/src/instance-app/types/lib.d.ts b/src/instance-app/types/lib.d.ts new file mode 100644 index 00000000..e96c2cfd --- /dev/null +++ b/src/instance-app/types/lib.d.ts @@ -0,0 +1,5 @@ +type Logger = (...args: any[]) => void + +interface Lib { + mkLog: (namespace: string) => Logger +} diff --git a/src/instance-app/types/types.d.ts b/src/instance-app/types/types.d.ts index cd2a6cf6..c6d65cf7 100644 --- a/src/instance-app/types/types.d.ts +++ b/src/instance-app/types/types.d.ts @@ -1,3 +1,4 @@ +// 1704272575 // GENERATED CODE - DO NOT MODIFY BY HAND // ------------------------------------------------------------------- @@ -14,7 +15,7 @@ * * ```js * // prints "Hello world!" on every 30 minutes - * cronAdd('hello', '*/30 * * * *', () => { + * cronAdd('hello', '*\/30 * * * *', () => { * console.log('Hello world!') * }) * ``` @@ -139,8 +140,19 @@ declare function routerPre( */ declare var __hooks: string -// skip on* hook methods as they are registered via the global on* method -type appWithoutHooks = Omit +// Utility type to exclude the on* hook methods from a type +// (hooks are separately generated as global methods). 
+// +// See https://www.typescriptlang.org/docs/handbook/2/mapped-types.html#key-remapping-via-as +type excludeHooks = { + [Property in keyof Type as Exclude]: Type[Property] +} + +// CoreApp without the on* hook methods +type CoreApp = excludeHooks + +// PocketBase without the on* hook methods +type PocketBase = excludeHooks /** * `$app` is the current running PocketBase instance that is globally available @@ -151,7 +163,7 @@ type appWithoutHooks = Omit * @namespace * @group PocketBase */ -declare var $app: appWithoutHooks +declare var $app: PocketBase /** * `$template` is a global helper to load and cache HTML templates on the fly. @@ -193,6 +205,20 @@ declare var $template: template.Registry */ declare function readerToString(reader: any, maxBytes?: number): string +/** + * Sleep pauses the current goroutine for at least the specified user duration + * (in ms). A zero or negative duration returns immediately. + * + * Example: + * + * ```js + * sleep(250) // sleeps for 250ms + * ``` + * + * @group PocketBase + */ +declare function sleep(milliseconds: number): void + /** * ArrayOf creates a placeholder array of the specified models. Usually used to * populate DB result into an array of models. @@ -345,7 +371,7 @@ interface MailerMessage extends mailer.Message {} // merge * @group PocketBase */ declare class MailerMessage implements mailer.Message { - constructor(message?: Partial) + constructor(message?: Partial) } interface Command extends cobra.Command {} // merge @@ -429,7 +455,7 @@ declare class DateTime implements types.DateTime { interface ValidationError extends ozzo_validation.Error {} // merge /** * ValidationError defines a single formatted data validation error, usually - * used as part of a error response. + * used as part of an error response. 
* * ```js * new ValidationError('invalid_title', 'Title is not valid') @@ -450,6 +476,61 @@ declare class Dao implements daos.Dao { constructor(concurrentDB?: dbx.Builder, nonconcurrentDB?: dbx.Builder) } +interface Cookie extends http.Cookie {} // merge +/** + * A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an + * HTTP response. + * + * Example: + * + * ```js + * routerAdd('POST', '/example', (c) => { + * c.setCookie( + * new Cookie({ + * name: 'example_name', + * value: 'example_value', + * path: '/', + * domain: 'example.com', + * maxAge: 10, + * secure: true, + * httpOnly: true, + * sameSite: 3, + * }), + * ) + * + * return c.redirect(200, '/') + * }) + * ``` + * + * @group PocketBase + */ +declare class Cookie implements http.Cookie { + constructor(options?: Partial) +} + +interface SubscriptionMessage extends subscriptions.Message {} // merge +/** + * SubscriptionMessage defines a realtime subscription payload. + * + * Example: + * + * ```js + * onRealtimeConnectRequest((e) => { + * e.client.send( + * new SubscriptionMessage({ + * name: 'example', + * data: '{"greeting": "Hello world"}', + * }), + * ) + * }) + * ``` + * + * @group PocketBase + */ +declare class SubscriptionMessage implements subscriptions.Message { + constructor(options?: Partial) +} + // ------------------------------------------------------------------- // dbxBinds // ------------------------------------------------------------------- @@ -506,6 +587,23 @@ declare namespace $tokens { let recordFileToken: tokens.newRecordFileToken } +// ------------------------------------------------------------------- +// mailsBinds +// ------------------------------------------------------------------- + +/** + * `$mails` defines helpers to send common admins and auth records emails like + * verification, password reset, etc. 
+ * + * @group PocketBase + */ +declare namespace $mails { + let sendAdminPasswordReset: mails.sendAdminPasswordReset + let sendRecordPasswordReset: mails.sendRecordPasswordReset + let sendRecordVerification: mails.sendRecordVerification + let sendRecordChangeEmail: mails.sendRecordChangeEmail +} + // ------------------------------------------------------------------- // securityBinds // ------------------------------------------------------------------- @@ -521,9 +619,6 @@ declare namespace $security { let randomStringWithAlphabet: security.randomStringWithAlphabet let pseudorandomString: security.pseudorandomString let pseudorandomStringWithAlphabet: security.pseudorandomStringWithAlphabet - let parseUnverifiedJWT: security.parseUnverifiedJWT - let parseJWT: security.parseJWT - let createJWT: security.newJWT let encrypt: security.encrypt let decrypt: security.decrypt let hs256: security.hs256 @@ -532,6 +627,13 @@ declare namespace $security { let md5: security.md5 let sha256: security.sha256 let sha512: security.sha512 + let createJWT: security.newJWT + + /** {@inheritDoc security.parseUnverifiedJWT} */ + export function parseUnverifiedJWT(token: string): _TygojaDict + + /** {@inheritDoc security.parseJWT} */ + export function parseJWT(token: string, verificationKey: string): _TygojaDict } // ------------------------------------------------------------------- @@ -589,7 +691,24 @@ declare namespace $filepath { * @group PocketBase */ declare namespace $os { + /** Legacy alias for $os.cmd(). */ export let exec: exec.command + + /** + * Prepares an external OS command. 
+ * + * Example: + * + * ```js + * // prepare the command to execute + * const cmd = $os.cmd('ls', '-sl') + * + * // execute the command and return its standard output as string + * const output = String.fromCharCode(...cmd.output()) + * ``` + */ + export let cmd: exec.command + export let args: os.args export let exit: os.exit export let getenv: os.getenv @@ -617,7 +736,7 @@ interface AdminLoginForm extends forms.AdminLogin {} // merge * @group PocketBase */ declare class AdminLoginForm implements forms.AdminLogin { - constructor(app: core.App) + constructor(app: CoreApp) } interface AdminPasswordResetConfirmForm @@ -629,7 +748,7 @@ interface AdminPasswordResetConfirmForm declare class AdminPasswordResetConfirmForm implements forms.AdminPasswordResetConfirm { - constructor(app: core.App) + constructor(app: CoreApp) } interface AdminPasswordResetRequestForm @@ -641,7 +760,7 @@ interface AdminPasswordResetRequestForm declare class AdminPasswordResetRequestForm implements forms.AdminPasswordResetRequest { - constructor(app: core.App) + constructor(app: CoreApp) } interface AdminUpsertForm extends forms.AdminUpsert {} // merge @@ -650,7 +769,7 @@ interface AdminUpsertForm extends forms.AdminUpsert {} // merge * @group PocketBase */ declare class AdminUpsertForm implements forms.AdminUpsert { - constructor(app: core.App, admin: models.Admin) + constructor(app: CoreApp, admin: models.Admin) } interface AppleClientSecretCreateForm extends forms.AppleClientSecretCreate {} // merge @@ -661,7 +780,7 @@ interface AppleClientSecretCreateForm extends forms.AppleClientSecretCreate {} / declare class AppleClientSecretCreateForm implements forms.AppleClientSecretCreate { - constructor(app: core.App) + constructor(app: CoreApp) } interface CollectionUpsertForm extends forms.CollectionUpsert {} // merge @@ -670,7 +789,7 @@ interface CollectionUpsertForm extends forms.CollectionUpsert {} // merge * @group PocketBase */ declare class CollectionUpsertForm implements 
forms.CollectionUpsert { - constructor(app: core.App, collection: models.Collection) + constructor(app: CoreApp, collection: models.Collection) } interface CollectionsImportForm extends forms.CollectionsImport {} // merge @@ -679,7 +798,7 @@ interface CollectionsImportForm extends forms.CollectionsImport {} // merge * @group PocketBase */ declare class CollectionsImportForm implements forms.CollectionsImport { - constructor(app: core.App) + constructor(app: CoreApp) } interface RealtimeSubscribeForm extends forms.RealtimeSubscribe {} // merge @@ -697,7 +816,7 @@ interface RecordEmailChangeConfirmForm extends forms.RecordEmailChangeConfirm {} declare class RecordEmailChangeConfirmForm implements forms.RecordEmailChangeConfirm { - constructor(app: core.App, collection: models.Collection) + constructor(app: CoreApp, collection: models.Collection) } interface RecordEmailChangeRequestForm extends forms.RecordEmailChangeRequest {} // merge @@ -708,7 +827,7 @@ interface RecordEmailChangeRequestForm extends forms.RecordEmailChangeRequest {} declare class RecordEmailChangeRequestForm implements forms.RecordEmailChangeRequest { - constructor(app: core.App, record: models.Record) + constructor(app: CoreApp, record: models.Record) } interface RecordOAuth2LoginForm extends forms.RecordOAuth2Login {} // merge @@ -718,7 +837,7 @@ interface RecordOAuth2LoginForm extends forms.RecordOAuth2Login {} // merge */ declare class RecordOAuth2LoginForm implements forms.RecordOAuth2Login { constructor( - app: core.App, + app: CoreApp, collection: models.Collection, optAuthRecord?: models.Record, ) @@ -730,7 +849,7 @@ interface RecordPasswordLoginForm extends forms.RecordPasswordLogin {} // merge * @group PocketBase */ declare class RecordPasswordLoginForm implements forms.RecordPasswordLogin { - constructor(app: core.App, collection: models.Collection) + constructor(app: CoreApp, collection: models.Collection) } interface RecordPasswordResetConfirmForm @@ -742,7 +861,7 @@ interface 
RecordPasswordResetConfirmForm declare class RecordPasswordResetConfirmForm implements forms.RecordPasswordResetConfirm { - constructor(app: core.App, collection: models.Collection) + constructor(app: CoreApp, collection: models.Collection) } interface RecordPasswordResetRequestForm @@ -754,7 +873,7 @@ interface RecordPasswordResetRequestForm declare class RecordPasswordResetRequestForm implements forms.RecordPasswordResetRequest { - constructor(app: core.App, collection: models.Collection) + constructor(app: CoreApp, collection: models.Collection) } interface RecordUpsertForm extends forms.RecordUpsert {} // merge @@ -763,7 +882,7 @@ interface RecordUpsertForm extends forms.RecordUpsert {} // merge * @group PocketBase */ declare class RecordUpsertForm implements forms.RecordUpsert { - constructor(app: core.App, record: models.Record) + constructor(app: CoreApp, record: models.Record) } interface RecordVerificationConfirmForm @@ -775,7 +894,7 @@ interface RecordVerificationConfirmForm declare class RecordVerificationConfirmForm implements forms.RecordVerificationConfirm { - constructor(app: core.App, collection: models.Collection) + constructor(app: CoreApp, collection: models.Collection) } interface RecordVerificationRequestForm @@ -787,7 +906,7 @@ interface RecordVerificationRequestForm declare class RecordVerificationRequestForm implements forms.RecordVerificationRequest { - constructor(app: core.App, collection: models.Collection) + constructor(app: CoreApp, collection: models.Collection) } interface SettingsUpsertForm extends forms.SettingsUpsert {} // merge @@ -796,7 +915,7 @@ interface SettingsUpsertForm extends forms.SettingsUpsert {} // merge * @group PocketBase */ declare class SettingsUpsertForm implements forms.SettingsUpsert { - constructor(app: core.App) + constructor(app: CoreApp) } interface TestEmailSendForm extends forms.TestEmailSend {} // merge @@ -805,7 +924,7 @@ interface TestEmailSendForm extends forms.TestEmailSend {} // merge * @group 
PocketBase */ declare class TestEmailSendForm implements forms.TestEmailSend { - constructor(app: core.App) + constructor(app: CoreApp) } interface TestS3FilesystemForm extends forms.TestS3Filesystem {} // merge @@ -814,7 +933,7 @@ interface TestS3FilesystemForm extends forms.TestS3Filesystem {} // merge * @group PocketBase */ declare class TestS3FilesystemForm implements forms.TestS3Filesystem { - constructor(app: core.App) + constructor(app: CoreApp) } // ------------------------------------------------------------------- @@ -888,6 +1007,7 @@ declare namespace $apis { indexFallback: boolean, ): echo.HandlerFunc + let requireGuestOnly: apis.requireGuestOnly let requireRecordAuth: apis.requireRecordAuth let requireAdminAuth: apis.requireAdminAuth let requireAdminAuthOnlyIfAny: apis.requireAdminAuthOnlyIfAny @@ -1441,6 +1561,7 @@ namespace os { (): Array } interface timeout { + [key: string]: any timeout(): boolean } /** PathError records an error and the operation and file path that caused it. */ @@ -1558,6 +1679,7 @@ namespace os { * syscall.Signal. */ interface Signal { + [key: string]: any string(): string signal(): void // to distinguish from other Stringers } @@ -1577,9 +1699,11 @@ namespace os { * underlying operating system process. * * On Unix systems, FindProcess always succeeds and returns a Process for - * the given pid, regardless of whether the process exists. + * the given pid, regardless of whether the process exists. To test whether + * the process actually exists, see whether p.Signal(syscall.Signal(0)) + * reports an error. */ - (pid: number): Process | undefined + (pid: number): Process } interface startProcess { /** @@ -1597,7 +1721,7 @@ namespace os { * * If there is an error, it will be of type *PathError. */ - (name: string, argv: Array, attr: ProcAttr): Process | undefined + (name: string, argv: Array, attr: ProcAttr): Process } interface Process { /** @@ -1622,7 +1746,7 @@ namespace os { * associated with the Process. 
On most operating systems, the Process must * be a child of the current process or an error will be returned. */ - wait(): ProcessState | undefined + wait(): ProcessState } interface Process { /** @@ -1735,7 +1859,7 @@ namespace os { * returns the number of bytes read and any error encountered. At end of * file, Read returns 0, io.EOF. */ - read(b: string): number + read(b: string | Array): number } interface File { /** @@ -1744,21 +1868,30 @@ namespace os { * returns a non-nil error when n < len(b). At end of file, that error is * io.EOF. */ - readAt(b: string, off: number): number + readAt(b: string | Array, off: number): number } interface File { /** ReadFrom implements io.ReaderFrom. */ readFrom(r: io.Reader): number } - type _subFPBxR = io.Writer - interface onlyWriter extends _subFPBxR {} + /** + * FileWithoutReadFrom implements all the methods of *File other than + * ReadFrom. This is used to permit ReadFrom to call io.Copy without leading + * to a recursive call to ReadFrom. + */ + type _subgUszO = File + interface fileWithoutReadFrom extends _subgUszO {} + interface fileWithoutReadFrom { + /** This ReadFrom method hides the *File ReadFrom method. */ + readFrom(_arg0: fileWithoutReadFrom): void + } interface File { /** * Write writes len(b) bytes from b to the File. It returns the number of * bytes written and an error, if any. Write returns a non-nil error when n * != len(b). */ - write(b: string): number + write(b: string | Array): number } interface File { /** @@ -1768,7 +1901,7 @@ namespace os { * * If file was opened with the O_APPEND flag, WriteAt returns an error. */ - writeAt(b: string, off: number): number + writeAt(b: string | Array, off: number): number } interface File { /** @@ -1777,10 +1910,6 @@ namespace os { * file, 1 means relative to the current offset, and 2 means relative to the * end. It returns the new offset and an error, if any. The behavior of Seek * on a file opened with O_APPEND is not specified. 
- * - * If f is a directory, the behavior of Seek varies by operating system; you - * can seek to the beginning of the directory on Unix-like operating - * systems, but not on Windows. */ seek(offset: number, whence: number): number } @@ -1811,7 +1940,7 @@ namespace os { * returned file can be used for reading; the associated file descriptor has * mode O_RDONLY. If there is an error, it will be of type *PathError. */ - (name: string): File | undefined + (name: string): File } interface create { /** @@ -1821,7 +1950,7 @@ namespace os { * for I/O; the associated file descriptor has mode O_RDWR. If there is an * error, it will be of type *PathError. */ - (name: string): File | undefined + (name: string): File } interface openFile { /** @@ -1832,14 +1961,15 @@ namespace os { * File can be used for I/O. If there is an error, it will be of type * *PathError. */ - (name: string, flag: number, perm: FileMode): File | undefined + (name: string, flag: number, perm: FileMode): File } interface rename { /** * Rename renames (moves) oldpath to newpath. If newpath already exists and * is not a directory, Rename replaces it. OS-specific restrictions may - * apply when oldpath and newpath are in different directories. If there is - * an error, it will be of type *LinkError. + * apply when oldpath and newpath are in different directories. Even within + * the same directory, on non-Unix platforms Rename is not an atomic + * operation. If there is an error, it will be of type *LinkError. */ (oldpath: string): void } @@ -1898,6 +2028,9 @@ namespace os { * On Unix, including macOS, it returns the $HOME environment variable. On * Windows, it returns %USERPROFILE%. On Plan 9, it returns the $home * environment variable. + * + * If the expected variable is not set in the environment, UserHomeDir + * returns either a platform-specific default value or a non-nil error. 
*/ (): string } @@ -1995,10 +2128,15 @@ namespace os { * DirFS("/prefix").Open("file") is the same as os.Open("/prefix/file"). So * if /prefix/file is a symbolic link pointing outside the /prefix tree, * then using DirFS does not stop the access any more than using os.Open - * does. DirFS is therefore not a general substitute for a chroot-style - * security mechanism when the directory tree contains arbitrary content. + * does. Additionally, the root of the fs.FS returned for a relative path, + * DirFS("prefix"), will be affected by later calls to Chdir. DirFS is + * therefore not a general substitute for a chroot-style security mechanism + * when the directory tree contains arbitrary content. * * The directory dir must not be "". + * + * The result implements [io/fs.StatFS], [io/fs.ReadFileFS] and + * [io/fs.ReadDirFS]. */ (dir: string): fs.FS } @@ -2006,6 +2144,23 @@ namespace os { interface dirFS { open(name: string): fs.File } + interface dirFS { + /** + * The ReadFile method calls the [ReadFile] function for the file with the + * given name in the directory. The function provides robust handling for + * small files and special file systems. Through this method, dirFS + * implements [io/fs.ReadFileFS]. + */ + readFile(name: string): string | Array + } + interface dirFS { + /** + * ReadDir reads the named directory, returning all its directory entries + * sorted by filename. Through this method, dirFS implements + * [io/fs.ReadDirFS]. + */ + readDir(name: string): Array + } interface dirFS { stat(name: string): fs.FileInfo } @@ -2015,16 +2170,17 @@ namespace os { * returns err == nil, not err == EOF. Because ReadFile reads the whole * file, it does not treat an EOF from Read as an error to be reported. */ - (name: string): string + (name: string): string | Array } interface writeFile { /** * WriteFile writes data to the named file, creating it if necessary. 
If the * file does not exist, WriteFile creates it with permissions perm (before * umask); otherwise WriteFile truncates it before writing, without changing - * permissions. + * permissions. Since WriteFile requires multiple system calls to complete, + * a failure mid-operation can leave the file in a partially written state. */ - (name: string, data: string, perm: FileMode): void + (name: string, data: string | Array, perm: FileMode): void } interface File { /** @@ -2086,7 +2242,8 @@ namespace os { interface chtimes { /** * Chtimes changes the access and modification times of the named file, - * similar to the Unix utime() or utimes() functions. + * similar to the Unix utime() or utimes() functions. A zero time.Time value + * will leave the corresponding file time unchanged. * * The underlying filesystem may truncate or round the values to a less * precise time unit. If there is an error, it will be of type *PathError. @@ -2135,7 +2292,7 @@ namespace os { * conditions described in the comments of the Fd method, and the same * constraints apply. */ - (fd: number, name: string): File | undefined + (fd: number, name: string): File } /** NewFileKind describes the kind of file to newFile. */ interface newFileKind extends Number {} @@ -2190,6 +2347,9 @@ namespace os { interface unixDirent { info(): FileInfo } + interface unixDirent { + string(): string + } interface getwd { /** * Getwd returns a rooted path name corresponding to the current directory. @@ -2225,7 +2385,7 @@ namespace os { * Pipe returns a connected pair of Files; reads from r return bytes written * to w. It returns the files and an error, if any. */ - (): [File | undefined, File | undefined] + (): [File, File] } interface getuid { /** @@ -2330,7 +2490,7 @@ namespace os { * It is the caller's responsibility to remove the file when it is no longer * needed. 
*/ - (dir: string): File | undefined + (dir: string): File } interface mkdirTemp { /** @@ -2351,8 +2511,8 @@ namespace os { (): number } /** File represents an open file descriptor. */ - type _subtNQMD = file - interface File extends _subtNQMD {} + type _subGXkEd = file + interface File extends _subGXkEd {} /** A FileInfo describes a file and is returned by Stat and Lstat. */ interface FileInfo extends fs.FileInfo {} /** @@ -2400,7 +2560,7 @@ namespace os { * * The filepath package uses either forward slashes or backslashes, depending on * the operating system. To process paths such as URLs that always use forward - * slashes regardless of the operating system, see the path package. + * slashes regardless of the operating system, see the [path] package. */ namespace filepath { interface match { @@ -2427,7 +2587,7 @@ namespace filepath { * only possible returned error is ErrBadPattern, when pattern is * malformed. * - * On Windows, escaping is disabled. Instead, '\' is treated as path + * On Windows, escaping is disabled. Instead, '' is treated as path * separator. */ (pattern: string): boolean @@ -2436,7 +2596,7 @@ namespace filepath { /** * Glob returns the names of all files matching pattern or nil if there is * no matching file. The syntax of patterns is the same as in Match. The - * pattern may describe hierarchical names such as /usr/*/bin/ed (assuming + * pattern may describe hierarchical names such as /usr/*\/bin/ed (assuming * the Separator is '/'). * * Glob ignores file system errors such as I/O errors reading directories. @@ -2457,13 +2617,16 @@ namespace filepath { * processing. It applies the following rules iteratively until no further * processing can be done: * - * 1. Replace multiple Separator elements with a single one. - * 2. Eliminate each . path name element (the current directory). - * 3. Eliminate each inner .. path name element (the parent directory) - * along with the non-.. element that precedes it. - * 4. Eliminate .. 
elements that begin a rooted path: - * that is, replace "/.." by "/" at the beginning of a path, - * assuming Separator is '/'. + * 1. Replace multiple Separator elements with a single one. + * 2. Eliminate each . path name element (the current directory). + * 3. Eliminate each inner .. path name element (the parent directory) + * + * along with the non-.. element that precedes it. + * 4. Eliminate .. elements that begin a rooted path: + * + * that is, replace "/.." by "/" at the beginning of a path, + * + * Assuming Separator is '/'. * * The returned path ends in a slash only if it represents a root directory, * such as "/" on Unix or `C:\` on Windows. @@ -2473,11 +2636,34 @@ namespace filepath { * If the result of this process is an empty string, Clean returns the * string ".". * - * See also Rob Pike, ``Lexical File Names in Plan 9 or Getting Dot-Dot - * Right,'' https://9p.io/sys/doc/lexnames.html + * On Windows, Clean does not modify the volume name other than to replace + * occurrences of "/" with `\`. For example, Clean("//host/share/../x") + * returns `\\host\share\x`. + * + * See also Rob Pike, “Lexical File Names in Plan 9 or Getting Dot-Dot + * Right,” https://9p.io/sys/doc/lexnames.html */ (path: string): string } + interface isLocal { + /** + * IsLocal reports whether path, using lexical analysis only, has all of + * these properties: + * + * - is within the subtree rooted at the directory in which path is evaluated + * - is not an absolute path + * - is not empty + * - on Windows, is not a reserved name such as "NUL" + * + * If IsLocal(path) returns true, then Join(base, path) will always produce + * a path contained within base and Clean(path) will always produce an + * unrooted path with no ".." path elements. + * + * IsLocal is a purely lexical operation. In particular, it does not account + * for the effect of any symbolic links that may exist in the filesystem. 
+ */ + (path: string): boolean + } interface toSlash { /** * ToSlash returns the result of replacing each separator character in path @@ -2578,8 +2764,9 @@ namespace filepath { * The error result returned by the function controls how Walk continues. If * the function returns the special value SkipDir, Walk skips the current * directory (path if info.IsDir() is true, otherwise path's parent - * directory). Otherwise, if the function returns a non-nil error, Walk stops - * entirely and returns that error. + * directory). If the function returns the special value SkipAll, Walk skips + * all remaining files and directories. Otherwise, if the function returns a + * non-nil error, Walk stops entirely and returns that error. * * The err argument reports an error related to path, signaling that Walk will * not walk into that directory. The function can decide how to handle that @@ -2612,6 +2799,10 @@ namespace filepath { * memory before proceeding to walk that directory. * * WalkDir does not follow symbolic links. + * + * WalkDir calls fn with paths that use the separator character appropriate + * for the operating system. This is unlike [io/fs.WalkDir], which always + * uses slash separated paths. */ (root: string, fn: fs.WalkDirFunc): void } @@ -2628,6 +2819,9 @@ namespace filepath { interface statDirEntry { info(): fs.FileInfo } + interface statDirEntry { + string(): string + } interface walk { /** * Walk walks the file tree rooted at root, calling fn for each file or @@ -2690,22 +2884,6 @@ namespace filepath { } } -/** - * Package validation provides configurable and extensible rules for validating - * data of various types. 
- */ -namespace ozzo_validation { - /** Error interface represents an validation error */ - interface Error { - error(): string - code(): string - message(): string - setMessage(_arg0: string): Error - params(): _TygojaDict - setParams(_arg0: _TygojaDict): Error - } -} - namespace security { interface s256Challenge { /** @@ -2754,30 +2932,30 @@ namespace security { import crand = rand interface encrypt { /** Encrypt encrypts data with key (must be valid 32 char aes key). */ - (data: string, key: string): string + (data: string | Array, key: string): string } interface decrypt { /** * Decrypt decrypts encrypted text with key (must be valid 32 chars aes * key). */ - (cipherText: string, key: string): string + (cipherText: string, key: string): string | Array } interface parseUnverifiedJWT { /** - * ParseUnverifiedJWT parses JWT token and returns its claims but DOES NOT - * verify the signature. + * ParseUnverifiedJWT parses JWT and returns its claims but DOES NOT verify + * the signature. * * It verifies only the exp, iat and nbf claims. */ (token: string): jwt.MapClaims } interface parseJWT { - /** ParseJWT verifies and parses JWT token and returns its claims. */ + /** ParseJWT verifies and parses JWT and returns its claims. */ (token: string, verificationKey: string): jwt.MapClaims } interface newJWT { - /** NewJWT generates and returns new HS256 signed JWT token. */ + /** NewJWT generates and returns new HS256 signed JWT. */ ( payload: jwt.MapClaims, signingKey: string, @@ -2788,8 +2966,7 @@ namespace security { /** * Deprecated: Consider replacing with NewJWT(). * - * NewToken is a legacy alias for NewJWT that generates a HS256 signed JWT - * token. + * NewToken is a legacy alias for NewJWT that generates a HS256 signed JWT. */ ( payload: jwt.MapClaims, @@ -2845,6 +3022,23 @@ namespace security { } } +/** + * Package validation provides configurable and extensible rules for validating + * data of various types. 
+ */ +namespace ozzo_validation { + /** Error interface represents an validation error */ + interface Error { + [key: string]: any + error(): string + code(): string + message(): string + setMessage(_arg0: string): Error + params(): _TygojaDict + setParams(_arg0: _TygojaDict): Error + } +} + /** * Package dbx provides a set of DB-agnostic and easy-to-use query building * methods for relational databases. @@ -2857,26 +3051,27 @@ namespace dbx { * statements, CREATE TABLE statements). */ interface Builder { + [key: string]: any /** * NewQuery creates a new Query object with the given SQL statement. The SQL * statement may contain parameter placeholders which can be bound with * actual parameter values before the statement is executed. */ - newQuery(_arg0: string): Query | undefined + newQuery(_arg0: string): Query /** * Select returns a new SelectQuery object that can be used to build a * SELECT statement. The parameters to this method should be the list column * names to be selected. A column name may have an optional alias name. For * example, Select("id", "my_name AS name"). */ - select(..._arg0: string[]): SelectQuery | undefined + select(..._arg0: string[]): SelectQuery /** * ModelQuery returns a new ModelQuery object that can be used to perform * model insertion, update, and deletion. The parameter to this method * should be a pointer to the model struct that needs to be inserted, * updated, or deleted. */ - model(_arg0: {}): ModelQuery | undefined + model(_arg0: {}): ModelQuery /** * GeneratePlaceholder generates an anonymous parameter placeholder with the * given parameter ID. @@ -2904,7 +3099,7 @@ namespace dbx { * of cols are the column names, while the values of cols are the * corresponding column values to be inserted. */ - insert(table: string, cols: Params): Query | undefined + insert(table: string, cols: Params): Query /** * Upsert creates a Query that represents an UPSERT SQL statement. 
Upsert * inserts a row into the table if the primary key or unique index is not @@ -2912,11 +3107,7 @@ namespace dbx { * cols are the column names, while the values of cols are the corresponding * column values to be inserted. */ - upsert( - table: string, - cols: Params, - ...constraints: string[] - ): Query | undefined + upsert(table: string, cols: Params, ...constraints: string[]): Query /** * Update creates a Query that represents an UPDATE SQL statement. The keys * of cols are the column names, while the values of cols are the @@ -2924,59 +3115,55 @@ namespace dbx { * UPDATE SQL statement will have no WHERE clause (be careful in this case * as the SQL statement will update ALL rows in the table). */ - update(table: string, cols: Params, where: Expression): Query | undefined + update(table: string, cols: Params, where: Expression): Query /** * Delete creates a Query that represents a DELETE SQL statement. If the * "where" expression is nil, the DELETE SQL statement will have no WHERE * clause (be careful in this case as the SQL statement will delete ALL rows * in the table). */ - delete(table: string, where: Expression): Query | undefined + delete(table: string, where: Expression): Query /** * CreateTable creates a Query that represents a CREATE TABLE SQL statement. * The keys of cols are the column names, while the values of cols are the * corresponding column types. The optional "options" parameters will be * appended to the generated SQL statement. */ - createTable( - table: string, - cols: _TygojaDict, - ...options: string[] - ): Query | undefined + createTable(table: string, cols: _TygojaDict, ...options: string[]): Query /** RenameTable creates a Query that can be used to rename a table. */ - renameTable(oldName: string): Query | undefined + renameTable(oldName: string): Query /** DropTable creates a Query that can be used to drop a table. 
*/ - dropTable(table: string): Query | undefined + dropTable(table: string): Query /** TruncateTable creates a Query that can be used to truncate a table. */ - truncateTable(table: string): Query | undefined + truncateTable(table: string): Query /** AddColumn creates a Query that can be used to add a column to a table. */ - addColumn(table: string): Query | undefined + addColumn(table: string): Query /** * DropColumn creates a Query that can be used to drop a column from a * table. */ - dropColumn(table: string): Query | undefined + dropColumn(table: string): Query /** * RenameColumn creates a Query that can be used to rename a column in a * table. */ - renameColumn(table: string): Query | undefined + renameColumn(table: string): Query /** * AlterColumn creates a Query that can be used to change the definition of * a table column. */ - alterColumn(table: string): Query | undefined + alterColumn(table: string): Query /** * AddPrimaryKey creates a Query that can be used to specify primary key(s) * for a table. The "name" parameter specifies the name of the primary key * constraint. */ - addPrimaryKey(table: string, ...cols: string[]): Query | undefined + addPrimaryKey(table: string, ...cols: string[]): Query /** * DropPrimaryKey creates a Query that can be used to remove the named * primary key constraint from a table. */ - dropPrimaryKey(table: string): Query | undefined + dropPrimaryKey(table: string): Query /** * AddForeignKey creates a Query that can be used to add a foreign key * constraint to a table. The length of cols and refCols must be the same as @@ -2989,37 +3176,37 @@ namespace dbx { cols: Array, refTable: string, ...options: string[] - ): Query | undefined + ): Query /** * DropForeignKey creates a Query that can be used to remove the named * foreign key constraint from a table. */ - dropForeignKey(table: string): Query | undefined + dropForeignKey(table: string): Query /** * CreateIndex creates a Query that can be used to create an index for a * table. 
*/ - createIndex(table: string, ...cols: string[]): Query | undefined + createIndex(table: string, ...cols: string[]): Query /** * CreateUniqueIndex creates a Query that can be used to create a unique * index for a table. */ - createUniqueIndex(table: string, ...cols: string[]): Query | undefined + createUniqueIndex(table: string, ...cols: string[]): Query /** * DropIndex creates a Query that can be used to remove the named index from * a table. */ - dropIndex(table: string): Query | undefined + dropIndex(table: string): Query } /** BaseBuilder provides a basic implementation of the Builder interface. */ interface BaseBuilder {} interface newBaseBuilder { /** NewBaseBuilder creates a new BaseBuilder instance. */ - (db: DB, executor: Executor): BaseBuilder | undefined + (db: DB, executor: Executor): BaseBuilder } interface BaseBuilder { /** DB returns the DB instance that this builder is associated with. */ - db(): DB | undefined + db(): DB } interface BaseBuilder { /** @@ -3034,7 +3221,7 @@ namespace dbx { * statement may contain parameter placeholders which can be bound with * actual parameter values before the statement is executed. */ - newQuery(sql: string): Query | undefined + newQuery(sql: string): Query } interface BaseBuilder { /** @@ -3070,7 +3257,7 @@ namespace dbx { * of cols are the column names, while the values of cols are the * corresponding column values to be inserted. */ - insert(table: string, cols: Params): Query | undefined + insert(table: string, cols: Params): Query } interface BaseBuilder { /** @@ -3080,11 +3267,7 @@ namespace dbx { * cols are the column names, while the values of cols are the corresponding * column values to be inserted. 
*/ - upsert( - table: string, - cols: Params, - ...constraints: string[] - ): Query | undefined + upsert(table: string, cols: Params, ...constraints: string[]): Query } interface BaseBuilder { /** @@ -3094,7 +3277,7 @@ namespace dbx { * UPDATE SQL statement will have no WHERE clause (be careful in this case * as the SQL statement will update ALL rows in the table). */ - update(table: string, cols: Params, where: Expression): Query | undefined + update(table: string, cols: Params, where: Expression): Query } interface BaseBuilder { /** @@ -3103,7 +3286,7 @@ namespace dbx { * clause (be careful in this case as the SQL statement will delete ALL rows * in the table). */ - delete(table: string, where: Expression): Query | undefined + delete(table: string, where: Expression): Query } interface BaseBuilder { /** @@ -3112,48 +3295,44 @@ namespace dbx { * corresponding column types. The optional "options" parameters will be * appended to the generated SQL statement. */ - createTable( - table: string, - cols: _TygojaDict, - ...options: string[] - ): Query | undefined + createTable(table: string, cols: _TygojaDict, ...options: string[]): Query } interface BaseBuilder { /** RenameTable creates a Query that can be used to rename a table. */ - renameTable(oldName: string): Query | undefined + renameTable(oldName: string): Query } interface BaseBuilder { /** DropTable creates a Query that can be used to drop a table. */ - dropTable(table: string): Query | undefined + dropTable(table: string): Query } interface BaseBuilder { /** TruncateTable creates a Query that can be used to truncate a table. */ - truncateTable(table: string): Query | undefined + truncateTable(table: string): Query } interface BaseBuilder { /** AddColumn creates a Query that can be used to add a column to a table. */ - addColumn(table: string): Query | undefined + addColumn(table: string): Query } interface BaseBuilder { /** * DropColumn creates a Query that can be used to drop a column from a * table. 
*/ - dropColumn(table: string): Query | undefined + dropColumn(table: string): Query } interface BaseBuilder { /** * RenameColumn creates a Query that can be used to rename a column in a * table. */ - renameColumn(table: string): Query | undefined + renameColumn(table: string): Query } interface BaseBuilder { /** * AlterColumn creates a Query that can be used to change the definition of * a table column. */ - alterColumn(table: string): Query | undefined + alterColumn(table: string): Query } interface BaseBuilder { /** @@ -3161,14 +3340,14 @@ namespace dbx { * for a table. The "name" parameter specifies the name of the primary key * constraint. */ - addPrimaryKey(table: string, ...cols: string[]): Query | undefined + addPrimaryKey(table: string, ...cols: string[]): Query } interface BaseBuilder { /** * DropPrimaryKey creates a Query that can be used to remove the named * primary key constraint from a table. */ - dropPrimaryKey(table: string): Query | undefined + dropPrimaryKey(table: string): Query } interface BaseBuilder { /** @@ -3183,42 +3362,42 @@ namespace dbx { cols: Array, refTable: string, ...options: string[] - ): Query | undefined + ): Query } interface BaseBuilder { /** * DropForeignKey creates a Query that can be used to remove the named * foreign key constraint from a table. */ - dropForeignKey(table: string): Query | undefined + dropForeignKey(table: string): Query } interface BaseBuilder { /** * CreateIndex creates a Query that can be used to create an index for a * table. */ - createIndex(table: string, ...cols: string[]): Query | undefined + createIndex(table: string, ...cols: string[]): Query } interface BaseBuilder { /** * CreateUniqueIndex creates a Query that can be used to create a unique * index for a table. 
*/ - createUniqueIndex(table: string, ...cols: string[]): Query | undefined + createUniqueIndex(table: string, ...cols: string[]): Query } interface BaseBuilder { /** * DropIndex creates a Query that can be used to remove the named index from * a table. */ - dropIndex(table: string): Query | undefined + dropIndex(table: string): Query } /** MssqlBuilder is the builder for SQL Server databases. */ - type _subklAen = BaseBuilder - interface MssqlBuilder extends _subklAen {} + type _subeOzgU = BaseBuilder + interface MssqlBuilder extends _subeOzgU {} /** MssqlQueryBuilder is the query builder for SQL Server databases. */ - type _subdEVgQ = BaseQueryBuilder - interface MssqlQueryBuilder extends _subdEVgQ {} + type _subPYpyy = BaseQueryBuilder + interface MssqlQueryBuilder extends _subPYpyy {} interface newMssqlBuilder { /** NewMssqlBuilder creates a new MssqlBuilder instance. */ (db: DB, executor: Executor): Builder @@ -3234,7 +3413,7 @@ namespace dbx { * names to be selected. A column name may have an optional alias name. For * example, Select("id", "my_name AS name"). */ - select(...cols: string[]): SelectQuery | undefined + select(...cols: string[]): SelectQuery } interface MssqlBuilder { /** @@ -3242,7 +3421,7 @@ namespace dbx { * model-based DB operations. The model passed to this method should be a * pointer to a model struct. */ - model(model: {}): ModelQuery | undefined + model(model: {}): ModelQuery } interface MssqlBuilder { /** @@ -3260,21 +3439,21 @@ namespace dbx { } interface MssqlBuilder { /** RenameTable creates a Query that can be used to rename a table. */ - renameTable(oldName: string): Query | undefined + renameTable(oldName: string): Query } interface MssqlBuilder { /** * RenameColumn creates a Query that can be used to rename a column in a * table. 
*/ - renameColumn(table: string): Query | undefined + renameColumn(table: string): Query } interface MssqlBuilder { /** * AlterColumn creates a Query that can be used to change the definition of * a table column. */ - alterColumn(table: string): Query | undefined + alterColumn(table: string): Query } interface MssqlQueryBuilder { /** BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses. */ @@ -3286,8 +3465,8 @@ namespace dbx { ): string } /** MysqlBuilder is the builder for MySQL databases. */ - type _subXuKqX = BaseBuilder - interface MysqlBuilder extends _subXuKqX {} + type _subshvgw = BaseBuilder + interface MysqlBuilder extends _subshvgw {} interface newMysqlBuilder { /** NewMysqlBuilder creates a new MysqlBuilder instance. */ (db: DB, executor: Executor): Builder @@ -3303,7 +3482,7 @@ namespace dbx { * names to be selected. A column name may have an optional alias name. For * example, Select("id", "my_name AS name"). */ - select(...cols: string[]): SelectQuery | undefined + select(...cols: string[]): SelectQuery } interface MysqlBuilder { /** @@ -3311,7 +3490,7 @@ namespace dbx { * model-based DB operations. The model passed to this method should be a * pointer to a model struct. */ - model(model: {}): ModelQuery | undefined + model(model: {}): ModelQuery } interface MysqlBuilder { /** @@ -3335,39 +3514,35 @@ namespace dbx { * cols are the column names, while the values of cols are the corresponding * column values to be inserted. */ - upsert( - table: string, - cols: Params, - ...constraints: string[] - ): Query | undefined + upsert(table: string, cols: Params, ...constraints: string[]): Query } interface MysqlBuilder { /** * RenameColumn creates a Query that can be used to rename a column in a * table. */ - renameColumn(table: string): Query | undefined + renameColumn(table: string): Query } interface MysqlBuilder { /** * DropPrimaryKey creates a Query that can be used to remove the named * primary key constraint from a table. 
*/ - dropPrimaryKey(table: string): Query | undefined + dropPrimaryKey(table: string): Query } interface MysqlBuilder { /** * DropForeignKey creates a Query that can be used to remove the named * foreign key constraint from a table. */ - dropForeignKey(table: string): Query | undefined + dropForeignKey(table: string): Query } /** OciBuilder is the builder for Oracle databases. */ - type _subMOhJY = BaseBuilder - interface OciBuilder extends _subMOhJY {} + type _subTukdT = BaseBuilder + interface OciBuilder extends _subTukdT {} /** OciQueryBuilder is the query builder for Oracle databases. */ - type _subNPnto = BaseQueryBuilder - interface OciQueryBuilder extends _subNPnto {} + type _suboNUbJ = BaseQueryBuilder + interface OciQueryBuilder extends _suboNUbJ {} interface newOciBuilder { /** NewOciBuilder creates a new OciBuilder instance. */ (db: DB, executor: Executor): Builder @@ -3379,7 +3554,7 @@ namespace dbx { * names to be selected. A column name may have an optional alias name. For * example, Select("id", "my_name AS name"). */ - select(...cols: string[]): SelectQuery | undefined + select(...cols: string[]): SelectQuery } interface OciBuilder { /** @@ -3387,7 +3562,7 @@ namespace dbx { * model-based DB operations. The model passed to this method should be a * pointer to a model struct. */ - model(model: {}): ModelQuery | undefined + model(model: {}): ModelQuery } interface OciBuilder { /** @@ -3405,18 +3580,18 @@ namespace dbx { * DropIndex creates a Query that can be used to remove the named index from * a table. */ - dropIndex(table: string): Query | undefined + dropIndex(table: string): Query } interface OciBuilder { /** RenameTable creates a Query that can be used to rename a table. */ - renameTable(oldName: string): Query | undefined + renameTable(oldName: string): Query } interface OciBuilder { /** * AlterColumn creates a Query that can be used to change the definition of * a table column. 
*/ - alterColumn(table: string): Query | undefined + alterColumn(table: string): Query } interface OciQueryBuilder { /** BuildOrderByAndLimit generates the ORDER BY and LIMIT clauses. */ @@ -3428,8 +3603,8 @@ namespace dbx { ): string } /** PgsqlBuilder is the builder for PostgreSQL databases. */ - type _submlkgA = BaseBuilder - interface PgsqlBuilder extends _submlkgA {} + type _subKWcqA = BaseBuilder + interface PgsqlBuilder extends _subKWcqA {} interface newPgsqlBuilder { /** NewPgsqlBuilder creates a new PgsqlBuilder instance. */ (db: DB, executor: Executor): Builder @@ -3441,7 +3616,7 @@ namespace dbx { * names to be selected. A column name may have an optional alias name. For * example, Select("id", "my_name AS name"). */ - select(...cols: string[]): SelectQuery | undefined + select(...cols: string[]): SelectQuery } interface PgsqlBuilder { /** @@ -3449,7 +3624,7 @@ namespace dbx { * model-based DB operations. The model passed to this method should be a * pointer to a model struct. */ - model(model: {}): ModelQuery | undefined + model(model: {}): ModelQuery } interface PgsqlBuilder { /** @@ -3470,33 +3645,29 @@ namespace dbx { * cols are the column names, while the values of cols are the corresponding * column values to be inserted. */ - upsert( - table: string, - cols: Params, - ...constraints: string[] - ): Query | undefined + upsert(table: string, cols: Params, ...constraints: string[]): Query } interface PgsqlBuilder { /** * DropIndex creates a Query that can be used to remove the named index from * a table. */ - dropIndex(table: string): Query | undefined + dropIndex(table: string): Query } interface PgsqlBuilder { /** RenameTable creates a Query that can be used to rename a table. */ - renameTable(oldName: string): Query | undefined + renameTable(oldName: string): Query } interface PgsqlBuilder { /** * AlterColumn creates a Query that can be used to change the definition of * a table column. 
*/ - alterColumn(table: string): Query | undefined + alterColumn(table: string): Query } /** SqliteBuilder is the builder for SQLite databases. */ - type _subJtnhw = BaseBuilder - interface SqliteBuilder extends _subJtnhw {} + type _subsoHkv = BaseBuilder + interface SqliteBuilder extends _subsoHkv {} interface newSqliteBuilder { /** NewSqliteBuilder creates a new SqliteBuilder instance. */ (db: DB, executor: Executor): Builder @@ -3512,7 +3683,7 @@ namespace dbx { * names to be selected. A column name may have an optional alias name. For * example, Select("id", "my_name AS name"). */ - select(...cols: string[]): SelectQuery | undefined + select(...cols: string[]): SelectQuery } interface SqliteBuilder { /** @@ -3520,7 +3691,7 @@ namespace dbx { * model-based DB operations. The model passed to this method should be a * pointer to a model struct. */ - model(model: {}): ModelQuery | undefined + model(model: {}): ModelQuery } interface SqliteBuilder { /** @@ -3541,22 +3712,22 @@ namespace dbx { * DropIndex creates a Query that can be used to remove the named index from * a table. */ - dropIndex(table: string): Query | undefined + dropIndex(table: string): Query } interface SqliteBuilder { /** TruncateTable creates a Query that can be used to truncate a table. */ - truncateTable(table: string): Query | undefined + truncateTable(table: string): Query } interface SqliteBuilder { /** RenameTable creates a Query that can be used to rename a table. */ - renameTable(oldName: string): Query | undefined + renameTable(oldName: string): Query } interface SqliteBuilder { /** * AlterColumn creates a Query that can be used to change the definition of * a table column. */ - alterColumn(table: string): Query | undefined + alterColumn(table: string): Query } interface SqliteBuilder { /** @@ -3564,14 +3735,14 @@ namespace dbx { * for a table. The "name" parameter specifies the name of the primary key * constraint. 
*/ - addPrimaryKey(table: string, ...cols: string[]): Query | undefined + addPrimaryKey(table: string, ...cols: string[]): Query } interface SqliteBuilder { /** * DropPrimaryKey creates a Query that can be used to remove the named * primary key constraint from a table. */ - dropPrimaryKey(table: string): Query | undefined + dropPrimaryKey(table: string): Query } interface SqliteBuilder { /** @@ -3586,18 +3757,18 @@ namespace dbx { cols: Array, refTable: string, ...options: string[] - ): Query | undefined + ): Query } interface SqliteBuilder { /** * DropForeignKey creates a Query that can be used to remove the named * foreign key constraint from a table. */ - dropForeignKey(table: string): Query | undefined + dropForeignKey(table: string): Query } /** StandardBuilder is the builder that is used by DB for an unknown driver. */ - type _subLFjYA = BaseBuilder - interface StandardBuilder extends _subLFjYA {} + type _subZtxuI = BaseBuilder + interface StandardBuilder extends _subZtxuI {} interface newStandardBuilder { /** NewStandardBuilder creates a new StandardBuilder instance. */ (db: DB, executor: Executor): Builder @@ -3613,7 +3784,7 @@ namespace dbx { * names to be selected. A column name may have an optional alias name. For * example, Select("id", "my_name AS name"). */ - select(...cols: string[]): SelectQuery | undefined + select(...cols: string[]): SelectQuery } interface StandardBuilder { /** @@ -3621,7 +3792,7 @@ namespace dbx { * model-based DB operations. The model passed to this method should be a * pointer to a model struct. */ - model(model: {}): ModelQuery | undefined + model(model: {}): ModelQuery } /** * LogFunc logs a message for each SQL statement being executed. This method @@ -3682,8 +3853,8 @@ namespace dbx { * methods. DB allows easier query building and population of data into Go * variables. 
*/ - type _subvKgBu = Builder - interface DB extends _subvKgBu { + type _subwSDJJ = Builder + interface DB extends _subwSDJJ { /** * FieldMapper maps struct fields to DB columns. Defaults to * DefaultFieldMapFunc. @@ -3714,7 +3885,7 @@ namespace dbx { interface Errors extends Array {} interface newFromDB { /** NewFromDB encapsulates an existing database connection. */ - (sqlDB: sql.DB, driverName: string): DB | undefined + (sqlDB: sql.DB, driverName: string): DB } interface open { /** @@ -3723,25 +3894,25 @@ namespace dbx { * doesn't try to establish a DB connection either. Please refer to * sql.Open() for more information. */ - (driverName: string): DB | undefined + (driverName: string): DB } interface mustOpen { /** * MustOpen opens a database and establishes a connection to it. Please * refer to sql.Open() and sql.Ping() for more information. */ - (driverName: string): DB | undefined + (driverName: string): DB } interface DB { /** Clone makes a shallow copy of DB. */ - clone(): DB | undefined + clone(): DB } interface DB { /** * WithContext returns a new instance of DB associated with the given * context. */ - withContext(ctx: context.Context): DB | undefined + withContext(ctx: context.Context): DB } interface DB { /** @@ -3752,7 +3923,7 @@ namespace dbx { } interface DB { /** DB returns the sql.DB instance encapsulated by dbx.DB. */ - db(): sql.DB | undefined + db(): sql.DB } interface DB { /** @@ -3764,18 +3935,18 @@ namespace dbx { } interface DB { /** Begin starts a transaction. */ - begin(): Tx | undefined + begin(): Tx } interface DB { /** * BeginTx starts a transaction with the given context and transaction * options. */ - beginTx(ctx: context.Context, opts: sql.TxOptions): Tx | undefined + beginTx(ctx: context.Context, opts: sql.TxOptions): Tx } interface DB { /** Wrap encapsulates an existing transaction. */ - wrap(sqlTx: sql.Tx): Tx | undefined + wrap(sqlTx: sql.Tx): Tx } interface DB { /** @@ -3829,6 +4000,7 @@ namespace dbx { * statement. 
*/ interface Expression { + [key: string]: any /** * Build converts an expression into a SQL fragment. If the expression * contains binding parameters, they will be added to the given Params. @@ -3908,7 +4080,7 @@ namespace dbx { * example, Like("name", "key").Match(false, true) generates "name" LIKE * "key%". */ - (col: string, ...values: string[]): LikeExp | undefined + (col: string, ...values: string[]): LikeExp } interface notLike { /** @@ -3916,7 +4088,7 @@ namespace dbx { * "key", "word") will generate a SQL expression: "name" NOT LIKE "%key%" * AND "name" NOT LIKE "%word%". Please see Like() for more details. */ - (col: string, ...values: string[]): LikeExp | undefined + (col: string, ...values: string[]): LikeExp } interface orLike { /** @@ -3925,7 +4097,7 @@ namespace dbx { * OrLike("name", "key", "word") will generate a SQL expression: "name" LIKE * "%key%" OR "name" LIKE "%word%". Please see Like() for more details. */ - (col: string, ...values: string[]): LikeExp | undefined + (col: string, ...values: string[]): LikeExp } interface orNotLike { /** @@ -3934,7 +4106,7 @@ namespace dbx { * NOT LIKE "%key%" OR "name" NOT LIKE "%word%". Please see Like() for more * details. */ - (col: string, ...values: string[]): LikeExp | undefined + (col: string, ...values: string[]): LikeExp } interface exists { /** @@ -4015,14 +4187,14 @@ namespace dbx { * position 2i represents a special character and the string at position * 2i+1 is the corresponding escaped version. */ - escape(...chars: string[]): LikeExp | undefined + escape(...chars: string[]): LikeExp } interface LikeExp { /** * Match specifies whether to do wildcard matching on the left and/or right * of given strings. */ - match(left: boolean): LikeExp | undefined + match(left: boolean): LikeExp } interface LikeExp { /** Build converts an expression into a SQL fragment. */ @@ -4055,6 +4227,7 @@ namespace dbx { * unconventional table names. 
*/ interface TableModel { + [key: string]: any tableName(): string } /** ModelQuery represents a query associated with a struct model. */ @@ -4065,7 +4238,7 @@ namespace dbx { fieldMapFunc: FieldMapFunc, db: DB, builder: Builder, - ): ModelQuery | undefined + ): ModelQuery } interface ModelQuery { /** Context returns the context associated with the query. */ @@ -4073,14 +4246,14 @@ namespace dbx { } interface ModelQuery { /** WithContext associates a context with the query. */ - withContext(ctx: context.Context): ModelQuery | undefined + withContext(ctx: context.Context): ModelQuery } interface ModelQuery { /** * Exclude excludes the specified struct fields from being inserted/updated * into the DB table. */ - exclude(...attrs: string[]): ModelQuery | undefined + exclude(...attrs: string[]): ModelQuery } interface ModelQuery { /** @@ -4147,20 +4320,17 @@ namespace dbx { interface Params extends _TygojaDict {} /** Executor prepares, executes, or queries a SQL statement. */ interface Executor { + [key: string]: any /** Exec executes a SQL statement */ exec(query: string, ...args: {}[]): sql.Result /** ExecContext executes a SQL statement with the given context */ execContext(ctx: context.Context, query: string, ...args: {}[]): sql.Result /** Query queries a SQL statement */ - query(query: string, ...args: {}[]): sql.Rows | undefined + query(query: string, ...args: {}[]): sql.Rows /** QueryContext queries a SQL statement with the given context */ - queryContext( - ctx: context.Context, - query: string, - ...args: {}[] - ): sql.Rows | undefined + queryContext(ctx: context.Context, query: string, ...args: {}[]): sql.Rows /** Prepare creates a prepared statement */ - prepare(query: string): sql.Stmt | undefined + prepare(query: string): sql.Stmt } /** Query represents a SQL statement to be executed. */ interface Query { @@ -4188,7 +4358,7 @@ namespace dbx { } interface newQuery { /** NewQuery creates a new Query with the given SQL statement. 
*/ - (db: DB, executor: Executor, sql: string): Query | undefined + (db: DB, executor: Executor, sql: string): Query } interface Query { /** @@ -4204,7 +4374,7 @@ namespace dbx { } interface Query { /** WithContext associates a context with the query. */ - withContext(ctx: context.Context): Query | undefined + withContext(ctx: context.Context): Query } interface Query { /** @@ -4214,7 +4384,7 @@ namespace dbx { * Column()), allowing you to implement auto fail/retry or any other * additional handling. */ - withExecHook(fn: ExecHookFunc): Query | undefined + withExecHook(fn: ExecHookFunc): Query } interface Query { /** @@ -4222,7 +4392,7 @@ namespace dbx { * on q.One(), allowing you to implement custom struct scan based on the * One() argument and/or result. */ - withOneHook(fn: OneHookFunc): Query | undefined + withOneHook(fn: OneHookFunc): Query } interface Query { /** @@ -4230,7 +4400,7 @@ namespace dbx { * on q.All(), allowing you to implement custom slice scan based on the * All() argument and/or result. */ - withAllHook(fn: AllHookFunc): Query | undefined + withAllHook(fn: AllHookFunc): Query } interface Query { /** @@ -4244,7 +4414,7 @@ namespace dbx { * Prepare creates a prepared statement for later queries or executions. * Close() should be called after finishing all queries. */ - prepare(): Query | undefined + prepare(): Query } interface Query { /** @@ -4259,7 +4429,7 @@ namespace dbx { * parameter placeholders in the SQL statement are in the format of * "{:ParamName}". */ - bind(params: Params): Query | undefined + bind(params: Params): Query } interface Query { /** Execute executes the SQL statement without retrieving data. */ @@ -4308,10 +4478,11 @@ namespace dbx { * Rows executes the SQL statement and returns a Rows object to allow * retrieving data row by row. */ - rows(): Rows | undefined + rows(): Rows } /** QueryBuilder builds different clauses for a SELECT SQL statement. 
*/ interface QueryBuilder { + [key: string]: any /** * BuildSelect generates a SELECT clause from the given selected column * names. @@ -4341,11 +4512,11 @@ namespace dbx { interface BaseQueryBuilder {} interface newBaseQueryBuilder { /** NewBaseQueryBuilder creates a new BaseQueryBuilder instance. */ - (db: DB): BaseQueryBuilder | undefined + (db: DB): BaseQueryBuilder } interface BaseQueryBuilder { /** DB returns the DB instance associated with the query builder. */ - db(): DB | undefined + db(): DB } interface BaseQueryBuilder { /** @@ -4415,8 +4586,8 @@ namespace dbx { * be obtained by calling Query.Rows(). It is mainly used to populate data row * by row. */ - type _subpryha = sql.Rows - interface Rows extends _subpryha {} + type _subkUEGQ = sql.Rows + interface Rows extends _subkUEGQ {} interface Rows { /** * ScanMap populates the current row of data into a NullStringMap. Note that @@ -4476,14 +4647,14 @@ namespace dbx { } interface newSelectQuery { /** NewSelectQuery creates a new SelectQuery instance. */ - (builder: Builder, db: DB): SelectQuery | undefined + (builder: Builder, db: DB): SelectQuery } interface SelectQuery { /** * WithBuildHook runs the provided hook function with the query created on * Build(). */ - withBuildHook(fn: BuildHookFunc): SelectQuery | undefined + withBuildHook(fn: BuildHookFunc): SelectQuery } interface SelectQuery { /** Context returns the context associated with the query. */ @@ -4491,88 +4662,88 @@ namespace dbx { } interface SelectQuery { /** WithContext associates a context with the query. */ - withContext(ctx: context.Context): SelectQuery | undefined + withContext(ctx: context.Context): SelectQuery } interface SelectQuery { /** * Select specifies the columns to be selected. Column names will be * automatically quoted. */ - select(...cols: string[]): SelectQuery | undefined + select(...cols: string[]): SelectQuery } interface SelectQuery { /** * AndSelect adds additional columns to be selected. 
Column names will be * automatically quoted. */ - andSelect(...cols: string[]): SelectQuery | undefined + andSelect(...cols: string[]): SelectQuery } interface SelectQuery { /** * Distinct specifies whether to select columns distinctively. By default, * distinct is false. */ - distinct(v: boolean): SelectQuery | undefined + distinct(v: boolean): SelectQuery } interface SelectQuery { /** * SelectOption specifies additional option that should be append to * "SELECT". */ - selectOption(option: string): SelectQuery | undefined + selectOption(option: string): SelectQuery } interface SelectQuery { /** * From specifies which tables to select from. Table names will be * automatically quoted. */ - from(...tables: string[]): SelectQuery | undefined + from(...tables: string[]): SelectQuery } interface SelectQuery { /** Where specifies the WHERE condition. */ - where(e: Expression): SelectQuery | undefined + where(e: Expression): SelectQuery } interface SelectQuery { /** * AndWhere concatenates a new WHERE condition with the existing one (if * any) using "AND". */ - andWhere(e: Expression): SelectQuery | undefined + andWhere(e: Expression): SelectQuery } interface SelectQuery { /** * OrWhere concatenates a new WHERE condition with the existing one (if any) * using "OR". */ - orWhere(e: Expression): SelectQuery | undefined + orWhere(e: Expression): SelectQuery } interface SelectQuery { /** * Join specifies a JOIN clause. The "typ" parameter specifies the JOIN type * (e.g. "INNER JOIN", "LEFT JOIN"). */ - join(typ: string, table: string, on: Expression): SelectQuery | undefined + join(typ: string, table: string, on: Expression): SelectQuery } interface SelectQuery { /** * InnerJoin specifies an INNER JOIN clause. This is a shortcut method for * Join. */ - innerJoin(table: string, on: Expression): SelectQuery | undefined + innerJoin(table: string, on: Expression): SelectQuery } interface SelectQuery { /** * LeftJoin specifies a LEFT JOIN clause. 
This is a shortcut method for * Join. */ - leftJoin(table: string, on: Expression): SelectQuery | undefined + leftJoin(table: string, on: Expression): SelectQuery } interface SelectQuery { /** * RightJoin specifies a RIGHT JOIN clause. This is a shortcut method for * Join. */ - rightJoin(table: string, on: Expression): SelectQuery | undefined + rightJoin(table: string, on: Expression): SelectQuery } interface SelectQuery { /** @@ -4580,7 +4751,7 @@ namespace dbx { * quoted. A column name can contain "ASC" or "DESC" to indicate its * ordering direction. */ - orderBy(...cols: string[]): SelectQuery | undefined + orderBy(...cols: string[]): SelectQuery } interface SelectQuery { /** @@ -4588,67 +4759,67 @@ namespace dbx { * Column names will be properly quoted. A column name can contain "ASC" or * "DESC" to indicate its ordering direction. */ - andOrderBy(...cols: string[]): SelectQuery | undefined + andOrderBy(...cols: string[]): SelectQuery } interface SelectQuery { /** * GroupBy specifies the GROUP BY clause. Column names will be properly * quoted. */ - groupBy(...cols: string[]): SelectQuery | undefined + groupBy(...cols: string[]): SelectQuery } interface SelectQuery { /** * AndGroupBy appends additional columns to the existing GROUP BY clause. * Column names will be properly quoted. */ - andGroupBy(...cols: string[]): SelectQuery | undefined + andGroupBy(...cols: string[]): SelectQuery } interface SelectQuery { /** Having specifies the HAVING clause. */ - having(e: Expression): SelectQuery | undefined + having(e: Expression): SelectQuery } interface SelectQuery { /** * AndHaving concatenates a new HAVING condition with the existing one (if * any) using "AND". */ - andHaving(e: Expression): SelectQuery | undefined + andHaving(e: Expression): SelectQuery } interface SelectQuery { /** * OrHaving concatenates a new HAVING condition with the existing one (if * any) using "OR". 
*/ - orHaving(e: Expression): SelectQuery | undefined + orHaving(e: Expression): SelectQuery } interface SelectQuery { /** Union specifies a UNION clause. */ - union(q: Query): SelectQuery | undefined + union(q: Query): SelectQuery } interface SelectQuery { /** UnionAll specifies a UNION ALL clause. */ - unionAll(q: Query): SelectQuery | undefined + unionAll(q: Query): SelectQuery } interface SelectQuery { /** Limit specifies the LIMIT clause. A negative limit means no limit. */ - limit(limit: number): SelectQuery | undefined + limit(limit: number): SelectQuery } interface SelectQuery { /** Offset specifies the OFFSET clause. A negative offset means no offset. */ - offset(offset: number): SelectQuery | undefined + offset(offset: number): SelectQuery } interface SelectQuery { /** Bind specifies the parameter values to be bound to the query. */ - bind(params: Params): SelectQuery | undefined + bind(params: Params): SelectQuery } interface SelectQuery { /** AndBind appends additional parameters to be bound to the query. */ - andBind(params: Params): SelectQuery | undefined + andBind(params: Params): SelectQuery } interface SelectQuery { /** Build builds the SELECT query and returns an executable Query object. */ - build(): Query | undefined + build(): Query } interface SelectQuery { /** @@ -4699,7 +4870,7 @@ namespace dbx { * Rows builds and executes the SELECT query and returns a Rows object for * data retrieval purpose. This is a shortcut to SelectQuery.Build().Rows() */ - rows(): Rows | undefined + rows(): Rows } interface SelectQuery { /** @@ -4741,7 +4912,7 @@ namespace dbx { * Info exports common SelectQuery fields allowing to inspect the current * select query options. */ - info(): QueryInfo | undefined + info(): QueryInfo } /** FieldMapFunc converts a struct field name into a DB column name. 
*/ interface FieldMapFunc { @@ -4752,12 +4923,13 @@ namespace dbx { (a: {}): string } interface structInfo {} - type _subYkrPN = structInfo - interface structValue extends _subYkrPN {} + type _subtaShB = structInfo + interface structValue extends _subtaShB {} interface fieldInfo {} interface structInfoMapKey {} /** PostScanner is an optional interface used by ScanStruct. */ interface PostScanner { + [key: string]: any /** * PostScan executes right after the struct has been populated with the DB * values, allowing you to further normalize or validate the loaded data. @@ -4785,8 +4957,8 @@ namespace dbx { (a: {}): string } /** Tx enhances sql.Tx with additional querying methods. */ - type _subbdnmI = Builder - interface Tx extends _subbdnmI {} + type _subgTasg = Builder + interface Tx extends _subgTasg {} interface Tx { /** Commit commits the transaction. */ commit(): void @@ -4813,6 +4985,75 @@ namespace dbx { * Note that the examples in this package assume a Unix system. They may not run * on Windows, and they do not run in the Go Playground used by golang.org and * godoc.org. + * + * # Executables in the current directory + * + * The functions Command and LookPath look for a program in the directories + * listed in the current path, following the conventions of the host operating + * system. Operating systems have for decades included the current directory in + * this search, sometimes implicitly and sometimes configured explicitly that + * way by default. Modern practice is that including the current directory is + * usually unexpected and often leads to security problems. + * + * To avoid those security problems, as of Go 1.19, this package will not + * resolve a program using an implicit or explicit path entry relative to the + * current directory. That is, if you run exec.LookPath("go"), it will not + * successfully return ./go on Unix nor .\go.exe on Windows, no matter how the + * path is configured. 
Instead, if the usual path algorithms would result in + * that answer, these functions return an error err satisfying errors.Is(err, + * ErrDot). + * + * For example, consider these two program snippets: + * + * path, err := exec.LookPath("prog") + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * + * And + * + * cmd := exec.Command("prog") + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * + * These will not find and run ./prog or .\prog.exe, no matter how the current + * path is configured. + * + * Code that always wants to run a program from the current directory can be + * rewritten to say "./prog" instead of "prog". + * + * Code that insists on including results from relative path entries can instead + * override the error using an errors.Is check: + * + * path, err := exec.LookPath("prog") + * if errors.Is(err, exec.ErrDot) { + * err = nil + * } + * if err != nil { + * log.Fatal(err) + * } + * use(path) + * + * And + * + * cmd := exec.Command("prog") + * if errors.Is(cmd.Err, exec.ErrDot) { + * cmd.Err = nil + * } + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * + * Setting the environment variable GODEBUG=execerrdot=0 disables generation of + * ErrDot entirely, temporarily restoring the pre-Go 1.19 behavior for programs + * that are unable to apply more targeted fixes. A future version of Go may + * remove support for this variable. + * + * Before adding such overrides, make sure you understand the security + * implications of doing so. See https://go.dev/blog/path-security for more + * information. */ namespace exec { interface command { @@ -4840,13 +5081,14 @@ namespace exec { * do the quoting yourself and provide the full command line in * SysProcAttr.CmdLine, leaving Args empty. */ - (name: string, ...arg: string[]): Cmd | undefined + (name: string, ...arg: string[]): Cmd } } namespace filesystem { /** FileReader defines an interface for a file resource reader. 
*/ interface FileReader { + [key: string]: any open(): io.ReadSeekCloser } /** @@ -4855,31 +5097,45 @@ namespace filesystem { * The file could be from a local path, multipipart/formdata header, etc. */ interface File { + reader: FileReader name: string originalName: string size: number - reader: FileReader } interface newFileFromPath { /** * NewFileFromPath creates a new File instance from the provided local file * path. */ - (path: string): File | undefined + (path: string): File } interface newFileFromBytes { /** * NewFileFromBytes creates a new File instance from the provided byte * slice. */ - (b: string, name: string): File | undefined + (b: string | Array, name: string): File } interface newFileFromMultipart { /** - * NewFileFromMultipart creates a new File instace from the provided - * multipart header. + * NewFileFromMultipart creates a new File from the provided multipart + * header. */ - (mh: multipart.FileHeader): File | undefined + (mh: multipart.FileHeader): File + } + interface newFileFromUrl { + /** + * NewFileFromUrl creates a new File from the provided url by downloading + * the resource and load it as BytesReader. + * + * Example + * + * ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + * defer cancel() + * + * file, err := filesystem.NewFileFromUrl(ctx, "https://example.com/image.png") + */ + (ctx: context.Context, url: string): File } /** MultipartReader defines a FileReader from [multipart.FileHeader]. */ interface MultipartReader { @@ -4899,14 +5155,14 @@ namespace filesystem { } /** BytesReader defines a FileReader from bytes content. */ interface BytesReader { - bytes: string + bytes: string | Array } interface BytesReader { /** Open implements the [filesystem.FileReader] interface. 
*/ open(): io.ReadSeekCloser } - type _subYsCWp = bytes.Reader - interface bytesReadSeekCloser extends _subYsCWp {} + type _subHVtTs = bytes.Reader + interface bytesReadSeekCloser extends _subHVtTs {} interface bytesReadSeekCloser { /** Close implements the [io.ReadSeekCloser] interface. */ close(): void @@ -4925,7 +5181,7 @@ namespace filesystem { accessKey: string, secretKey: string, s3ForcePathStyle: boolean, - ): System | undefined + ): System } interface newLocal { /** @@ -4933,7 +5189,7 @@ namespace filesystem { * * NB! Make sure to call `Close()` after you are done working with it. */ - (dirPath: string): System | undefined + (dirPath: string): System } interface System { /** SetContext assigns the specified context to the current filesystem. */ @@ -4949,7 +5205,7 @@ namespace filesystem { } interface System { /** Attributes returns the attributes for the file with fileKey path. */ - attributes(fileKey: string): blob.Attributes | undefined + attributes(fileKey: string): blob.Attributes } interface System { /** @@ -4957,7 +5213,15 @@ namespace filesystem { * * NB! Make sure to call `Close()` after you are done working with it. */ - getFile(fileKey: string): blob.Reader | undefined + getFile(fileKey: string): blob.Reader + } + interface System { + /** + * Copy copies the file stored at srcKey to dstKey. + * + * If dstKey file already exists, it is overwritten. + */ + copy(srcKey: string): void } interface System { /** @@ -4968,7 +5232,7 @@ namespace filesystem { } interface System { /** Upload writes content into the fileKey location. */ - upload(content: string, fileKey: string): void + upload(content: string | Array, fileKey: string): void } interface System { /** UploadFile uploads the provided multipart file to the fileKey location. */ @@ -5026,56 +5290,91 @@ namespace filesystem { namespace tokens { interface newAdminAuthToken { /** NewAdminAuthToken generates and returns a new admin authentication token. 
*/ - (app: core.App, admin: models.Admin): string + (app: CoreApp, admin: models.Admin): string } interface newAdminResetPasswordToken { /** * NewAdminResetPasswordToken generates and returns a new admin password * reset request token. */ - (app: core.App, admin: models.Admin): string + (app: CoreApp, admin: models.Admin): string } interface newAdminFileToken { /** * NewAdminFileToken generates and returns a new admin private file access * token. */ - (app: core.App, admin: models.Admin): string + (app: CoreApp, admin: models.Admin): string } interface newRecordAuthToken { /** * NewRecordAuthToken generates and returns a new auth record authentication * token. */ - (app: core.App, record: models.Record): string + (app: CoreApp, record: models.Record): string } interface newRecordVerifyToken { /** * NewRecordVerifyToken generates and returns a new record verification * token. */ - (app: core.App, record: models.Record): string + (app: CoreApp, record: models.Record): string } interface newRecordResetPasswordToken { /** * NewRecordResetPasswordToken generates and returns a new auth record * password reset request token. */ - (app: core.App, record: models.Record): string + (app: CoreApp, record: models.Record): string } interface newRecordChangeEmailToken { /** * NewRecordChangeEmailToken generates and returns a new auth record change * email request token. */ - (app: core.App, record: models.Record, newEmail: string): string + (app: CoreApp, record: models.Record, newEmail: string): string } interface newRecordFileToken { /** * NewRecordFileToken generates and returns a new record private file access * token. */ - (app: core.App, record: models.Record): string + (app: CoreApp, record: models.Record): string + } +} + +/** + * Package mails implements various helper methods for sending user and admin + * emails like forgotten password, verification, etc. 
+ */ +namespace mails { + interface sendAdminPasswordReset { + /** + * SendAdminPasswordReset sends a password reset request email to the + * specified admin. + */ + (app: CoreApp, admin: models.Admin): void + } + interface sendRecordPasswordReset { + /** + * SendRecordPasswordReset sends a password reset request email to the + * specified user. + */ + (app: CoreApp, authRecord: models.Record): void + } + interface sendRecordVerification { + /** + * SendRecordVerification sends a verification request email to the + * specified user. + */ + (app: CoreApp, authRecord: models.Record): void + } + interface sendRecordChangeEmail { + /** + * SendUserChangeEmail sends a change email confirmation email to the + * specified user. + */ + (app: CoreApp, record: models.Record, newEmail: string): void } } @@ -5094,12 +5393,12 @@ namespace forms { interface newAdminLogin { /** * NewAdminLogin creates a new [AdminLogin] form initialized with the - * provided [core.App] instance. + * provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - (app: core.App): AdminLogin | undefined + (app: CoreApp): AdminLogin } interface AdminLogin { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5122,7 +5421,7 @@ namespace forms { */ submit( ...interceptors: InterceptorFunc[] - ): models.Admin | undefined + ): models.Admin } /** AdminPasswordResetConfirm is an admin password reset confirmation form. */ interface AdminPasswordResetConfirm { @@ -5133,12 +5432,12 @@ namespace forms { interface newAdminPasswordResetConfirm { /** * NewAdminPasswordResetConfirm creates a new [AdminPasswordResetConfirm] - * form initialized with from the provided [core.App] instance. + * form initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. 
*/ - (app: core.App): AdminPasswordResetConfirm | undefined + (app: CoreApp): AdminPasswordResetConfirm } interface AdminPasswordResetConfirm { /** @@ -5166,7 +5465,7 @@ namespace forms { */ submit( ...interceptors: InterceptorFunc[] - ): models.Admin | undefined + ): models.Admin } /** AdminPasswordResetRequest is an admin password reset request form. */ interface AdminPasswordResetRequest { @@ -5175,12 +5474,12 @@ namespace forms { interface newAdminPasswordResetRequest { /** * NewAdminPasswordResetRequest creates a new [AdminPasswordResetRequest] - * form initialized with from the provided [core.App] instance. + * form initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - (app: core.App): AdminPasswordResetRequest | undefined + (app: CoreApp): AdminPasswordResetRequest } interface AdminPasswordResetRequest { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5217,13 +5516,13 @@ namespace forms { interface newAdminUpsert { /** * NewAdminUpsert creates a new [AdminUpsert] form with initializer config - * created from the provided [core.App] and [models.Admin] instances (for + * created from the provided [CoreApp] and [models.Admin] instances (for * create you could pass a pointer to an empty Admin - `&models.Admin{}`). * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - (app: core.App, admin: models.Admin): AdminUpsert | undefined + (app: CoreApp, admin: models.Admin): AdminUpsert } interface AdminUpsert { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5246,7 +5545,8 @@ namespace forms { submit(...interceptors: InterceptorFunc[]): void } /** - * AppleClientSecretCreate is a [models.Admin] upsert (create/update) form. + * AppleClientSecretCreate is a form struct to generate a new Apple Client + * Secret. 
* * Reference: * https://developer.apple.com/documentation/sign_in_with_apple/generate_and_validate_tokens @@ -5270,18 +5570,17 @@ namespace forms { */ privateKey: string /** - * Duration specifies how long the generated JWT token should be considered - * valid. The specified value must be in seconds and max 15777000 - * (~6months). + * Duration specifies how long the generated JWT should be considered valid. + * The specified value must be in seconds and max 15777000 (~6months). */ duration: number } interface newAppleClientSecretCreate { /** * NewAppleClientSecretCreate creates a new [AppleClientSecretCreate] form - * with initializer config created from the provided [core.App] instances. + * with initializer config created from the provided [CoreApp] instances. */ - (app: core.App): AppleClientSecretCreate | undefined + (app: CoreApp): AppleClientSecretCreate } interface AppleClientSecretCreate { /** @@ -5300,7 +5599,7 @@ namespace forms { } interface newBackupCreate { /** NewBackupCreate creates new BackupCreate request form. */ - (app: core.App): BackupCreate | undefined + (app: CoreApp): BackupCreate } interface BackupCreate { /** SetContext replaces the default form context with the provided one. */ @@ -5328,7 +5627,7 @@ namespace forms { } interface newBackupUpload { /** NewBackupUpload creates new BackupUpload request form. */ - (app: core.App): BackupUpload | undefined + (app: CoreApp): BackupUpload } interface BackupUpload { /** @@ -5387,14 +5686,14 @@ namespace forms { interface newCollectionUpsert { /** * NewCollectionUpsert creates a new [CollectionUpsert] form with - * initializer config created from the provided [core.App] and + * initializer config created from the provided [CoreApp] and * [models.Collection] instances (for create you could pass a pointer to an * empty Collection - `&models.Collection{}`). * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. 
*/ - (app: core.App, collection: models.Collection): CollectionUpsert | undefined + (app: CoreApp, collection: models.Collection): CollectionUpsert } interface CollectionUpsert { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5431,12 +5730,12 @@ namespace forms { interface newCollectionsImport { /** * NewCollectionsImport creates a new [CollectionsImport] form with - * initialized with from the provided [core.App] instance. + * initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - (app: core.App): CollectionsImport | undefined + (app: CoreApp): CollectionsImport } interface CollectionsImport { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5477,7 +5776,7 @@ namespace forms { } interface newRealtimeSubscribe { /** NewRealtimeSubscribe creates new RealtimeSubscribe request form. */ - (): RealtimeSubscribe | undefined + (): RealtimeSubscribe } interface RealtimeSubscribe { /** @@ -5494,16 +5793,13 @@ namespace forms { interface newRecordEmailChangeConfirm { /** * NewRecordEmailChangeConfirm creates a new [RecordEmailChangeConfirm] form - * initialized with from the provided [core.App] and [models.Collection] + * initialized with from the provided [CoreApp] and [models.Collection] * instances. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - ( - app: core.App, - collection: models.Collection, - ): RecordEmailChangeConfirm | undefined + (app: CoreApp, collection: models.Collection): RecordEmailChangeConfirm } interface RecordEmailChangeConfirm { /** SetDao replaces the default form Dao instance with the provided one. 
*/ @@ -5527,7 +5823,7 @@ namespace forms { */ submit( ...interceptors: InterceptorFunc[] - ): models.Record | undefined + ): models.Record } /** RecordEmailChangeRequest is an auth record email change request form. */ interface RecordEmailChangeRequest { @@ -5536,13 +5832,13 @@ namespace forms { interface newRecordEmailChangeRequest { /** * NewRecordEmailChangeRequest creates a new [RecordEmailChangeRequest] form - * initialized with from the provided [core.App] and [models.Record] + * initialized with from the provided [CoreApp] and [models.Record] * instances. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - (app: core.App, record: models.Record): RecordEmailChangeRequest | undefined + (app: CoreApp, record: models.Record): RecordEmailChangeRequest } interface RecordEmailChangeRequest { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5589,8 +5885,8 @@ namespace forms { /** The authorization code returned from the initial request. */ code: string /** - * The code verifier sent with the initial request as part of the - * code_challenge. + * The optional PKCE code verifier as part of the code_challenge sent with + * the initial request. */ codeVerifier: string /** The redirect url sent with the initial request. */ @@ -5604,16 +5900,16 @@ namespace forms { interface newRecordOAuth2Login { /** * NewRecordOAuth2Login creates a new [RecordOAuth2Login] form with - * initialized with from the provided [core.App] instance. + * initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ ( - app: core.App, + app: CoreApp, collection: models.Collection, optAuthRecord: models.Record, - ): RecordOAuth2Login | undefined + ): RecordOAuth2Login } interface RecordOAuth2Login { /** SetDao replaces the default form Dao instance with the provided one. 
*/ @@ -5650,7 +5946,7 @@ namespace forms { */ submit( ...interceptors: InterceptorFunc[] - ): [models.Record | undefined, auth.AuthUser | undefined] + ): [models.Record, auth.AuthUser] } /** RecordPasswordLogin is record username/email + password login form. */ interface RecordPasswordLogin { @@ -5660,16 +5956,13 @@ namespace forms { interface newRecordPasswordLogin { /** * NewRecordPasswordLogin creates a new [RecordPasswordLogin] form - * initialized with from the provided [core.App] and [models.Collection] + * initialized with from the provided [CoreApp] and [models.Collection] * instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - ( - app: core.App, - collection: models.Collection, - ): RecordPasswordLogin | undefined + (app: CoreApp, collection: models.Collection): RecordPasswordLogin } interface RecordPasswordLogin { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5692,7 +5985,7 @@ namespace forms { */ submit( ...interceptors: InterceptorFunc[] - ): models.Record | undefined + ): models.Record } /** * RecordPasswordResetConfirm is an auth record password reset confirmation @@ -5706,15 +5999,12 @@ namespace forms { interface newRecordPasswordResetConfirm { /** * NewRecordPasswordResetConfirm creates a new [RecordPasswordResetConfirm] - * form initialized with from the provided [core.App] instance. + * form initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - ( - app: core.App, - collection: models.Collection, - ): RecordPasswordResetConfirm | undefined + (app: CoreApp, collection: models.Collection): RecordPasswordResetConfirm } interface RecordPasswordResetConfirm { /** SetDao replaces the default form Dao instance with the provided one. 
*/ @@ -5737,7 +6027,7 @@ namespace forms { */ submit( ...interceptors: InterceptorFunc[] - ): models.Record | undefined + ): models.Record } /** RecordPasswordResetRequest is an auth record reset password request form. */ interface RecordPasswordResetRequest { @@ -5746,15 +6036,12 @@ namespace forms { interface newRecordPasswordResetRequest { /** * NewRecordPasswordResetRequest creates a new [RecordPasswordResetRequest] - * form initialized with from the provided [core.App] instance. + * form initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - ( - app: core.App, - collection: models.Collection, - ): RecordPasswordResetRequest | undefined + (app: CoreApp, collection: models.Collection): RecordPasswordResetRequest } interface RecordPasswordResetRequest { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5796,14 +6083,14 @@ namespace forms { interface newRecordUpsert { /** * NewRecordUpsert creates a new [RecordUpsert] form with initializer config - * created from the provided [core.App] and [models.Record] instances (for + * created from the provided [CoreApp] and [models.Record] instances (for * create you could pass a pointer to an empty Record - * models.NewRecord(collection)). * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - (app: core.App, record: models.Record): RecordUpsert | undefined + (app: CoreApp, record: models.Record): RecordUpsert } interface RecordUpsert { /** Data returns the loaded form's data. */ @@ -5884,7 +6171,7 @@ namespace forms { * LoadData loads and normalizes the provided regular record data fields * into the form. 
*/ - loadData(requestInfo: _TygojaDict): void + loadData(requestData: _TygojaDict): void } interface RecordUpsert { /** @@ -5925,15 +6212,12 @@ namespace forms { interface newRecordVerificationConfirm { /** * NewRecordVerificationConfirm creates a new [RecordVerificationConfirm] - * form initialized with from the provided [core.App] instance. + * form initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - ( - app: core.App, - collection: models.Collection, - ): RecordVerificationConfirm | undefined + (app: CoreApp, collection: models.Collection): RecordVerificationConfirm } interface RecordVerificationConfirm { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -5956,7 +6240,7 @@ namespace forms { */ submit( ...interceptors: InterceptorFunc[] - ): models.Record | undefined + ): models.Record } /** * RecordVerificationRequest is an auth record email verification request @@ -5968,15 +6252,12 @@ namespace forms { interface newRecordVerificationRequest { /** * NewRecordVerificationRequest creates a new [RecordVerificationRequest] - * form initialized with from the provided [core.App] instance. + * form initialized with from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - ( - app: core.App, - collection: models.Collection, - ): RecordVerificationRequest | undefined + (app: CoreApp, collection: models.Collection): RecordVerificationRequest } interface RecordVerificationRequest { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -6003,17 +6284,17 @@ namespace forms { submit(...interceptors: InterceptorFunc[]): void } /** SettingsUpsert is a [settings.Settings] upsert (create/update) form. 
*/ - type _subBpyXp = settings.Settings - interface SettingsUpsert extends _subBpyXp {} + type _subxSkzb = settings.Settings + interface SettingsUpsert extends _subxSkzb {} interface newSettingsUpsert { /** * NewSettingsUpsert creates a new [SettingsUpsert] form with initializer - * config created from the provided [core.App] instance. + * config created from the provided [CoreApp] instance. * * If you want to submit the form as part of a transaction, you can change * the default Dao via [SetDao()]. */ - (app: core.App): SettingsUpsert | undefined + (app: CoreApp): SettingsUpsert } interface SettingsUpsert { /** SetDao replaces the default form Dao instance with the provided one. */ @@ -6046,7 +6327,7 @@ namespace forms { } interface newTestEmailSend { /** NewTestEmailSend creates and initializes new TestEmailSend form. */ - (app: core.App): TestEmailSend | undefined + (app: CoreApp): TestEmailSend } interface TestEmailSend { /** @@ -6066,7 +6347,7 @@ namespace forms { } interface newTestS3Filesystem { /** NewTestS3Filesystem creates and initializes new TestS3Filesystem form. */ - (app: core.App): TestS3Filesystem | undefined + (app: CoreApp): TestS3Filesystem } interface TestS3Filesystem { /** @@ -6105,23 +6386,23 @@ namespace apis { } interface newNotFoundError { /** NewNotFoundError creates and returns 404 `ApiError`. */ - (message: string, data: any): ApiError | undefined + (message: string, data: any): ApiError } interface newBadRequestError { /** NewBadRequestError creates and returns 400 `ApiError`. */ - (message: string, data: any): ApiError | undefined + (message: string, data: any): ApiError } interface newForbiddenError { /** NewForbiddenError creates and returns 403 `ApiError`. */ - (message: string, data: any): ApiError | undefined + (message: string, data: any): ApiError } interface newUnauthorizedError { /** NewUnauthorizedError creates and returns 401 `ApiError`. 
*/ - (message: string, data: any): ApiError | undefined + (message: string, data: any): ApiError } interface newApiError { /** NewApiError creates and returns new normalized `ApiError` instance. */ - (status: number, message: string, data: any): ApiError | undefined + (status: number, message: string, data: any): ApiError } interface backupApi {} interface initApi { @@ -6129,7 +6410,7 @@ namespace apis { * InitApi creates a configured echo instance with registered system and app * specific routes and middlewares. */ - (app: core.App): echo.Echo | undefined + (app: CoreApp): echo.Echo } interface staticDirectoryHandler { /** @@ -6210,7 +6491,7 @@ namespace apis { * admin Authorization header ONLY if the application has at least 1 * existing Admin model. */ - (app: core.App): echo.MiddlewareFunc + (app: CoreApp): echo.MiddlewareFunc } interface requireAdminOrRecordAuth { /** @@ -6244,7 +6525,7 @@ namespace apis { * This middleware is expected to be already registered by default for all * routes. */ - (app: core.App): echo.MiddlewareFunc + (app: CoreApp): echo.MiddlewareFunc } interface loadCollectionContext { /** @@ -6254,7 +6535,7 @@ namespace apis { * Set optCollectionTypes to further filter the found collection by its * type. */ - (app: core.App, ...optCollectionTypes: string[]): echo.MiddlewareFunc + (app: CoreApp, ...optCollectionTypes: string[]): echo.MiddlewareFunc } interface activityLogger { /** @@ -6264,36 +6545,43 @@ namespace apis { * The middleware does nothing if the app logs retention period is zero * (aka. app.Settings().Logs.MaxDays = 0). */ - (app: core.App): echo.MiddlewareFunc + (app: CoreApp): echo.MiddlewareFunc } interface realtimeApi {} + /** RecordData represents the broadcasted record subscrition message data. 
*/ interface recordData { + record: any // map or models.Record action: string - record?: models.Record } interface getter { + [key: string]: any get(_arg0: string): any } interface recordAuthApi {} interface providerInfo { name: string + displayName: string state: string + authUrl: string + /** + * Technically could be omitted if the provider doesn't support PKCE, but to + * avoid breaking existing typed clients we'll return them as empty string + */ codeVerifier: string codeChallenge: string codeChallengeMethod: string - authUrl: string } interface recordApi {} interface requestData { /** Deprecated: Use RequestInfo instead. */ - (c: echo.Context): models.RequestInfo | undefined + (c: echo.Context): models.RequestInfo } interface requestInfo { /** * RequestInfo exports cached common request data fields (query, body, * logged auth state, etc.) from the provided context. */ - (c: echo.Context): models.RequestInfo | undefined + (c: echo.Context): models.RequestInfo } interface recordAuthResponse { /** @@ -6301,7 +6589,7 @@ namespace apis { * specified request context. */ ( - app: core.App, + app: CoreApp, c: echo.Context, authRecord: models.Record, meta: any, @@ -6382,7 +6670,7 @@ namespace apis { * ShowStartBanner: false, * }) */ - (app: core.App, config: ServeConfig): http.Server | undefined + (app: CoreApp, config: ServeConfig): http.Server } interface migrationsConnection { db?: dbx.DB @@ -6391,6 +6679,89 @@ namespace apis { interface settingsApi {} } +namespace pocketbase { + /** AppWrapper serves as a private CoreApp instance wrapper. */ + type _subDghba = CoreApp + interface appWrapper extends _subDghba {} + /** + * PocketBase defines a PocketBase app launcher. + * + * It implements [CoreApp] via embedding and all of the app interface methods + * could be accessed directly through the instance (eg. + * PocketBase.DataDir()). 
+ */ + type _subQSfhg = appWrapper + interface PocketBase extends _subQSfhg { + /** RootCmd is the main console command */ + rootCmd?: cobra.Command + } + /** Config is the PocketBase initialization config struct. */ + interface Config { + /** Optional default values for the console flags */ + defaultDev: boolean + defaultDataDir: string // if not set, it will fallback to "./pb_data" + defaultEncryptionEnv: string + /** Hide the default console server info on app startup */ + hideStartBanner: boolean + /** Optional DB configurations */ + dataMaxOpenConns: number // default to core.DefaultDataMaxOpenConns + dataMaxIdleConns: number // default to core.DefaultDataMaxIdleConns + logsMaxOpenConns: number // default to core.DefaultLogsMaxOpenConns + logsMaxIdleConns: number // default to core.DefaultLogsMaxIdleConns + } + interface _new { + /** + * New creates a new PocketBase instance with the default configuration. Use + * [NewWithConfig()] if you want to provide a custom configuration. + * + * Note that the application will not be initialized/bootstrapped yet, aka. + * DB connections, migrations, app settings, etc. will not be accessible. + * Everything will be initialized when [Start()] is executed. If you want to + * initialize the application before calling [Start()], then you'll have to + * manually call [Bootstrap()]. + */ + (): PocketBase + } + interface newWithConfig { + /** + * NewWithConfig creates a new PocketBase instance with the provided config. + * + * Note that the application will not be initialized/bootstrapped yet, aka. + * DB connections, migrations, app settings, etc. will not be accessible. + * Everything will be initialized when [Start()] is executed. If you want to + * initialize the application before calling [Start()], then you'll have to + * manually call [Bootstrap()]. + */ + (config: Config): PocketBase + } + interface PocketBase { + /** + * Start starts the application, aka. 
registers the default system commands + * (serve, migrate, version) and executes pb.RootCmd. + */ + start(): void + } + interface PocketBase { + /** + * Execute initializes the application (if not already) and executes the + * pb.RootCmd with graceful shutdown support. + * + * This method differs from pb.Start() by not registering the default system + * commands! + */ + execute(): void + } + /** + * ColoredWriter is a small wrapper struct to construct a [color.Color] + * writter. + */ + interface coloredWriter {} + interface coloredWriter { + /** Write writes the p bytes using the colored writer. */ + write(p: string | Array): number + } +} + /** * Package template is a thin wrapper around the standard html/template and * text/template packages that implements a convenient registry to load and @@ -6418,11 +6789,12 @@ namespace apis { namespace template { interface newRegistry { /** - * NewRegistry creates and initializes a new blank templates registry. + * NewRegistry creates and initializes a new templates registry with some + * defaults (eg. global "raw" template function for unescaped HTML). * * Use the Registry.Load* methods to load templates into the registry. */ - (): Registry | undefined + (): Registry } /** * Registry defines a templates registry that is safe to be used by multiple @@ -6431,6 +6803,30 @@ namespace template { * Use the Registry.Load* methods to load templates into the registry. */ interface Registry {} + interface Registry { + /** + * AddFuncs registers new global template functions. + * + * The key of each map entry is the function name that will be used in the + * templates. If a function with the map entry name already exists it will + * be replaced with the new one. + * + * The value of each map entry is a function that must have either a single + * return value, or two return values of which the second has type error. 
+ * + * Example: + * + * R.AddFuncs(map[string]any{ + * + * "toUpper": func(str string) string { + * return strings.ToUppser(str) + * }, + * ... + * + * }) + */ + addFuncs(funcs: _TygojaDict): Registry + } interface Registry { /** * LoadFiles caches (if not already) the specified filenames set as a single @@ -6438,24 +6834,24 @@ namespace template { * * There must be at least 1 filename specified. */ - loadFiles(...filenames: string[]): Renderer | undefined + loadFiles(...filenames: string[]): Renderer } interface Registry { /** * LoadString caches (if not already) the specified inline string as a * single template and returns a ready to use Renderer instance. */ - loadString(text: string): Renderer | undefined + loadString(text: string): Renderer } interface Registry { /** - * LoadString caches (if not already) the specified fs and globPatterns pair - * as single template and returns a ready to use Renderer instance. + * LoadFS caches (if not already) the specified fs and globPatterns pair as + * single template and returns a ready to use Renderer instance. * * There must be at least 1 file matching the provided globPattern(s) (note * that most file names serves as glob patterns matching themselves). */ - loadFS(fs: fs.FS, ...globPatterns: string[]): Renderer | undefined + loadFS(fsys: fs.FS, ...globPatterns: string[]): Renderer } /** Renderer defines a single parsed template. */ interface Renderer {} @@ -6468,80 +6864,6 @@ namespace template { } } -namespace pocketbase { - /** AppWrapper serves as a private core.App instance wrapper. */ - type _subSsZSM = core.App - interface appWrapper extends _subSsZSM {} - /** - * PocketBase defines a PocketBase app launcher. - * - * It implements [core.App] via embedding and all of the app interface methods - * could be accessed directly through the instance (eg. - * PocketBase.DataDir()). 
- */ - type _subtNSbs = appWrapper - interface PocketBase extends _subtNSbs { - /** RootCmd is the main console command */ - rootCmd?: cobra.Command - } - /** Config is the PocketBase initialization config struct. */ - interface Config { - /** Optional default values for the console flags */ - defaultDebug: boolean - defaultDataDir: string // if not set, it will fallback to "./pb_data" - defaultEncryptionEnv: string - /** Hide the default console server info on app startup */ - hideStartBanner: boolean - /** Optional DB configurations */ - dataMaxOpenConns: number // default to core.DefaultDataMaxOpenConns - dataMaxIdleConns: number // default to core.DefaultDataMaxIdleConns - logsMaxOpenConns: number // default to core.DefaultLogsMaxOpenConns - logsMaxIdleConns: number // default to core.DefaultLogsMaxIdleConns - } - interface _new { - /** - * New creates a new PocketBase instance with the default configuration. Use - * [NewWithConfig()] if you want to provide a custom configuration. - * - * Note that the application will not be initialized/bootstrapped yet, aka. - * DB connections, migrations, app settings, etc. will not be accessible. - * Everything will be initialized when [Start()] is executed. If you want to - * initialize the application before calling [Start()], then you'll have to - * manually call [Bootstrap()]. - */ - (): PocketBase | undefined - } - interface newWithConfig { - /** - * NewWithConfig creates a new PocketBase instance with the provided config. - * - * Note that the application will not be initialized/bootstrapped yet, aka. - * DB connections, migrations, app settings, etc. will not be accessible. - * Everything will be initialized when [Start()] is executed. If you want to - * initialize the application before calling [Start()], then you'll have to - * manually call [Bootstrap()]. - */ - (config: Config): PocketBase | undefined - } - interface PocketBase { - /** - * Start starts the application, aka. 
registers the default system commands - * (serve, migrate, version) and executes pb.RootCmd. - */ - start(): void - } - interface PocketBase { - /** - * Execute initializes the application (if not already) and executes the - * pb.RootCmd with graceful shutdown support. - * - * This method differs from pb.Start() by not registering the default system - * commands! - */ - execute(): void - } -} - /** * Package io provides basic interfaces to I/O primitives. Its primary job is to * wrap existing implementations of such primitives, such as those in package @@ -6574,6 +6896,9 @@ namespace io { * the error err. Doing so correctly handles I/O errors that happen after * reading some bytes and also both of the allowed EOF behaviors. * + * If len(p) == 0, Read should always return n == 0. It may return a non-nil + * error if some error condition is known, such as EOF. + * * Implementations of Read are discouraged from returning a zero byte count * with a nil error, except when len(p) == 0. Callers should treat a return of * 0 and nil as indicating that nothing happened; in particular it does not @@ -6582,27 +6907,84 @@ namespace io { * Implementations must not retain p. */ interface Reader { - read(p: string): number - } - /** - * Writer is the interface that wraps the basic Write method. - * - * Write writes len(p) bytes from p to the underlying data stream. It returns - * the number of bytes written from p (0 <= n <= len(p)) and any error - * encountered that caused the write to stop early. Write must return a - * non-nil error if it returns n < len(p). Write must not modify the slice - * data, even temporarily. - * - * Implementations must not retain p. - */ - interface Writer { - write(p: string): number + [key: string]: any + read(p: string | Array): number } /** * ReadSeekCloser is the interface that groups the basic Read, Seek and Close * methods. 
*/ - interface ReadSeekCloser {} + interface ReadSeekCloser { + [key: string]: any + } +} + +/** + * Package bytes implements functions for the manipulation of byte slices. It is + * analogous to the facilities of the [strings] package. + */ +namespace bytes { + /** + * A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker, + * io.ByteScanner, and io.RuneScanner interfaces by reading from a byte slice. + * Unlike a Buffer, a Reader is read-only and supports seeking. The zero value + * for Reader operates like a Reader of an empty slice. + */ + interface Reader {} + interface Reader { + /** Len returns the number of bytes of the unread portion of the slice. */ + len(): number + } + interface Reader { + /** + * Size returns the original length of the underlying byte slice. Size is + * the number of bytes available for reading via ReadAt. The result is + * unaffected by any method calls except Reset. + */ + size(): number + } + interface Reader { + /** Read implements the io.Reader interface. */ + read(b: string | Array): number + } + interface Reader { + /** ReadAt implements the io.ReaderAt interface. */ + readAt(b: string | Array, off: number): number + } + interface Reader { + /** ReadByte implements the io.ByteReader interface. */ + readByte(): number + } + interface Reader { + /** + * UnreadByte complements ReadByte in implementing the io.ByteScanner + * interface. + */ + unreadByte(): void + } + interface Reader { + /** ReadRune implements the io.RuneReader interface. */ + readRune(): [number, number] + } + interface Reader { + /** + * UnreadRune complements ReadRune in implementing the io.RuneScanner + * interface. + */ + unreadRune(): void + } + interface Reader { + /** Seek implements the io.Seeker interface. */ + seek(offset: number, whence: number): number + } + interface Reader { + /** WriteTo implements the io.WriterTo interface. 
*/ + writeTo(w: io.Writer): number + } + interface Reader { + /** Reset resets the Reader to be reading from b. */ + reset(b: string | Array): void + } } /** @@ -6657,7 +7039,13 @@ namespace syscall { */ foreground: boolean pgid: number // Child's process group ID if Setpgid. - pdeathsig: Signal // Signal that the process will get when its parent dies (Linux and FreeBSD only) + /** + * Pdeathsig, if non-zero, is a signal that the kernel will send to the + * child process when the creating thread dies. Note that the signal is sent + * on thread termination, which may happen before process termination. There + * are more details at https://go.dev/issue/27505. + */ + pdeathsig: Signal cloneflags: number // Flags for clone calls (Linux only) unshareflags: number // Flags for unshare calls (Linux only) uidMappings: Array // User ID mappings for user namespaces. @@ -6670,11 +7058,14 @@ namespace syscall { */ gidMappingsEnableSetgroups: boolean ambientCaps: Array // Ambient capabilities (Linux only) + useCgroupFD: boolean // Whether to make use of the CgroupFD field. + cgroupFD: number // File descriptor of a cgroup to put the new process into. } // @ts-ignore import errorspkg = errors /** A RawConn is a raw network connection. */ interface RawConn { + [key: string]: any /** * Control invokes f on the underlying connection's file descriptor or * handle. The file descriptor fd is guaranteed to remain valid while f @@ -6702,8 +7093,8 @@ namespace syscall { * err = errno * } * - * Errno values can be tested against error values from the os package using - * errors.Is. For example: + * Errno values can be tested against error values using errors.Is. For + * example: * * _, _, err := syscall.Syscall(...) * if errors.Is(err, fs.ErrNotExist) ... @@ -6729,7 +7120,7 @@ namespace syscall { * The calendrical calculations always assume a Gregorian calendar, with no leap * seconds. 
* - * Monotonic Clocks + * # Monotonic Clocks * * Operating systems provide both a “wall clock,” which is subject to changes * for clock synchronization, and a “monotonic clock,” which is not. The general @@ -6767,10 +7158,10 @@ namespace syscall { * t.Round(0). * * If Times t and u both contain monotonic clock readings, the operations - * t.After(u), t.Before(u), t.Equal(u), and t.Sub(u) are carried out using the - * monotonic clock readings alone, ignoring the wall clock readings. If either t - * or u contains no monotonic clock reading, these operations fall back to using - * the wall clock readings. + * t.After(u), t.Before(u), t.Equal(u), t.Compare(u), and t.Sub(u) are carried + * out using the monotonic clock readings alone, ignoring the wall clock + * readings. If either t or u contains no monotonic clock reading, these + * operations fall back to using the wall clock readings. * * On some systems the monotonic clock will stop if the computer goes to sleep. * On such a system, t.Sub(u) may not accurately reflect the actual time that @@ -6784,6 +7175,9 @@ namespace syscall { * t.GobDecode, t.UnmarshalBinary. t.UnmarshalJSON, and t.UnmarshalText always * create times with no monotonic clock reading. * + * The monotonic clock reading exists only in Time values. It is not a part of + * Duration values or the Unix times returned by t.Unix and friends. + * * Note that the Go == operator compares not just the time instant but also the * Location and the monotonic clock reading. See the documentation for the Time * type for a discussion of equality testing for Time values. @@ -6833,7 +7227,10 @@ namespace time { * AppendFormat is like Format but appends the textual representation to b * and returns the extended buffer. */ - appendFormat(b: string, layout: string): string + appendFormat( + b: string | Array, + layout: string, + ): string | Array } /** * A Time represents an instant in time with nanosecond precision. 
@@ -6890,6 +7287,13 @@ namespace time { /** Before reports whether the time instant t is before u. */ before(u: Time): boolean } + interface Time { + /** + * Compare compares the time instant t with u. If t is before u, it returns + * -1; if t is after u, it returns +1; if they're the same, it returns 0. + */ + compare(u: Time): number + } interface Time { /** * Equal reports whether t and u represent the same time instant. Two times @@ -7032,6 +7436,13 @@ namespace time { */ round(m: Duration): Duration } + interface Duration { + /** + * Abs returns the absolute value of d. As a special case, math.MinInt64 is + * converted to math.MaxInt64. + */ + abs(): Duration + } interface Time { /** Add returns the time t+d. */ add(d: Duration): Time @@ -7076,7 +7487,7 @@ namespace time { } interface Time { /** Location returns the time zone information associated with t. */ - location(): Location | undefined + location(): Location } interface Time { /** @@ -7086,6 +7497,16 @@ namespace time { */ zone(): [string, number] } + interface Time { + /** + * ZoneBounds returns the bounds of the time zone in effect at time t. The + * zone begins at start and the next zone begins at end. If the zone begins + * at the beginning of time, start will be returned as a zero Time. If the + * zone goes on forever, end will be returned as a zero Time. The Location + * of the returned times will be the same as t. + */ + zoneBounds(): Time + } interface Time { /** * Unix returns t as a Unix time, the number of seconds elapsed since @@ -7129,48 +7550,51 @@ namespace time { } interface Time { /** MarshalBinary implements the encoding.BinaryMarshaler interface. */ - marshalBinary(): string + marshalBinary(): string | Array } interface Time { /** UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. */ - unmarshalBinary(data: string): void + unmarshalBinary(data: string | Array): void } interface Time { /** GobEncode implements the gob.GobEncoder interface. 
*/ - gobEncode(): string + gobEncode(): string | Array } interface Time { /** GobDecode implements the gob.GobDecoder interface. */ - gobDecode(data: string): void + gobDecode(data: string | Array): void } interface Time { /** * MarshalJSON implements the json.Marshaler interface. The time is a quoted - * string in RFC 3339 format, with sub-second precision added if present. + * string in the RFC 3339 format with sub-second precision. If the timestamp + * cannot be represented as valid RFC 3339 (e.g., the year is out of range), + * then an error is reported. */ - marshalJSON(): string + marshalJSON(): string | Array } interface Time { /** - * UnmarshalJSON implements the json.Unmarshaler interface. The time is - * expected to be a quoted string in RFC 3339 format. + * UnmarshalJSON implements the json.Unmarshaler interface. The time must be + * a quoted string in the RFC 3339 format. */ - unmarshalJSON(data: string): void + unmarshalJSON(data: string | Array): void } interface Time { /** * MarshalText implements the encoding.TextMarshaler interface. The time is - * formatted in RFC 3339 format, with sub-second precision added if - * present. + * formatted in RFC 3339 format with sub-second precision. If the timestamp + * cannot be represented as valid RFC 3339 (e.g., the year is out of range), + * then an error is reported. */ - marshalText(): string + marshalText(): string | Array } interface Time { /** * UnmarshalText implements the encoding.TextUnmarshaler interface. The time - * is expected to be in RFC 3339 format. + * must be in the RFC 3339 format. */ - unmarshalText(data: string): void + unmarshalText(data: string | Array): void } interface Time { /** @@ -7208,265 +7632,29 @@ namespace time { } } -/** - * Package fs defines basic interfaces to a file system. A file system can be - * provided by the host operating system but also by other packages. - */ -namespace fs { - /** - * An FS provides access to a hierarchical file system. 
- * - * The FS interface is the minimum implementation required of the file system. - * A file system may implement additional interfaces, such as ReadFileFS, to - * provide additional or optimized functionality. - */ - interface FS { - /** - * Open opens the named file. - * - * When Open returns an error, it should be of type *PathError with the Op - * field set to "open", the Path field set to name, and the Err field - * describing the problem. - * - * Open should reject attempts to open names that do not satisfy - * ValidPath(name), returning a *PathError with Err set to ErrInvalid or - * ErrNotExist. - */ - open(name: string): File - } - /** - * A File provides access to a single file. The File interface is the minimum - * implementation required of the file. Directory files should also implement - * ReadDirFile. A file may implement io.ReaderAt or io.Seeker as - * optimizations. - */ - interface File { - stat(): FileInfo - read(_arg0: string): number - close(): void - } - /** - * A DirEntry is an entry read from a directory (using the ReadDir function or - * a ReadDirFile's ReadDir method). - */ - interface DirEntry { - /** - * Name returns the name of the file (or subdirectory) described by the - * entry. This name is only the final element of the path (the base name), - * not the entire path. For example, Name would return "hello.go" not - * "home/gopher/hello.go". - */ - name(): string - /** IsDir reports whether the entry describes a directory. */ - isDir(): boolean - /** - * Type returns the type bits for the entry. The type bits are a subset of - * the usual FileMode bits, those returned by the FileMode.Type method. - */ - type(): FileMode - /** - * Info returns the FileInfo for the file or subdirectory described by the - * entry. The returned FileInfo may be from the time of the original - * directory read or from the time of the call to Info. 
If the file has been - * removed or renamed since the directory read, Info may return an error - * satisfying errors.Is(err, ErrNotExist). If the entry denotes a symbolic - * link, Info reports the information about the link itself, not the link's - * target. - */ - info(): FileInfo - } - /** A FileInfo describes a file and is returned by Stat. */ - interface FileInfo { - name(): string // base name of the file - size(): number // length in bytes for regular files; system-dependent for others - mode(): FileMode // file mode bits - modTime(): time.Time // modification time - isDir(): boolean // abbreviation for Mode().IsDir() - sys(): any // underlying data source (can return nil) - } - /** - * A FileMode represents a file's mode and permission bits. The bits have the - * same definition on all systems, so that information about files can be - * moved from one system to another portably. Not all bits apply to all - * systems. The only required bit is ModeDir for directories. - */ - interface FileMode extends Number {} - interface FileMode { - string(): string - } - interface FileMode { - /** - * IsDir reports whether m describes a directory. That is, it tests for the - * ModeDir bit being set in m. - */ - isDir(): boolean - } - interface FileMode { - /** - * IsRegular reports whether m describes a regular file. That is, it tests - * that no mode type bits are set. - */ - isRegular(): boolean - } - interface FileMode { - /** Perm returns the Unix permission bits in m (m & ModePerm). */ - perm(): FileMode - } - interface FileMode { - /** Type returns type bits in m (m & ModeType). */ - type(): FileMode - } - /** PathError records an error and the operation and file path that caused it. */ - interface PathError { - op: string - path: string - err: Error - } - interface PathError { - error(): string - } - interface PathError { - unwrap(): void - } - interface PathError { - /** Timeout reports whether this error represents a timeout. 
*/ - timeout(): boolean - } - /** - * WalkDirFunc is the type of the function called by WalkDir to visit each - * file or directory. - * - * The path argument contains the argument to WalkDir as a prefix. That is, if - * WalkDir is called with root argument "dir" and finds a file named "a" in - * that directory, the walk function will be called with argument "dir/a". - * - * The d argument is the fs.DirEntry for the named path. - * - * The error result returned by the function controls how WalkDir continues. - * If the function returns the special value SkipDir, WalkDir skips the - * current directory (path if d.IsDir() is true, otherwise path's parent - * directory). Otherwise, if the function returns a non-nil error, WalkDir - * stops entirely and returns that error. - * - * The err argument reports an error related to path, signaling that WalkDir - * will not walk into that directory. The function can decide how to handle - * that error; as described earlier, returning the error will cause WalkDir to - * stop walking the entire tree. - * - * WalkDir calls the function with a non-nil err argument in two cases. - * - * First, if the initial fs.Stat on the root directory fails, WalkDir calls - * the function with path set to root, d set to nil, and err set to the error - * from fs.Stat. - * - * Second, if a directory's ReadDir method fails, WalkDir calls the function - * with path set to the directory's path, d set to an fs.DirEntry describing - * the directory, and err set to the error from ReadDir. In this second case, - * the function is called twice with the path of the directory: the first call - * is before the directory read is attempted and has err set to nil, giving - * the function a chance to return SkipDir and avoid the ReadDir entirely. The - * second call is after a failed ReadDir and reports the error from ReadDir. - * (If ReadDir succeeds, there is no second call.) 
- * - * The differences between WalkDirFunc compared to filepath.WalkFunc are: - * - * - The second argument has type fs.DirEntry instead of fs.FileInfo. - * - The function is called before reading a directory, to allow SkipDir - * to bypass the directory read entirely. - * - If a directory read fails, the function is called a second time - * for that directory to report the error. - */ - interface WalkDirFunc { - (path: string, d: DirEntry, err: Error): void - } -} - -/** - * Package bytes implements functions for the manipulation of byte slices. It is - * analogous to the facilities of the strings package. - */ -namespace bytes { - /** - * A Reader implements the io.Reader, io.ReaderAt, io.WriterTo, io.Seeker, - * io.ByteScanner, and io.RuneScanner interfaces by reading from a byte slice. - * Unlike a Buffer, a Reader is read-only and supports seeking. The zero value - * for Reader operates like a Reader of an empty slice. - */ - interface Reader {} - interface Reader { - /** Len returns the number of bytes of the unread portion of the slice. */ - len(): number - } - interface Reader { - /** - * Size returns the original length of the underlying byte slice. Size is - * the number of bytes available for reading via ReadAt. The returned value - * is always the same and is not affected by calls to any other method. - */ - size(): number - } - interface Reader { - /** Read implements the io.Reader interface. */ - read(b: string): number - } - interface Reader { - /** ReadAt implements the io.ReaderAt interface. */ - readAt(b: string, off: number): number - } - interface Reader { - /** ReadByte implements the io.ByteReader interface. */ - readByte(): string - } - interface Reader { - /** - * UnreadByte complements ReadByte in implementing the io.ByteScanner - * interface. - */ - unreadByte(): void - } - interface Reader { - /** ReadRune implements the io.RuneReader interface. 
*/ - readRune(): [string, number] - } - interface Reader { - /** - * UnreadRune complements ReadRune in implementing the io.RuneScanner - * interface. - */ - unreadRune(): void - } - interface Reader { - /** Seek implements the io.Seeker interface. */ - seek(offset: number, whence: number): number - } - interface Reader { - /** WriteTo implements the io.WriterTo interface. */ - writeTo(w: io.Writer): number - } - interface Reader { - /** Reset resets the Reader to be reading from b. */ - reset(b: string): void - } -} - /** * Package context defines the Context type, which carries deadlines, * cancellation signals, and other request-scoped values across API boundaries * and between processes. * - * Incoming requests to a server should create a Context, and outgoing calls to - * servers should accept a Context. The chain of function calls between them + * Incoming requests to a server should create a [Context], and outgoing calls + * to servers should accept a Context. The chain of function calls between them * must propagate the Context, optionally replacing it with a derived Context - * created using WithCancel, WithDeadline, WithTimeout, or WithValue. When a - * Context is canceled, all Contexts derived from it are also canceled. + * created using [WithCancel], [WithDeadline], [WithTimeout], or [WithValue]. + * When a Context is canceled, all Contexts derived from it are also canceled. * - * The WithCancel, WithDeadline, and WithTimeout functions take a Context (the - * parent) and return a derived Context (the child) and a CancelFunc. Calling - * the CancelFunc cancels the child and its children, removes the parent's - * reference to the child, and stops any associated timers. Failing to call the - * CancelFunc leaks the child and its children until the parent is canceled or - * the timer fires. The go vet tool checks that CancelFuncs are used on all - * control-flow paths. 
+ * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a Context + * (the parent) and return a derived Context (the child) and a [CancelFunc]. + * Calling the CancelFunc cancels the child and its children, removes the + * parent's reference to the child, and stops any associated timers. Failing to + * call the CancelFunc leaks the child and its children until the parent is + * canceled or the timer fires. The go vet tool checks that CancelFuncs are used + * on all control-flow paths. + * + * The [WithCancelCause] function returns a [CancelCauseFunc], which takes an + * error and records it as the cancellation cause. Calling [Cause] on the + * canceled context or any of its children retrieves the cause. If no cause is + * specified, Cause(ctx) returns the same value as ctx.Err(). * * Programs that use Contexts should follow these rules to keep interfaces * consistent across packages and enable static analysis tools to check context @@ -7480,8 +7668,8 @@ namespace bytes { * // ... use ctx ... * } * - * Do not pass a nil Context, even if a function permits it. Pass context.TODO - * if you are unsure about which Context to use. + * Do not pass a nil [Context], even if a function permits it. Pass + * [context.TODO] if you are unsure about which Context to use. * * Use context Values only for request-scoped data that transits processes and * APIs, not for passing optional parameters to functions. @@ -7500,6 +7688,7 @@ namespace context { * Context's methods may be called by multiple goroutines simultaneously. */ interface Context { + [key: string]: any /** * Deadline returns the time when work done on behalf of this context should * be canceled. Deadline returns ok==false when no deadline is set. @@ -7586,1890 +7775,181 @@ namespace context { } /** - * Package exec runs external commands. It wraps os.StartProcess to make it - * easier to remap stdin and stdout, connect I/O with pipes, and do other - * adjustments. 
- * - * Unlike the "system" library call from C and other languages, the os/exec - * package intentionally does not invoke the system shell and does not expand - * any glob patterns or handle other expansions, pipelines, or redirections - * typically done by shells. The package behaves more like C's "exec" family of - * functions. To expand glob patterns, either call the shell directly, taking - * care to escape any dangerous input, or use the path/filepath package's Glob - * function. To expand environment variables, use package os's ExpandEnv. - * - * Note that the examples in this package assume a Unix system. They may not run - * on Windows, and they do not run in the Go Playground used by golang.org and - * godoc.org. + * Package fs defines basic interfaces to a file system. A file system can be + * provided by the host operating system but also by other packages. */ -namespace exec { +namespace fs { /** - * Cmd represents an external command being prepared or run. + * An FS provides access to a hierarchical file system. * - * A Cmd cannot be reused after calling its Run, Output or CombinedOutput - * methods. + * The FS interface is the minimum implementation required of the file system. + * A file system may implement additional interfaces, such as ReadFileFS, to + * provide additional or optimized functionality. */ - interface Cmd { + interface FS { + [key: string]: any /** - * Path is the path of the command to run. + * Open opens the named file. * - * This is the only field that must be set to a non-zero value. If Path is - * relative, it is evaluated relative to Dir. + * When Open returns an error, it should be of type *PathError with the Op + * field set to "open", the Path field set to name, and the Err field + * describing the problem. + * + * Open should reject attempts to open names that do not satisfy + * ValidPath(name), returning a *PathError with Err set to ErrInvalid or + * ErrNotExist. 
*/ - path: string - /** - * Args holds command line arguments, including the command as Args[0]. If - * the Args field is empty or nil, Run uses {Path}. - * - * In typical use, both Path and Args are set by calling Command. - */ - args: Array - /** - * Env specifies the environment of the process. Each entry is of the form - * "key=value". If Env is nil, the new process uses the current process's - * environment. If Env contains duplicate environment keys, only the last - * value in the slice for each duplicate key is used. As a special case on - * Windows, SYSTEMROOT is always added if missing and not explicitly set to - * the empty string. - */ - env: Array - /** - * Dir specifies the working directory of the command. If Dir is the empty - * string, Run runs the command in the calling process's current directory. - */ - dir: string - /** - * Stdin specifies the process's standard input. - * - * If Stdin is nil, the process reads from the null device (os.DevNull). - * - * If Stdin is an *os.File, the process's standard input is connected - * directly to that file. - * - * Otherwise, during the execution of the command a separate goroutine reads - * from Stdin and delivers that data to the command over a pipe. In this - * case, Wait does not complete until the goroutine stops copying, either - * because it has reached the end of Stdin (EOF or a read error) or because - * writing to the pipe returned an error. - */ - stdin: io.Reader - /** - * Stdout and Stderr specify the process's standard output and error. - * - * If either is nil, Run connects the corresponding file descriptor to the - * null device (os.DevNull). - * - * If either is an *os.File, the corresponding output from the process is - * connected directly to that file. - * - * Otherwise, during the execution of the command a separate goroutine reads - * from the process over a pipe and delivers that data to the corresponding - * Writer. 
In this case, Wait does not complete until the goroutine reaches - * EOF or encounters an error. - * - * If Stdout and Stderr are the same writer, and have a type that can be - * compared with ==, at most one goroutine at a time will call Write. - */ - stdout: io.Writer - stderr: io.Writer - /** - * ExtraFiles specifies additional open files to be inherited by the new - * process. It does not include standard input, standard output, or standard - * error. If non-nil, entry i becomes file descriptor 3+i. - * - * ExtraFiles is not supported on Windows. - */ - extraFiles: Array - /** - * SysProcAttr holds optional, operating system-specific attributes. Run - * passes it to os.StartProcess as the os.ProcAttr's Sys field. - */ - sysProcAttr?: syscall.SysProcAttr - /** Process is the underlying process, once started. */ - process?: os.Process - /** - * ProcessState contains information about an exited process, available - * after a call to Wait or Run. - */ - processState?: os.ProcessState + open(name: string): File } - interface Cmd { + /** + * A File provides access to a single file. The File interface is the minimum + * implementation required of the file. Directory files should also implement + * ReadDirFile. A file may implement io.ReaderAt or io.Seeker as + * optimizations. + */ + interface File { + [key: string]: any + stat(): FileInfo + read(_arg0: string | Array): number + close(): void + } + /** + * A DirEntry is an entry read from a directory (using the ReadDir function or + * a ReadDirFile's ReadDir method). + */ + interface DirEntry { + [key: string]: any /** - * String returns a human-readable description of c. It is intended only for - * debugging. In particular, it is not suitable for use as input to a shell. - * The output of String may vary across Go releases. + * Name returns the name of the file (or subdirectory) described by the + * entry. This name is only the final element of the path (the base name), + * not the entire path. 
For example, Name would return "hello.go" not + * "home/gopher/hello.go". */ + name(): string + /** IsDir reports whether the entry describes a directory. */ + isDir(): boolean + /** + * Type returns the type bits for the entry. The type bits are a subset of + * the usual FileMode bits, those returned by the FileMode.Type method. + */ + type(): FileMode + /** + * Info returns the FileInfo for the file or subdirectory described by the + * entry. The returned FileInfo may be from the time of the original + * directory read or from the time of the call to Info. If the file has been + * removed or renamed since the directory read, Info may return an error + * satisfying errors.Is(err, ErrNotExist). If the entry denotes a symbolic + * link, Info reports the information about the link itself, not the link's + * target. + */ + info(): FileInfo + } + /** A FileInfo describes a file and is returned by Stat. */ + interface FileInfo { + [key: string]: any + name(): string // base name of the file + size(): number // length in bytes for regular files; system-dependent for others + mode(): FileMode // file mode bits + modTime(): time.Time // modification time + isDir(): boolean // abbreviation for Mode().IsDir() + sys(): any // underlying data source (can return nil) + } + /** + * A FileMode represents a file's mode and permission bits. The bits have the + * same definition on all systems, so that information about files can be + * moved from one system to another portably. Not all bits apply to all + * systems. The only required bit is ModeDir for directories. + */ + interface FileMode extends Number {} + interface FileMode { string(): string } - interface Cmd { + interface FileMode { /** - * Run starts the specified command and waits for it to complete. - * - * The returned error is nil if the command runs, has no problems copying - * stdin, stdout, and stderr, and exits with a zero exit status. 
- * - * If the command starts but does not complete successfully, the error is of - * type *ExitError. Other error types may be returned for other situations. - * - * If the calling goroutine has locked the operating system thread with - * runtime.LockOSThread and modified any inheritable OS-level thread state - * (for example, Linux or Plan 9 name spaces), the new process will inherit - * the caller's thread state. + * IsDir reports whether m describes a directory. That is, it tests for the + * ModeDir bit being set in m. */ - run(): void + isDir(): boolean } - interface Cmd { + interface FileMode { /** - * Start starts the specified command but does not wait for it to complete. - * - * If Start returns successfully, the c.Process field will be set. - * - * The Wait method will return the exit code and release associated - * resources once the command exits. + * IsRegular reports whether m describes a regular file. That is, it tests + * that no mode type bits are set. */ - start(): void + isRegular(): boolean } - interface Cmd { - /** - * Wait waits for the command to exit and waits for any copying to stdin or - * copying from stdout or stderr to complete. - * - * The command must have been started by Start. - * - * The returned error is nil if the command runs, has no problems copying - * stdin, stdout, and stderr, and exits with a zero exit status. - * - * If the command fails to run or doesn't complete successfully, the error - * is of type *ExitError. Other error types may be returned for I/O - * problems. - * - * If any of c.Stdin, c.Stdout or c.Stderr are not an *os.File, Wait also - * waits for the respective I/O loop copying to or from the process to - * complete. - * - * Wait releases any resources associated with the Cmd. - */ - wait(): void + interface FileMode { + /** Perm returns the Unix permission bits in m (m & ModePerm). */ + perm(): FileMode } - interface Cmd { - /** - * Output runs the command and returns its standard output. 
Any returned - * error will usually be of type *ExitError. If c.Stderr was nil, Output - * populates ExitError.Stderr. - */ - output(): string + interface FileMode { + /** Type returns type bits in m (m & ModeType). */ + type(): FileMode } - interface Cmd { - /** - * CombinedOutput runs the command and returns its combined standard output - * and standard error. - */ - combinedOutput(): string + /** PathError records an error and the operation and file path that caused it. */ + interface PathError { + op: string + path: string + err: Error } - interface Cmd { - /** - * StdinPipe returns a pipe that will be connected to the command's standard - * input when the command starts. The pipe will be closed automatically - * after Wait sees the command exit. A caller need only call Close to force - * the pipe to close sooner. For example, if the command being run will not - * exit until standard input is closed, the caller must close the pipe. - */ - stdinPipe(): io.WriteCloser + interface PathError { + error(): string } - interface Cmd { - /** - * StdoutPipe returns a pipe that will be connected to the command's - * standard output when the command starts. - * - * Wait will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. For the same reason, it is - * incorrect to call Run when using StdoutPipe. See the example for - * idiomatic usage. - */ - stdoutPipe(): io.ReadCloser + interface PathError { + unwrap(): void } - interface Cmd { - /** - * StderrPipe returns a pipe that will be connected to the command's - * standard error when the command starts. - * - * Wait will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. For the same reason, it is - * incorrect to use Run when using StderrPipe. 
See the StdoutPipe example - * for idiomatic usage. - */ - stderrPipe(): io.ReadCloser - } -} - -/** - * Package sql provides a generic interface around SQL (or SQL-like) databases. - * - * The sql package must be used in conjunction with a database driver. See - * https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until after - * the query is completed. - * - * For usage examples, see the wiki page at https://golang.org/s/sqlwiki. - */ -namespace sql { - /** TxOptions holds the transaction options to be used in DB.BeginTx. */ - interface TxOptions { - /** - * Isolation is the transaction isolation level. If zero, the driver or - * database's default level is used. - */ - isolation: IsolationLevel - readOnly: boolean + interface PathError { + /** Timeout reports whether this error represents a timeout. */ + timeout(): boolean } /** - * DB is a database handle representing a pool of zero or more underlying - * connections. It's safe for concurrent use by multiple goroutines. + * WalkDirFunc is the type of the function called by WalkDir to visit each + * file or directory. * - * The sql package creates and frees connections automatically; it also - * maintains a free pool of idle connections. If the database has a concept of - * per-connection state, such state can be reliably observed within a - * transaction (Tx) or connection (Conn). Once DB.Begin is called, the - * returned Tx is bound to a single connection. Once Commit or Rollback is - * called on the transaction, that transaction's connection is returned to - * DB's idle connection pool. The pool size can be controlled with - * SetMaxIdleConns. - */ - interface DB {} - interface DB { - /** - * PingContext verifies a connection to the database is still alive, - * establishing a connection if necessary. 
- */ - pingContext(ctx: context.Context): void - } - interface DB { - /** - * Ping verifies a connection to the database is still alive, establishing a - * connection if necessary. - * - * Ping uses context.Background internally; to specify the context, use - * PingContext. - */ - ping(): void - } - interface DB { - /** - * Close closes the database and prevents new queries from starting. Close - * then waits for all queries that have started processing on the server to - * finish. - * - * It is rare to Close a DB, as the DB handle is meant to be long-lived and - * shared between many goroutines. - */ - close(): void - } - interface DB { - /** - * SetMaxIdleConns sets the maximum number of connections in the idle - * connection pool. - * - * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns, - * then the new MaxIdleConns will be reduced to match the MaxOpenConns - * limit. - * - * If n <= 0, no idle connections are retained. - * - * The default max idle connections is currently 2. This may change in a - * future release. - */ - setMaxIdleConns(n: number): void - } - interface DB { - /** - * SetMaxOpenConns sets the maximum number of open connections to the - * database. - * - * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than - * MaxIdleConns, then MaxIdleConns will be reduced to match the new - * MaxOpenConns limit. - * - * If n <= 0, then there is no limit on the number of open connections. The - * default is 0 (unlimited). - */ - setMaxOpenConns(n: number): void - } - interface DB { - /** - * SetConnMaxLifetime sets the maximum amount of time a connection may be - * reused. - * - * Expired connections may be closed lazily before reuse. - * - * If d <= 0, connections are not closed due to a connection's age. - */ - setConnMaxLifetime(d: time.Duration): void - } - interface DB { - /** - * SetConnMaxIdleTime sets the maximum amount of time a connection may be - * idle. 
- * - * Expired connections may be closed lazily before reuse. - * - * If d <= 0, connections are not closed due to a connection's idle time. - */ - setConnMaxIdleTime(d: time.Duration): void - } - interface DB { - /** Stats returns database statistics. */ - stats(): DBStats - } - interface DB { - /** - * PrepareContext creates a prepared statement for later queries or - * executions. Multiple queries or executions may be run concurrently from - * the returned statement. The caller must call the statement's Close method - * when the statement is no longer needed. - * - * The provided context is used for the preparation of the statement, not - * for the execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): Stmt | undefined - } - interface DB { - /** - * Prepare creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the returned - * statement. The caller must call the statement's Close method when the - * statement is no longer needed. - * - * Prepare uses context.Background internally; to specify the context, use - * PrepareContext. - */ - prepare(query: string): Stmt | undefined - } - interface DB { - /** - * ExecContext executes a query without returning any rows. The args are for - * any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface DB { - /** - * Exec executes a query without returning any rows. The args are for any - * placeholder parameters in the query. - * - * Exec uses context.Background internally; to specify the context, use - * ExecContext. - */ - exec(query: string, ...args: any[]): Result - } - interface DB { - /** - * QueryContext executes a query that returns rows, typically a SELECT. The - * args are for any placeholder parameters in the query. 
- */ - queryContext( - ctx: context.Context, - query: string, - ...args: any[] - ): Rows | undefined - } - interface DB { - /** - * Query executes a query that returns rows, typically a SELECT. The args - * are for any placeholder parameters in the query. - * - * Query uses context.Background internally; to specify the context, use - * QueryContext. - */ - query(query: string, ...args: any[]): Rows | undefined - } - interface DB { - /** - * QueryRowContext executes a query that is expected to return at most one - * row. QueryRowContext always returns a non-nil value. Errors are deferred - * until Row's Scan method is called. If the query selects no rows, the - * *Row's Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the - * first selected row and discards the rest. - */ - queryRowContext( - ctx: context.Context, - query: string, - ...args: any[] - ): Row | undefined - } - interface DB { - /** - * QueryRow executes a query that is expected to return at most one row. - * QueryRow always returns a non-nil value. Errors are deferred until Row's - * Scan method is called. If the query selects no rows, the *Row's Scan will - * return ErrNoRows. Otherwise, the *Row's Scan scans the first selected row - * and discards the rest. - * - * QueryRow uses context.Background internally; to specify the context, use - * QueryRowContext. - */ - queryRow(query: string, ...args: any[]): Row | undefined - } - interface DB { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled - * back. If the context is canceled, the sql package will roll back the - * transaction. Tx.Commit will return an error if the context provided to - * BeginTx is canceled. - * - * The provided TxOptions is optional and may be nil if defaults should be - * used. If a non-default isolation level is used that the driver doesn't - * support, an error will be returned. 
- */ - beginTx(ctx: context.Context, opts: TxOptions): Tx | undefined - } - interface DB { - /** - * Begin starts a transaction. The default isolation level is dependent on - * the driver. - * - * Begin uses context.Background internally; to specify the context, use - * BeginTx. - */ - begin(): Tx | undefined - } - interface DB { - /** Driver returns the database's underlying driver. */ - driver(): any - } - interface DB { - /** - * Conn returns a single connection by either opening a new connection or - * returning an existing connection from the connection pool. Conn will - * block until either a connection is returned or ctx is canceled. Queries - * run on the same Conn will be run in the same database session. - * - * Every Conn must be returned to the database pool after use by calling - * Conn.Close. - */ - conn(ctx: context.Context): Conn | undefined - } - /** - * Tx is an in-progress database transaction. + * The path argument contains the argument to WalkDir as a prefix. That is, if + * WalkDir is called with root argument "dir" and finds a file named "a" in + * that directory, the walk function will be called with argument "dir/a". * - * A transaction must end with a call to Commit or Rollback. + * The d argument is the fs.DirEntry for the named path. * - * After a call to Commit or Rollback, all operations on the transaction fail - * with ErrTxDone. + * The error result returned by the function controls how WalkDir continues. + * If the function returns the special value SkipDir, WalkDir skips the + * current directory (path if d.IsDir() is true, otherwise path's parent + * directory). If the function returns the special value SkipAll, WalkDir + * skips all remaining files and directories. Otherwise, if the function + * returns a non-nil error, WalkDir stops entirely and returns that error. * - * The statements prepared for a transaction by calling the transaction's - * Prepare or Stmt methods are closed by the call to Commit or Rollback. 
- */ - interface Tx {} - interface Tx { - /** Commit commits the transaction. */ - commit(): void - } - interface Tx { - /** Rollback aborts the transaction. */ - rollback(): void - } - interface Tx { - /** - * PrepareContext creates a prepared statement for use within a transaction. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * To use an existing prepared statement on this transaction, see Tx.Stmt. - * - * The provided context will be used for the preparation of the context, not - * for the execution of the returned statement. The returned statement will - * run in the transaction context. - */ - prepareContext(ctx: context.Context, query: string): Stmt | undefined - } - interface Tx { - /** - * Prepare creates a prepared statement for use within a transaction. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * To use an existing prepared statement on this transaction, see Tx.Stmt. - * - * Prepare uses context.Background internally; to specify the context, use - * PrepareContext. - */ - prepare(query: string): Stmt | undefined - } - interface Tx { - /** - * StmtContext returns a transaction-specific prepared statement from an - * existing statement. - * - * Example: updateMoney, err := db.Prepare("UPDATE balance SET money=money+? - * WHERE id=?") ... tx, err := db.Begin() ... res, err := - * tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203) - * - * The provided context is used for the preparation of the statement, not - * for the execution of the statement. - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. 
- */ - stmtContext(ctx: context.Context, stmt: Stmt): Stmt | undefined - } - interface Tx { - /** - * Stmt returns a transaction-specific prepared statement from an existing - * statement. - * - * Example: updateMoney, err := db.Prepare("UPDATE balance SET money=money+? - * WHERE id=?") ... tx, err := db.Begin() ... res, err := - * tx.Stmt(updateMoney).Exec(123.45, 98293203) - * - * The returned statement operates within the transaction and will be closed - * when the transaction has been committed or rolled back. - * - * Stmt uses context.Background internally; to specify the context, use - * StmtContext. - */ - stmt(stmt: Stmt): Stmt | undefined - } - interface Tx { - /** - * ExecContext executes a query that doesn't return rows. For example: an - * INSERT and UPDATE. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Tx { - /** - * Exec executes a query that doesn't return rows. For example: an INSERT - * and UPDATE. - * - * Exec uses context.Background internally; to specify the context, use - * ExecContext. - */ - exec(query: string, ...args: any[]): Result - } - interface Tx { - /** QueryContext executes a query that returns rows, typically a SELECT. */ - queryContext( - ctx: context.Context, - query: string, - ...args: any[] - ): Rows | undefined - } - interface Tx { - /** - * Query executes a query that returns rows, typically a SELECT. - * - * Query uses context.Background internally; to specify the context, use - * QueryContext. - */ - query(query: string, ...args: any[]): Rows | undefined - } - interface Tx { - /** - * QueryRowContext executes a query that is expected to return at most one - * row. QueryRowContext always returns a non-nil value. Errors are deferred - * until Row's Scan method is called. If the query selects no rows, the - * *Row's Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the - * first selected row and discards the rest. 
- */ - queryRowContext( - ctx: context.Context, - query: string, - ...args: any[] - ): Row | undefined - } - interface Tx { - /** - * QueryRow executes a query that is expected to return at most one row. - * QueryRow always returns a non-nil value. Errors are deferred until Row's - * Scan method is called. If the query selects no rows, the *Row's Scan will - * return ErrNoRows. Otherwise, the *Row's Scan scans the first selected row - * and discards the rest. - * - * QueryRow uses context.Background internally; to specify the context, use - * QueryRowContext. - */ - queryRow(query: string, ...args: any[]): Row | undefined - } - /** - * Stmt is a prepared statement. A Stmt is safe for concurrent use by multiple - * goroutines. + * The err argument reports an error related to path, signaling that WalkDir + * will not walk into that directory. The function can decide how to handle + * that error; as described earlier, returning the error will cause WalkDir to + * stop walking the entire tree. * - * If a Stmt is prepared on a Tx or Conn, it will be bound to a single - * underlying connection forever. If the Tx or Conn closes, the Stmt will - * become unusable and all operations will return an error. If a Stmt is - * prepared on a DB, it will remain usable for the lifetime of the DB. When - * the Stmt needs to execute on a new underlying connection, it will prepare - * itself on the new connection automatically. + * WalkDir calls the function with a non-nil err argument in two cases. + * + * First, if the initial fs.Stat on the root directory fails, WalkDir calls + * the function with path set to root, d set to nil, and err set to the error + * from fs.Stat. + * + * Second, if a directory's ReadDir method fails, WalkDir calls the function + * with path set to the directory's path, d set to an fs.DirEntry describing + * the directory, and err set to the error from ReadDir. 
In this second case, + * the function is called twice with the path of the directory: the first call + * is before the directory read is attempted and has err set to nil, giving + * the function a chance to return SkipDir or SkipAll and avoid the ReadDir + * entirely. The second call is after a failed ReadDir and reports the error + * from ReadDir. (If ReadDir succeeds, there is no second call.) + * + * The differences between WalkDirFunc compared to filepath.WalkFunc are: + * + * - The second argument has type fs.DirEntry instead of fs.FileInfo. + * - The function is called before reading a directory, to allow SkipDir + * or SkipAll to bypass the directory read entirely or skip all remaining + * files and directories respectively. + * - If a directory read fails, the function is called a second time + * for that directory to report the error. */ - interface Stmt {} - interface Stmt { - /** - * ExecContext executes a prepared statement with the given arguments and - * returns a Result summarizing the effect of the statement. - */ - execContext(ctx: context.Context, ...args: any[]): Result - } - interface Stmt { - /** - * Exec executes a prepared statement with the given arguments and returns a - * Result summarizing the effect of the statement. - * - * Exec uses context.Background internally; to specify the context, use - * ExecContext. - */ - exec(...args: any[]): Result - } - interface Stmt { - /** - * QueryContext executes a prepared query statement with the given arguments - * and returns the query results as a *Rows. - */ - queryContext(ctx: context.Context, ...args: any[]): Rows | undefined - } - interface Stmt { - /** - * Query executes a prepared query statement with the given arguments and - * returns the query results as a *Rows. - * - * Query uses context.Background internally; to specify the context, use - * QueryContext. 
- */ - query(...args: any[]): Rows | undefined - } - interface Stmt { - /** - * QueryRowContext executes a prepared query statement with the given - * arguments. If an error occurs during the execution of the statement, that - * error will be returned by a call to Scan on the returned *Row, which is - * always non-nil. If the query selects no rows, the *Row's Scan will return - * ErrNoRows. Otherwise, the *Row's Scan scans the first selected row and - * discards the rest. - */ - queryRowContext(ctx: context.Context, ...args: any[]): Row | undefined - } - interface Stmt { - /** - * QueryRow executes a prepared query statement with the given arguments. If - * an error occurs during the execution of the statement, that error will be - * returned by a call to Scan on the returned *Row, which is always non-nil. - * If the query selects no rows, the *Row's Scan will return ErrNoRows. - * Otherwise, the *Row's Scan scans the first selected row and discards the - * rest. - * - * Example usage: - * - * Var name string err := nameByUseridStmt.QueryRow(id).Scan(&name) - * - * QueryRow uses context.Background internally; to specify the context, use - * QueryRowContext. - */ - queryRow(...args: any[]): Row | undefined - } - interface Stmt { - /** Close closes the statement. */ - close(): void - } - /** - * Rows is the result of a query. Its cursor starts before the first row of - * the result set. Use Next to advance from row to row. - */ - interface Rows {} - interface Rows { - /** - * Next prepares the next result row for reading with the Scan method. It - * returns true on success, or false if there is no next result row or an - * error happened while preparing it. Err should be consulted to distinguish - * between the two cases. - * - * Every call to Scan, even the first one, must be preceded by a call to - * Next. - */ - next(): boolean - } - interface Rows { - /** - * NextResultSet prepares the next result set for reading. 
It reports - * whether there is further result sets, or false if there is no further - * result set or if there is an error advancing to it. The Err method should - * be consulted to distinguish between the two cases. - * - * After calling NextResultSet, the Next method should always be called - * before scanning. If there are further result sets they may not have rows - * in the result set. - */ - nextResultSet(): boolean - } - interface Rows { - /** - * Err returns the error, if any, that was encountered during iteration. Err - * may be called after an explicit or implicit Close. - */ - err(): void - } - interface Rows { - /** - * Columns returns the column names. Columns returns an error if the rows - * are closed. - */ - columns(): Array - } - interface Rows { - /** - * ColumnTypes returns column information such as column type, length, and - * nullable. Some information may not be available from some drivers. - */ - columnTypes(): Array - } - interface Rows { - /** - * Scan copies the columns in the current row into the values pointed at by - * dest. The number of values in dest must be the same as the number of - * columns in Rows. - * - * Scan converts columns read from the database into the following common Go - * types and special types provided by the sql package: - * - * *string - * *[]byte - * *int, *int8, *int16, *int32, *int64 - * *uint, *uint8, *uint16, *uint32, *uint64 - * *bool - * *float32, *float64 - * *interface{} - * *RawBytes - * *Rows (cursor value) - * any type implementing Scanner (see Scanner docs) - * - * In the most simple case, if the type of the value from the source column - * is an integer, bool or string type T and dest is of type *T, Scan simply - * assigns the value through the pointer. - * - * Scan also converts between string and numeric types, as long as no - * information would be lost. 
While Scan stringifies all numbers scanned - * from numeric database columns into *string, scans into numeric types are - * checked for overflow. For example, a float64 with value 300 or a string - * with value "300" can scan into a uint16, but not into a uint8, though - * float64(255) or "255" can scan into a uint8. One exception is that scans - * of some float64 numbers to strings may lose information when - * stringifying. In general, scan floating point columns into *float64. - * - * If a dest argument has type *[]byte, Scan saves in that argument a copy - * of the corresponding data. The copy is owned by the caller and can be - * modified and held indefinitely. The copy can be avoided by using an - * argument of type *RawBytes instead; see the documentation for RawBytes - * for restrictions on its use. - * - * If an argument has type *interface{}, Scan copies the value provided by - * the underlying driver without conversion. When scanning from a source - * value of type []byte to *interface{}, a copy of the slice is made and the - * caller owns the result. - * - * Source values of type time.Time may be scanned into values of type - * *time.Time, *interface{}, *string, or *[]byte. When converting to the - * latter two, time.RFC3339Nano is used. - * - * Source values of type bool may be scanned into types *bool, *interface{}, - * *string, *[]byte, or *RawBytes. - * - * For scanning into *bool, the source may be true, false, 1, 0, or string - * inputs parseable by strconv.ParseBool. - * - * Scan can also convert a cursor returned from a query, such as "select - * cursor(select * from my_table) from dual", into a *Rows value that can - * itself be scanned from. The parent select query will close any cursor - * *Rows if the parent *Rows is closed. 
- * - * If any of the first arguments implementing Scanner returns an error, that - * error will be wrapped in the returned error - */ - scan(...dest: any[]): void - } - interface Rows { - /** - * Close closes the Rows, preventing further enumeration. If Next is called - * and returns false and there are no further result sets, the Rows are - * closed automatically and it will suffice to check the result of Err. - * Close is idempotent and does not affect the result of Err. - */ - close(): void - } - /** A Result summarizes an executed SQL command. */ - interface Result { - /** - * LastInsertId returns the integer generated by the database in response to - * a command. Typically this will be from an "auto increment" column when - * inserting a new row. Not all databases support this feature, and the - * syntax of such statements varies. - */ - lastInsertId(): number - /** - * RowsAffected returns the number of rows affected by an update, insert, or - * delete. Not every database or database driver may support this. - */ - rowsAffected(): number - } -} - -namespace migrate { - /** MigrationsList defines a list with migration definitions */ - interface MigrationsList {} - interface MigrationsList { - /** Item returns a single migration from the list by its index. */ - item(index: number): Migration | undefined - } - interface MigrationsList { - /** Items returns the internal migrations list slice. */ - items(): Array - } - interface MigrationsList { - /** - * Register adds new migration definition to the list. - * - * If `optFilename` is not provided, it will try to get the name from its - * .go file. - * - * The list will be sorted automatically based on the migrations file name. - */ - register( - up: (db: dbx.Builder) => void, - down: (db: dbx.Builder) => void, - ...optFilename: string[] - ): void - } -} - -/** - * Package cobra is a commander providing a simple interface to create powerful - * modern CLI interfaces. 
In addition to providing an interface, Cobra - * simultaneously provides a controller to organize your application code. - */ -namespace cobra { - interface Command { - /** - * GenBashCompletion generates bash completion file and writes to the passed - * writer. - */ - genBashCompletion(w: io.Writer): void - } - interface Command { - /** GenBashCompletionFile generates bash completion file. */ - genBashCompletionFile(filename: string): void - } - interface Command { - /** GenBashCompletionFileV2 generates Bash completion version 2. */ - genBashCompletionFileV2(filename: string, includeDesc: boolean): void - } - interface Command { - /** - * GenBashCompletionV2 generates Bash completion file version 2 and writes - * it to the passed writer. - */ - genBashCompletionV2(w: io.Writer, includeDesc: boolean): void - } - // @ts-ignore - import flag = pflag - /** - * Command is just that, a command for your application. E.g. 'go run ...' - - * 'run' is the command. Cobra requires you to define the usage and - * description as part of your command definition to ensure usability. - */ - interface Command { - /** - * Use is the one-line usage message. Recommended syntax is as follows: - * - * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. - * ... indicates that you can specify multiple values for the previous argument. - * | indicates mutually exclusive information. You can use the argument to the left of the separator or the - * argument to the right of the separator. You cannot use both arguments in a single use of the command. - * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are - * optional, they are enclosed in brackets ([ ]). - * - * Example: add [-F file | -D dir]... [-f format] profile - */ - use: string - /** - * Aliases is an array of aliases that can be used instead of the first word - * in Use. 
- */ - aliases: Array - /** - * SuggestFor is an array of command names for which this command will be - * suggested - similar to aliases but only suggests. - */ - suggestFor: Array - /** Short is the short description shown in the 'help' output. */ - short: string - /** - * The group id under which this subcommand is grouped in the 'help' output - * of its parent. - */ - groupID: string - /** Long is the long message shown in the 'help ' output. */ - long: string - /** Example is examples of how to use the command. */ - example: string - /** - * ValidArgs is list of all valid non-flag arguments that are accepted in - * shell completions - */ - validArgs: Array - /** - * ValidArgsFunction is an optional function that provides valid non-flag - * arguments for shell completion. It is a dynamic version of using - * ValidArgs. Only one of ValidArgs and ValidArgsFunction can be used for a - * command. - */ - validArgsFunction: ( - cmd: Command, - args: Array, - toComplete: string, - ) => [Array, ShellCompDirective] - /** Expected arguments */ - args: PositionalArgs - /** - * ArgAliases is List of aliases for ValidArgs. These are not suggested to - * the user in the shell completion, but accepted if entered manually. - */ - argAliases: Array - /** - * BashCompletionFunction is custom bash functions used by the legacy bash - * autocompletion generator. For portability with other shells, it is - * recommended to instead use ValidArgsFunction - */ - bashCompletionFunction: string - /** - * Deprecated defines, if this command is deprecated and should print this - * string when used. - */ - deprecated: string - /** - * Annotations are key/value pairs that can be used by applications to - * identify or group commands. - */ - annotations: _TygojaDict - /** - * Version defines the version for this command. 
If this value is non-empty - * and the command does not define a "version" flag, a "version" boolean - * flag will be added to the command and, if specified, will print content - * of the "Version" variable. A shorthand "v" flag will also be added if the - * command does not define one. - */ - version: string - /** - * The *Run functions are executed in the following order: - * - * * PersistentPreRun() - * * PreRun() - * * Run() - * * PostRun() - * * PersistentPostRun() - * - * All functions get the same args, the arguments after the command name. - * - * PersistentPreRun: children of this command will inherit and execute. - */ - persistentPreRun: (cmd: Command, args: Array) => void - /** PersistentPreRunE: PersistentPreRun but returns an error. */ - persistentPreRunE: (cmd: Command, args: Array) => void - /** PreRun: children of this command will not inherit. */ - preRun: (cmd: Command, args: Array) => void - /** PreRunE: PreRun but returns an error. */ - preRunE: (cmd: Command, args: Array) => void - /** - * Run: Typically the actual work function. Most commands will only - * implement this. - */ - run: (cmd: Command, args: Array) => void - /** RunE: Run but returns an error. */ - runE: (cmd: Command, args: Array) => void - /** PostRun: run after the Run command. */ - postRun: (cmd: Command, args: Array) => void - /** PostRunE: PostRun but returns an error. */ - postRunE: (cmd: Command, args: Array) => void - /** - * PersistentPostRun: children of this command will inherit and execute - * after PostRun. - */ - persistentPostRun: (cmd: Command, args: Array) => void - /** PersistentPostRunE: PersistentPostRun but returns an error. 
*/ - persistentPostRunE: (cmd: Command, args: Array) => void - /** FParseErrWhitelist flag parse errors to be ignored */ - fParseErrWhitelist: FParseErrWhitelist - /** - * CompletionOptions is a set of options to control the handling of shell - * completion - */ - completionOptions: CompletionOptions - /** - * TraverseChildren parses flags on all parents before executing child - * command. - */ - traverseChildren: boolean - /** - * Hidden defines, if this command is hidden and should NOT show up in the - * list of available commands. - */ - hidden: boolean - /** SilenceErrors is an option to quiet errors down stream. */ - silenceErrors: boolean - /** SilenceUsage is an option to silence usage when an error occurs. */ - silenceUsage: boolean - /** - * DisableFlagParsing disables the flag parsing. If this is true all flags - * will be passed to the command as arguments. - */ - disableFlagParsing: boolean - /** - * DisableAutoGenTag defines, if gen tag ("Auto generated by - * spf13/cobra...") will be printed by generating docs for this command. - */ - disableAutoGenTag: boolean - /** - * DisableFlagsInUseLine will disable the addition of [flags] to the usage - * line of a command when printing help or generating docs - */ - disableFlagsInUseLine: boolean - /** - * DisableSuggestions disables the suggestions based on Levenshtein distance - * that go along with 'unknown command' messages. - */ - disableSuggestions: boolean - /** - * SuggestionsMinimumDistance defines minimum levenshtein distance to - * display suggestions. Must be > 0. - */ - suggestionsMinimumDistance: number - } - interface Command { - /** - * Context returns underlying command context. If command was executed with - * ExecuteContext or the context was set with SetContext, the previously set - * context will be returned. Otherwise, nil is returned. 
- * - * Notice that a call to Execute and ExecuteC will replace a nil context of - * a command with a context.Background, so a background context will be - * returned by Context after one of these functions has been called. - */ - context(): context.Context - } - interface Command { - /** - * SetContext sets context for the command. This context will be overwritten - * by Command.ExecuteContext or Command.ExecuteContextC. - */ - setContext(ctx: context.Context): void - } - interface Command { - /** - * SetArgs sets arguments for the command. It is set to os.Args[1:] by - * default, if desired, can be overridden particularly useful when testing. - */ - setArgs(a: Array): void - } - interface Command { - /** - * SetOutput sets the destination for usage and error messages. If output is - * nil, os.Stderr is used. Deprecated: Use SetOut and/or SetErr instead - */ - setOutput(output: io.Writer): void - } - interface Command { - /** - * SetOut sets the destination for usage messages. If newOut is nil, - * os.Stdout is used. - */ - setOut(newOut: io.Writer): void - } - interface Command { - /** - * SetErr sets the destination for error messages. If newErr is nil, - * os.Stderr is used. - */ - setErr(newErr: io.Writer): void - } - interface Command { - /** SetIn sets the source for input data If newIn is nil, os.Stdin is used. */ - setIn(newIn: io.Reader): void - } - interface Command { - /** SetUsageFunc sets usage function. Usage can be defined by application. */ - setUsageFunc(f: (_arg0: Command) => void): void - } - interface Command { - /** SetUsageTemplate sets usage template. Can be defined by Application. */ - setUsageTemplate(s: string): void - } - interface Command { - /** - * SetFlagErrorFunc sets a function to generate an error when flag parsing - * fails. - */ - setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void - } - interface Command { - /** SetHelpFunc sets help function. Can be defined by Application. 
*/ - setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void - } - interface Command { - /** SetHelpCommand sets help command. */ - setHelpCommand(cmd: Command): void - } - interface Command { - /** SetHelpCommandGroupID sets the group id of the help command. */ - setHelpCommandGroupID(groupID: string): void - } - interface Command { - /** SetCompletionCommandGroupID sets the group id of the completion command. */ - setCompletionCommandGroupID(groupID: string): void - } - interface Command { - /** - * SetHelpTemplate sets help template to be used. Application can use it to - * set custom template. - */ - setHelpTemplate(s: string): void - } - interface Command { - /** - * SetVersionTemplate sets version template to be used. Application can use - * it to set custom template. - */ - setVersionTemplate(s: string): void - } - interface Command { - /** - * SetGlobalNormalizationFunc sets a normalization function to all flag sets - * and also to child commands. The user should not have a cyclic dependency - * on commands. - */ - setGlobalNormalizationFunc(n: (f: any, name: string) => any): void - } - interface Command { - /** OutOrStdout returns output to stdout. */ - outOrStdout(): io.Writer - } - interface Command { - /** OutOrStderr returns output to stderr */ - outOrStderr(): io.Writer - } - interface Command { - /** ErrOrStderr returns output to stderr */ - errOrStderr(): io.Writer - } - interface Command { - /** InOrStdin returns input to stdin */ - inOrStdin(): io.Reader - } - interface Command { - /** - * UsageFunc returns either the function set by SetUsageFunc for this - * command or a parent, or it returns a default usage function. - */ - usageFunc(): (_arg0: Command) => void - } - interface Command { - /** - * Usage puts out the usage for the command. Used when a user provides - * invalid input. Can be defined by user by overriding UsageFunc. 
- */ - usage(): void - } - interface Command { - /** - * HelpFunc returns either the function set by SetHelpFunc for this command - * or a parent, or it returns a function with default help behavior. - */ - helpFunc(): (_arg0: Command, _arg1: Array) => void - } - interface Command { - /** - * Help puts out the help for the command. Used when a user calls help - * [command]. Can be defined by user by overriding HelpFunc. - */ - help(): void - } - interface Command { - /** UsageString returns usage string. */ - usageString(): string - } - interface Command { - /** - * FlagErrorFunc returns either the function set by SetFlagErrorFunc for - * this command or a parent, or it returns a function which returns the - * original error. - */ - flagErrorFunc(): (_arg0: Command, _arg1: Error) => void - } - interface Command { - /** UsagePadding return padding for the usage. */ - usagePadding(): number - } - interface Command { - /** CommandPathPadding return padding for the command path. */ - commandPathPadding(): number - } - interface Command { - /** NamePadding returns padding for the name. */ - namePadding(): number - } - interface Command { - /** UsageTemplate returns usage template for the command. */ - usageTemplate(): string - } - interface Command { - /** HelpTemplate return help template for the command. */ - helpTemplate(): string - } - interface Command { - /** VersionTemplate return version template for the command. */ - versionTemplate(): string - } - interface Command { - /** - * Find the target command given the args and command tree Meant to be run - * on the highest node. Only searches down. - */ - find(args: Array): [Command | undefined, Array] - } - interface Command { - /** - * Traverse the command tree to find the command, and parse args for each - * parent. - */ - traverse(args: Array): [Command | undefined, Array] - } - interface Command { - /** SuggestionsFor provides suggestions for the typedName. 
*/ - suggestionsFor(typedName: string): Array - } - interface Command { - /** - * VisitParents visits all parents of the command and invokes fn on each - * parent. - */ - visitParents(fn: (_arg0: Command) => void): void - } - interface Command { - /** Root finds root command. */ - root(): Command | undefined - } - interface Command { - /** - * ArgsLenAtDash will return the length of c.Flags().Args at the moment when - * a -- was found during args parsing. - */ - argsLenAtDash(): number - } - interface Command { - /** - * ExecuteContext is the same as Execute(), but sets the ctx on the command. - * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or - * ValidArgs functions. - */ - executeContext(ctx: context.Context): void - } - interface Command { - /** - * Execute uses the args (os.Args[1:] by default) and run through the - * command tree finding appropriate matches for commands and then - * corresponding flags. - */ - execute(): void - } - interface Command { - /** - * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the - * command. Retrieve ctx by calling cmd.Context() inside your *Run lifecycle - * or ValidArgs functions. - */ - executeContextC(ctx: context.Context): Command | undefined - } - interface Command { - /** ExecuteC executes the command. */ - executeC(): Command | undefined - } - interface Command { - validateArgs(args: Array): void - } - interface Command { - /** - * ValidateRequiredFlags validates all required flags are present and - * returns an error otherwise - */ - validateRequiredFlags(): void - } - interface Command { - /** - * InitDefaultHelpFlag adds default help flag to c. It is called - * automatically by executing the c or by calling help and usage. If c - * already has help flag, it will do nothing. - */ - initDefaultHelpFlag(): void - } - interface Command { - /** - * InitDefaultVersionFlag adds default version flag to c. It is called - * automatically by executing the c. 
If c already has a version flag, it - * will do nothing. If c.Version is empty, it will do nothing. - */ - initDefaultVersionFlag(): void - } - interface Command { - /** - * InitDefaultHelpCmd adds default help command to c. It is called - * automatically by executing the c or by calling help and usage. If c - * already has help command or c has no subcommands, it will do nothing. - */ - initDefaultHelpCmd(): void - } - interface Command { - /** ResetCommands delete parent, subcommand and help command from c. */ - resetCommands(): void - } - interface Command { - /** Commands returns a sorted slice of child commands. */ - commands(): Array - } - interface Command { - /** AddCommand adds one or more commands to this parent command. */ - addCommand(...cmds: (Command | undefined)[]): void - } - interface Command { - /** Groups returns a slice of child command groups. */ - groups(): Array - } - interface Command { - /** - * AllChildCommandsHaveGroup returns if all subcommands are assigned to a - * group - */ - allChildCommandsHaveGroup(): boolean - } - interface Command { - /** ContainsGroup return if groupID exists in the list of command groups. */ - containsGroup(groupID: string): boolean - } - interface Command { - /** AddGroup adds one or more command groups to this parent command. */ - addGroup(...groups: (Group | undefined)[]): void - } - interface Command { - /** RemoveCommand removes one or more commands from a parent command. */ - removeCommand(...cmds: (Command | undefined)[]): void - } - interface Command { - /** - * Print is a convenience method to Print to the defined output, fallback to - * Stderr if not set. - */ - print(...i: {}[]): void - } - interface Command { - /** - * Println is a convenience method to Println to the defined output, - * fallback to Stderr if not set. - */ - println(...i: {}[]): void - } - interface Command { - /** - * Printf is a convenience method to Printf to the defined output, fallback - * to Stderr if not set. 
- */ - printf(format: string, ...i: {}[]): void - } - interface Command { - /** - * PrintErr is a convenience method to Print to the defined Err output, - * fallback to Stderr if not set. - */ - printErr(...i: {}[]): void - } - interface Command { - /** - * PrintErrln is a convenience method to Println to the defined Err output, - * fallback to Stderr if not set. - */ - printErrln(...i: {}[]): void - } - interface Command { - /** - * PrintErrf is a convenience method to Printf to the defined Err output, - * fallback to Stderr if not set. - */ - printErrf(format: string, ...i: {}[]): void - } - interface Command { - /** CommandPath returns the full path to this command. */ - commandPath(): string - } - interface Command { - /** UseLine puts out the full usage for a given command (including parents). */ - useLine(): string - } - interface Command { - /** - * DebugFlags used to determine which flags have been assigned to which - * commands and which persist. - */ - debugFlags(): void - } - interface Command { - /** Name returns the command's name: the first word in the use line. */ - name(): string - } - interface Command { - /** HasAlias determines if a given string is an alias of the command. */ - hasAlias(s: string): boolean - } - interface Command { - /** - * CalledAs returns the command name or alias that was used to invoke this - * command or an empty string if the command has not been called. - */ - calledAs(): string - } - interface Command { - /** NameAndAliases returns a list of the command name and all aliases */ - nameAndAliases(): string - } - interface Command { - /** HasExample determines if the command has example. */ - hasExample(): boolean - } - interface Command { - /** Runnable determines if the command is itself runnable. */ - runnable(): boolean - } - interface Command { - /** HasSubCommands determines if the command has children commands. 
*/ - hasSubCommands(): boolean - } - interface Command { - /** - * IsAvailableCommand determines if a command is available as a non-help - * command (this includes all non deprecated/hidden commands). - */ - isAvailableCommand(): boolean - } - interface Command { - /** - * IsAdditionalHelpTopicCommand determines if a command is an additional - * help topic command; additional help topic command is determined by the - * fact that it is NOT runnable/hidden/deprecated, and has no sub commands - * that are runnable/hidden/deprecated. Concrete example: - * https://github.com/spf13/cobra/issues/393#issuecomment-282741924. - */ - isAdditionalHelpTopicCommand(): boolean - } - interface Command { - /** - * HasHelpSubCommands determines if a command has any available 'help' sub - * commands that need to be shown in the usage/help default template under - * 'additional help topics'. - */ - hasHelpSubCommands(): boolean - } - interface Command { - /** - * HasAvailableSubCommands determines if a command has available sub - * commands that need to be shown in the usage/help default template under - * 'available commands'. - */ - hasAvailableSubCommands(): boolean - } - interface Command { - /** HasParent determines if the command is a child command. */ - hasParent(): boolean - } - interface Command { - /** - * GlobalNormalizationFunc returns the global normalization function or nil - * if it doesn't exist. - */ - globalNormalizationFunc(): (f: any, name: string) => any - } - interface Command { - /** - * Flags returns the complete FlagSet that applies to this command (local - * and persistent declared here and by all parents). - */ - flags(): any | undefined - } - interface Command { - /** - * LocalNonPersistentFlags are flags specific to this command which will NOT - * persist to subcommands. - */ - localNonPersistentFlags(): any | undefined - } - interface Command { - /** - * LocalFlags returns the local FlagSet specifically set in the current - * command. 
- */ - localFlags(): any | undefined - } - interface Command { - /** - * InheritedFlags returns all flags which were inherited from parent - * commands. - */ - inheritedFlags(): any | undefined - } - interface Command { - /** - * NonInheritedFlags returns all flags which were not inherited from parent - * commands. - */ - nonInheritedFlags(): any | undefined - } - interface Command { - /** - * PersistentFlags returns the persistent FlagSet specifically set in the - * current command. - */ - persistentFlags(): any | undefined - } - interface Command { - /** ResetFlags deletes all flags from command. */ - resetFlags(): void - } - interface Command { - /** - * HasFlags checks if the command contains any flags (local plus persistent - * from the entire structure). - */ - hasFlags(): boolean - } - interface Command { - /** HasPersistentFlags checks if the command contains persistent flags. */ - hasPersistentFlags(): boolean - } - interface Command { - /** - * HasLocalFlags checks if the command has flags specifically declared - * locally. - */ - hasLocalFlags(): boolean - } - interface Command { - /** - * HasInheritedFlags checks if the command has flags inherited from its - * parent command. - */ - hasInheritedFlags(): boolean - } - interface Command { - /** - * HasAvailableFlags checks if the command contains any flags (local plus - * persistent from the entire structure) which are not hidden or - * deprecated. - */ - hasAvailableFlags(): boolean - } - interface Command { - /** - * HasAvailablePersistentFlags checks if the command contains persistent - * flags which are not hidden or deprecated. - */ - hasAvailablePersistentFlags(): boolean - } - interface Command { - /** - * HasAvailableLocalFlags checks if the command has flags specifically - * declared locally which are not hidden or deprecated. 
- */ - hasAvailableLocalFlags(): boolean - } - interface Command { - /** - * HasAvailableInheritedFlags checks if the command has flags inherited from - * its parent command which are not hidden or deprecated. - */ - hasAvailableInheritedFlags(): boolean - } - interface Command { - /** Flag climbs up the command tree looking for matching flag. */ - flag(name: string): any | undefined - } - interface Command { - /** ParseFlags parses persistent flag tree and local flags. */ - parseFlags(args: Array): void - } - interface Command { - /** Parent returns a commands parent command. */ - parent(): Command | undefined - } - interface Command { - /** - * RegisterFlagCompletionFunc should be called to register a function to - * provide completion for a flag. - */ - registerFlagCompletionFunc( - flagName: string, - f: ( - cmd: Command, - args: Array, - toComplete: string, - ) => [Array, ShellCompDirective], - ): void - } - interface Command { - /** - * InitDefaultCompletionCmd adds a default 'completion' command to c. This - * function will do nothing if any of the following is true: - * - * 1. The feature has been explicitly disabled by the program, - * 2. C has no subcommands (to avoid creating one), - * 3. C already has a 'completion' command provided by the program. - */ - initDefaultCompletionCmd(): void - } - interface Command { - /** - * GenFishCompletion generates fish completion file and writes to the passed - * writer. - */ - genFishCompletion(w: io.Writer, includeDesc: boolean): void - } - interface Command { - /** GenFishCompletionFile generates fish completion file. */ - genFishCompletionFile(filename: string, includeDesc: boolean): void - } - interface Command { - /** - * MarkFlagsRequiredTogether marks the given flags with annotations so that - * Cobra errors if the command is invoked with a subset (but not all) of the - * given flags. 
- */ - markFlagsRequiredTogether(...flagNames: string[]): void - } - interface Command { - /** - * MarkFlagsMutuallyExclusive marks the given flags with annotations so that - * Cobra errors if the command is invoked with more than one flag from the - * given set of flags. - */ - markFlagsMutuallyExclusive(...flagNames: string[]): void - } - interface Command { - /** - * ValidateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic - * and returns the first error encountered. - */ - validateFlagGroups(): void - } - interface Command { - /** - * GenPowerShellCompletionFile generates powershell completion file without - * descriptions. - */ - genPowerShellCompletionFile(filename: string): void - } - interface Command { - /** - * GenPowerShellCompletion generates powershell completion file without - * descriptions and writes it to the passed writer. - */ - genPowerShellCompletion(w: io.Writer): void - } - interface Command { - /** - * GenPowerShellCompletionFileWithDesc generates powershell completion file - * with descriptions. - */ - genPowerShellCompletionFileWithDesc(filename: string): void - } - interface Command { - /** - * GenPowerShellCompletionWithDesc generates powershell completion file with - * descriptions and writes it to the passed writer. - */ - genPowerShellCompletionWithDesc(w: io.Writer): void - } - interface Command { - /** - * MarkFlagRequired instructs the various shell completion implementations - * to prioritize the named flag when performing completion, and causes your - * command to report an error if invoked without the flag. - */ - markFlagRequired(name: string): void - } - interface Command { - /** - * MarkPersistentFlagRequired instructs the various shell completion - * implementations to prioritize the named persistent flag when performing - * completion, and causes your command to report an error if invoked without - * the flag. 
- */ - markPersistentFlagRequired(name: string): void - } - interface Command { - /** - * MarkFlagFilename instructs the various shell completion implementations - * to limit completions for the named flag to the specified file - * extensions. - */ - markFlagFilename(name: string, ...extensions: string[]): void - } - interface Command { - /** - * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if - * it exists. The bash completion script will call the bash function f for - * the flag. - * - * This will only work for bash completion. It is recommended to instead use - * c.RegisterFlagCompletionFunc(...) which allows to register a Go function - * which will work across all shells. - */ - markFlagCustom(name: string, f: string): void - } - interface Command { - /** - * MarkPersistentFlagFilename instructs the various shell completion - * implementations to limit completions for the named persistent flag to the - * specified file extensions. - */ - markPersistentFlagFilename(name: string, ...extensions: string[]): void - } - interface Command { - /** - * MarkFlagDirname instructs the various shell completion implementations to - * limit completions for the named flag to directory names. - */ - markFlagDirname(name: string): void - } - interface Command { - /** - * MarkPersistentFlagDirname instructs the various shell completion - * implementations to limit completions for the named persistent flag to - * directory names. - */ - markPersistentFlagDirname(name: string): void - } - interface Command { - /** - * GenZshCompletionFile generates zsh completion file including - * descriptions. - */ - genZshCompletionFile(filename: string): void - } - interface Command { - /** - * GenZshCompletion generates zsh completion file including descriptions and - * writes it to the passed writer. - */ - genZshCompletion(w: io.Writer): void - } - interface Command { - /** - * GenZshCompletionFileNoDesc generates zsh completion file without - * descriptions. 
- */ - genZshCompletionFileNoDesc(filename: string): void - } - interface Command { - /** - * GenZshCompletionNoDesc generates zsh completion file without descriptions - * and writes it to the passed writer. - */ - genZshCompletionNoDesc(w: io.Writer): void - } - interface Command { - /** - * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior - * was not consistent with Bash completion. It has therefore been disabled. - * Instead, when no other completion is specified, file completion is done - * by default for every argument. One can disable file completion on a - * per-argument basis by using ValidArgsFunction and - * ShellCompDirectiveNoFileComp. To achieve file extension filtering, one - * can use ValidArgsFunction and ShellCompDirectiveFilterFileExt. - * - * Deprecated - */ - markZshCompPositionalArgumentFile( - argPosition: number, - ...patterns: string[] - ): void - } - interface Command { - /** - * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore - * been disabled. To achieve the same behavior across all shells, one can - * use ValidArgs (for the first argument only) or ValidArgsFunction for any - * argument (can include the first one also). - * - * Deprecated - */ - markZshCompPositionalArgumentWords( - argPosition: number, - ...words: string[] - ): void - } -} - -/** - * Package jwt is a Go implementation of JSON Web Tokens: - * http://self-issued.info/docs/draft-jones-json-web-token.html - * - * See README.md for more info. - */ -namespace jwt { - /** - * MapClaims is a claims type that uses the map[string]interface{} for JSON - * decoding. This is the default claims type if you don't supply one - */ - interface MapClaims extends _TygojaDict {} - interface MapClaims { - /** - * VerifyAudience Compares the aud claim against cmp. 
If required is false, - * this method will return true if the value matches or is unset - */ - verifyAudience(cmp: string, req: boolean): boolean - } - interface MapClaims { - /** - * VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). If req - * is false, it will return true, if exp is unset. - */ - verifyExpiresAt(cmp: number, req: boolean): boolean - } - interface MapClaims { - /** - * VerifyIssuedAt compares the exp claim against cmp (cmp >= iat). If req is - * false, it will return true, if iat is unset. - */ - verifyIssuedAt(cmp: number, req: boolean): boolean - } - interface MapClaims { - /** - * VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). If req - * is false, it will return true, if nbf is unset. - */ - verifyNotBefore(cmp: number, req: boolean): boolean - } - interface MapClaims { - /** - * VerifyIssuer compares the iss claim against cmp. If required is false, - * this method will return true if the value matches or is unset - */ - verifyIssuer(cmp: string, req: boolean): boolean - } - interface MapClaims { - /** - * Valid validates time based claims "exp, iat, nbf". There is no accounting - * for clock skew. As well, if any of the above claims are not in the token, - * it will still be considered a valid claim. - */ - valid(): void + interface WalkDirFunc { + (path: string, d: DirEntry, err: Error): void } } @@ -9478,6 +7958,23 @@ namespace jwt { * * The implementation is sufficient for HTTP (RFC 2388) and the multipart bodies * generated by popular browsers. + * + * # Limits + * + * To protect against malicious inputs, this package sets limits on the size of + * the MIME data it processes. + * + * Reader.NextPart and Reader.NextRawPart limit the number of headers in a part + * to 10000 and Reader.ReadForm limits the total number of headers in all + * FileHeaders to 10000. These limits may be adjusted with the + * GODEBUG=multipartmaxheaders= setting. 
+ * + * Reader.ReadForm further limits the number of parts in a form to 1000. This + * limit may be adjusted with the GODEBUG=multipartmaxparts= setting. + */ +/** + * Copyright 2023 The Go Authors. All rights reserved. Use of this source code + * is governed by a BSD-style license that can be found in the LICENSE file. */ namespace multipart { /** A FileHeader describes a file part of a multipart request. */ @@ -9504,7 +8001,7 @@ namespace multipart { * resp, err := http.PostForm("http://example.com/form", * url.Values{"key": {"Value"}, "id": {"123"}}) * - * The client must close the response body when finished with it: + * The caller must close the response body when finished with it: * * resp, err := http.Get("http://example.com/") * if err != nil { @@ -9514,6 +8011,8 @@ namespace multipart { * body, err := io.ReadAll(resp.Body) * // ... * + * # Clients and Transports + * * For control over HTTP client headers, redirect policy, and other settings, * create a Client: * @@ -9544,6 +8043,8 @@ namespace multipart { * Clients and Transports are safe for concurrent use by multiple goroutines and * for efficiency should only be created once and re-used. * + * # Servers + * * ListenAndServe starts an HTTP server with a given address and handler. The * handler is usually nil, which means to use DefaultServeMux. Handle and * HandleFunc add handlers to DefaultServeMux: @@ -9568,18 +8069,19 @@ namespace multipart { * } * log.Fatal(s.ListenAndServe()) * + * # HTTP/2 + * * Starting with Go 1.6, the http package has transparent support for the HTTP/2 * protocol when using HTTPS. Programs that must disable HTTP/2 can do so by * setting Transport.TLSNextProto (for clients) or Server.TLSNextProto (for * servers) to a non-nil, empty map. 
Alternatively, the following GODEBUG - * environment variables are currently supported: + * settings are currently supported: * * GODEBUG=http2client=0 # disable HTTP/2 client support * GODEBUG=http2server=0 # disable HTTP/2 server support * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps * - * The GODEBUG variables are not covered by Go's API compatibility promise. * Please report any issues before disabling HTTP/2 support: * https://golang.org/s/http2bug * @@ -9821,8 +8323,8 @@ namespace http { } interface Request { /** - * Context returns the request's context. To change the context, use - * WithContext. + * Context returns the request's context. To change the context, use Clone + * or WithContext. * * The returned context is always non-nil; it defaults to the background * context. @@ -9845,11 +8347,9 @@ namespace http { * and reading the response headers and body. * * To create a new request with a context, use NewRequestWithContext. To - * change the context of a request, such as an incoming request you want to - * modify before sending back out, use Request.Clone. Between those two - * uses, it's rare to need WithContext. + * make a deep copy of a request with a new context, use Request.Clone. */ - withContext(ctx: context.Context): Request | undefined + withContext(ctx: context.Context): Request } interface Request { /** @@ -9860,7 +8360,7 @@ namespace http { * of a request and its response: obtaining a connection, sending the * request, and reading the response headers and body. */ - clone(ctx: context.Context): Request | undefined + clone(ctx: context.Context): Request } interface Request { /** @@ -9883,7 +8383,7 @@ namespace http { * not found. If multiple cookies match the given name, only one cookie will * be returned. */ - cookie(name: string): Cookie | undefined + cookie(name: string): Cookie } interface Request { /** @@ -9915,7 +8415,7 @@ namespace http { * and an error. 
Use this function instead of ParseMultipartForm to process * the request body as a stream. */ - multipartReader(): multipart.Reader | undefined + multipartReader(): multipart.Reader } interface Request { /** @@ -9960,11 +8460,12 @@ namespace http { * Authentication with the provided username and password. * * With HTTP Basic Authentication the provided username and password are not - * encrypted. + * encrypted. It should generally only be used in an HTTPS request. * - * Some protocols may impose additional requirements on pre-escaping the - * username and password. For instance, when used with OAuth2, both - * arguments must be URL encoded first with url.QueryEscape. + * The username may not contain a colon. Some protocols may impose + * additional requirements on pre-escaping the username and password. For + * instance, when used with OAuth2, both arguments must be URL encoded first + * with url.QueryEscape. */ setBasicAuth(username: string): void } @@ -10030,7 +8531,7 @@ namespace http { * FormFile returns the first file for the provided form key. FormFile calls * ParseMultipartForm and ParseForm if necessary. */ - formFile(key: string): [multipart.File, multipart.FileHeader | undefined] + formFile(key: string): [multipart.File, multipart.FileHeader] } /** * A ResponseWriter interface is used by an HTTP handler to construct an HTTP @@ -10040,13 +8541,15 @@ namespace http { * returned. */ interface ResponseWriter { + [key: string]: any /** * Header returns the header map that will be sent by WriteHeader. The * Header map also is the mechanism with which Handlers can set HTTP * trailers. * * Changing the header map after a call to WriteHeader (or Write) has no - * effect unless the modified headers are trailers. + * effect unless the HTTP status code was of the 1xx class or the modified + * headers are trailers. * * There are two ways to set Trailers. 
The preferred way is to predeclare in * the headers which trailers you will later send by setting the "Trailer" @@ -10082,19 +8585,24 @@ namespace http { * supported by all HTTP/2 clients. Handlers should read before writing if * possible to maximize compatibility. */ - write(_arg0: string): number + write(_arg0: string | Array): number /** * WriteHeader sends an HTTP response header with the provided status code. * * If WriteHeader is not called explicitly, the first call to Write will * trigger an implicit WriteHeader(http.StatusOK). Thus explicit calls to - * WriteHeader are mainly used to send error codes. + * WriteHeader are mainly used to send error codes or 1xx informational + * responses. * - * The provided code must be a valid HTTP 1xx-5xx status code. Only one - * header may be written. Go does not currently support sending user-defined - * 1xx informational headers, with the exception of 100-continue response - * header that the Server sends automatically when the Request.Body is - * read. + * The provided code must be a valid HTTP 1xx-5xx status code. Any number of + * 1xx headers may be written, followed by at most one 2xx-5xx header. 1xx + * headers are sent immediately, but 2xx-5xx headers may be buffered. Use + * the Flusher interface to send buffered data. The header map is cleared + * when 2xx-5xx headers are sent, but not with 1xx headers. + * + * The server will automatically send a 100 (Continue) header on the first + * read from the request body if the request has an "Expect: 100-continue" + * header. */ writeHeader(statusCode: number): void } @@ -10111,6 +8619,11 @@ namespace http { */ addr: string handler: Handler // handler to invoke, http.DefaultServeMux if nil + /** + * DisableGeneralOptionsHandler, if true, passes "OPTIONS *" requests to the + * Handler, otherwise responds with 200 OK and Content-Length: 0. 
+ */ + disableGeneralOptionsHandler: boolean /** * TLSConfig optionally provides a TLS configuration for use by ServeTLS and * ListenAndServeTLS. Note that this value is cloned by ServeTLS and @@ -10318,6 +8831,315 @@ namespace http { } } +/** + * Package blob provides an easy and portable way to interact with blobs within + * a storage location. Subpackages contain driver implementations of blob for + * supported services. + * + * See https://gocloud.dev/howto/blob/ for a detailed how-to guide. + * + * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with + * functions in that package. + * + * # Errors + * + * The errors returned from this package can be inspected in several ways: + * + * The Code function from gocloud.dev/gcerrors will return an error code, also + * defined in that package, when invoked on an error. + * + * The Bucket.ErrorAs method can retrieve the driver error underlying the + * returned error. + * + * # OpenCensus Integration + * + * OpenCensus supports tracing and metric collection for multiple languages and + * backend providers. See https://opencensus.io. + * + * This API collects OpenCensus traces and metrics for the following methods: + * + * - Attributes + * - Copy + * - Delete + * - ListPage + * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll + * are included because they call NewRangeReader.) + * - NewWriter, from creation until the call to Close. + * + * All trace and metric names begin with the package import path. The traces add + * the method name. For example, "gocloud.dev/blob/Attributes". The metrics are + * "completed_calls", a count of completed method calls by driver, method and + * status (error code); and "latency", a distribution of method latency by + * driver and method. For example, "gocloud.dev/blob/latency". + * + * It also collects the following metrics: + * + * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver. 
+ * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver. + * + * To enable trace collection in your application, see "Configure Exporter" at + * https://opencensus.io/quickstart/go/tracing. To enable metric collection in + * your application, see "Exporting stats" at + * https://opencensus.io/quickstart/go/metrics. + */ +namespace blob { + /** + * Reader reads bytes from a blob. It implements io.ReadSeekCloser, and must + * be closed after reads are finished. + */ + interface Reader {} + interface Reader { + /** Read implements io.Reader (https://golang.org/pkg/io/#Reader). */ + read(p: string | Array): number + } + interface Reader { + /** Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). */ + seek(offset: number, whence: number): number + } + interface Reader { + /** Close implements io.Closer (https://golang.org/pkg/io/#Closer). */ + close(): void + } + interface Reader { + /** ContentType returns the MIME type of the blob. */ + contentType(): string + } + interface Reader { + /** ModTime returns the time the blob was last modified. */ + modTime(): time.Time + } + interface Reader { + /** Size returns the size of the blob content in bytes. */ + size(): number + } + interface Reader { + /** + * As converts i to driver-specific types. See + * https://gocloud.dev/concepts/as/ for background information, the "As" + * examples in this package for examples, and the driver package + * documentation for the specific types supported for that driver. + */ + as(i: {}): boolean + } + interface Reader { + /** + * WriteTo reads from r and writes to w until there's no more data or an + * error occurs. The return value is the number of bytes written to w. + * + * It implements the io.WriterTo interface. + */ + writeTo(w: io.Writer): number + } + /** Attributes contains attributes about a blob. */ + interface Attributes { + /** + * CacheControl specifies caching attributes that services may use when + * serving the blob. 
+ * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + */ + cacheControl: string + /** + * ContentDisposition specifies whether the blob content is expected to be + * displayed inline or as an attachment. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition + */ + contentDisposition: string + /** + * ContentEncoding specifies the encoding used for the blob's content, if + * any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding + */ + contentEncoding: string + /** + * ContentLanguage specifies the language used in the blob's content, if + * any. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language + */ + contentLanguage: string + /** + * ContentType is the MIME type of the blob. It will not be empty. + * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type + */ + contentType: string + /** + * Metadata holds key/value pairs associated with the blob. Keys are + * guaranteed to be in lowercase, even if the backend service has + * case-sensitive keys (although note that Metadata written via this package + * will always be lowercased). If there are duplicate case-insensitive keys + * (e.g., "foo" and "FOO"), only one value will be kept, and it is undefined + * which one. + */ + metadata: _TygojaDict + /** + * CreateTime is the time the blob was created, if available. If not + * available, CreateTime will be the zero time. + */ + createTime: time.Time + /** ModTime is the time the blob was last modified. */ + modTime: time.Time + /** Size is the size of the blob's content in bytes. */ + size: number + /** MD5 is an MD5 hash of the blob contents or nil if not available. */ + md5: string | Array + /** ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. */ + eTag: string + } + interface Attributes { + /** + * As converts i to driver-specific types. 
See + * https://gocloud.dev/concepts/as/ for background information, the "As" + * examples in this package for examples, and the driver package + * documentation for the specific types supported for that driver. + */ + as(i: {}): boolean + } + /** ListObject represents a single blob returned from List. */ + interface ListObject { + /** Key is the key for this blob. */ + key: string + /** ModTime is the time the blob was last modified. */ + modTime: time.Time + /** Size is the size of the blob's content in bytes. */ + size: number + /** MD5 is an MD5 hash of the blob contents or nil if not available. */ + md5: string | Array + /** + * IsDir indicates that this result represents a "directory" in the + * hierarchical namespace, ending in ListOptions.Delimiter. Key can be + * passed as ListOptions.Prefix to list items in the "directory". Fields + * other than Key and IsDir will not be set if IsDir is true. + */ + isDir: boolean + } + interface ListObject { + /** + * As converts i to driver-specific types. See + * https://gocloud.dev/concepts/as/ for background information, the "As" + * examples in this package for examples, and the driver package + * documentation for the specific types supported for that driver. + */ + as(i: {}): boolean + } +} + +/** + * Package jwt is a Go implementation of JSON Web Tokens: + * http://self-issued.info/docs/draft-jones-json-web-token.html + * + * See README.md for more info. + */ +namespace jwt { + /** + * MapClaims is a claims type that uses the map[string]interface{} for JSON + * decoding. This is the default claims type if you don't supply one + */ + interface MapClaims extends _TygojaDict {} + interface MapClaims { + /** + * VerifyAudience Compares the aud claim against cmp. If required is false, + * this method will return true if the value matches or is unset + */ + verifyAudience(cmp: string, req: boolean): boolean + } + interface MapClaims { + /** + * VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). 
If req + * is false, it will return true, if exp is unset. + */ + verifyExpiresAt(cmp: number, req: boolean): boolean + } + interface MapClaims { + /** + * VerifyIssuedAt compares the exp claim against cmp (cmp >= iat). If req is + * false, it will return true, if iat is unset. + */ + verifyIssuedAt(cmp: number, req: boolean): boolean + } + interface MapClaims { + /** + * VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). If req + * is false, it will return true, if nbf is unset. + */ + verifyNotBefore(cmp: number, req: boolean): boolean + } + interface MapClaims { + /** + * VerifyIssuer compares the iss claim against cmp. If required is false, + * this method will return true if the value matches or is unset + */ + verifyIssuer(cmp: string, req: boolean): boolean + } + interface MapClaims { + /** + * Valid validates time based claims "exp, iat, nbf". There is no accounting + * for clock skew. As well, if any of the above claims are not in the token, + * it will still be considered a valid claim. + */ + valid(): void + } +} + +/** + * Package types implements some commonly used db serializable types like + * datetime, json, etc. + */ +namespace types { + /** JsonArray defines a slice that is safe for json and db read/write. */ + interface JsonArray extends Array {} + interface JsonArray { + /** MarshalJSON implements the [json.Marshaler] interface. */ + marshalJSON(): string | Array + } + interface JsonArray { + /** Value implements the [driver.Valuer] interface. */ + value(): any + } + interface JsonArray { + /** + * Scan implements [sql.Scanner] interface to scan the provided value into + * the current JsonArray[T] instance. + */ + scan(value: any): void + } + /** JsonMap defines a map that is safe for json and db read/write. */ + interface JsonMap extends _TygojaDict {} + interface JsonMap { + /** MarshalJSON implements the [json.Marshaler] interface. 
*/ + marshalJSON(): string | Array + } + interface JsonMap { + /** + * Get retrieves a single value from the current JsonMap. + * + * This helper was added primarily to assist the goja integration since + * custom map types don't have direct access to the map keys + * (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods). + */ + get(key: string): any + } + interface JsonMap { + /** + * Set sets a single value in the current JsonMap. + * + * This helper was added primarily to assist the goja integration since + * custom map types don't have direct access to the map keys + * (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods). + */ + set(key: string, value: any): void + } + interface JsonMap { + /** Value implements the [driver.Valuer] interface. */ + value(): any + } + interface JsonMap { + /** + * Scan implements [sql.Scanner] interface to scan the provided value into + * the current `JsonMap` instance. + */ + scan(value: any): void + } +} + namespace auth { /** AuthUser defines a standardized oauth2 user data structure. */ interface AuthUser { @@ -10326,16 +9148,32 @@ namespace auth { username: string email: string avatarUrl: string - rawUser: _TygojaDict accessToken: string refreshToken: string + expiry: types.DateTime + rawUser: _TygojaDict } /** Provider defines a common interface for an OAuth2 client. */ interface Provider { + [key: string]: any /** Scopes returns the context associated with the provider (if any). */ context(): context.Context /** SetContext assigns the specified context to the current provider. */ setContext(ctx: context.Context): void + /** PKCE indicates whether the provider can use the PKCE flow. */ + pkce(): boolean + /** + * SetPKCE toggles the state whether the provider can use the PKCE flow or + * not. + */ + setPKCE(enable: boolean): void + /** + * DisplayName usually returns provider name as it is officially written and + * it could be used directly in the UI. 
+ */ + displayName(): string + /** SetDisplayName sets the provider's display name. */ + setDisplayName(displayName: string): void /** Scopes returns the provider access permissions that will be requested. */ scopes(): Array /** @@ -10371,27 +9209,24 @@ namespace auth { /** SetUserApiUrl sets the provider's UserApiUrl. */ setUserApiUrl(url: string): void /** Client returns an http client using the provided token. */ - client(token: oauth2.Token): any | undefined + client(token: oauth2.Token): any /** * BuildAuthUrl returns a URL to the provider's consent page that asks for * permissions for the required scopes explicitly. */ buildAuthUrl(state: string, ...opts: oauth2.AuthCodeOption[]): string /** FetchToken converts an authorization code to token. */ - fetchToken( - code: string, - ...opts: oauth2.AuthCodeOption[] - ): oauth2.Token | undefined + fetchToken(code: string, ...opts: oauth2.AuthCodeOption[]): oauth2.Token /** * FetchRawUserData requests and marshalizes into `result` the the OAuth * user api response. */ - fetchRawUserData(token: oauth2.Token): string + fetchRawUserData(token: oauth2.Token): string | Array /** * FetchAuthUser is similar to FetchRawUserData, but normalizes and * marshalizes the user api response into a standardized AuthUser struct. */ - fetchAuthUser(token: oauth2.Token): AuthUser | undefined + fetchAuthUser(token: oauth2.Token): AuthUser } } @@ -10440,14 +9275,15 @@ namespace echo { * handler. */ interface Context { + [key: string]: any /** Request returns `*http.Request`. */ - request(): http.Request | undefined + request(): http.Request /** SetRequest sets `*http.Request`. */ setRequest(r: http.Request): void /** SetResponse sets `*Response`. */ setResponse(r: Response): void /** Response returns `*Response`. */ - response(): Response | undefined + response(): Response /** IsTLS returns true if HTTP connection is TLS otherwise false. */ isTLS(): boolean /** IsWebSocket returns true if HTTP connection is WebSocket otherwise false. 
*/ @@ -10509,11 +9345,11 @@ namespace echo { /** FormValues returns the form field values as `url.Values`. */ formValues(): url.Values /** FormFile returns the multipart form file for the provided name. */ - formFile(name: string): multipart.FileHeader | undefined + formFile(name: string): multipart.FileHeader /** MultipartForm returns the multipart form. */ - multipartForm(): multipart.Form | undefined + multipartForm(): multipart.Form /** Cookie returns the named cookie provided in the request. */ - cookie(name: string): http.Cookie | undefined + cookie(name: string): http.Cookie /** SetCookie adds a `Set-Cookie` header in HTTP response. */ setCookie(cookie: http.Cookie): void /** Cookies returns the HTTP cookies sent with the request. */ @@ -10540,7 +9376,7 @@ namespace echo { /** HTML sends an HTTP response with status code. */ html(code: number, html: string): void /** HTMLBlob sends an HTTP blob response with status code. */ - htmlBlob(code: number, b: string): void + htmlBlob(code: number, b: string | Array): void /** String sends a string response with status code. */ string(code: number, s: string): void /** JSON sends a JSON response with status code. */ @@ -10548,7 +9384,7 @@ namespace echo { /** JSONPretty sends a pretty-print JSON with status code. */ jsonPretty(code: number, i: {}, indent: string): void /** JSONBlob sends a JSON blob response with status code. */ - jsonBlob(code: number, b: string): void + jsonBlob(code: number, b: string | Array): void /** * JSONP sends a JSONP response with status code. It uses `callback` to * construct the JSONP payload. @@ -10558,15 +9394,15 @@ namespace echo { * JSONPBlob sends a JSONP blob response with status code. It uses * `callback` to construct the JSONP payload. */ - jsonpBlob(code: number, callback: string, b: string): void + jsonpBlob(code: number, callback: string, b: string | Array): void /** XML sends an XML response with status code. 
*/ xml(code: number, i: {}): void /** XMLPretty sends a pretty-print XML with status code. */ xmlPretty(code: number, i: {}, indent: string): void /** XMLBlob sends an XML blob response with status code. */ - xmlBlob(code: number, b: string): void + xmlBlob(code: number, b: string | Array): void /** Blob sends a blob response with status code and content type. */ - blob(code: number, contentType: string, b: string): void + blob(code: number, contentType: string, b: string | Array): void /** Stream sends a streaming response with status code and content type. */ stream(code: number, contentType: string, r: io.Reader): void /** File sends a response with the content of the file. */ @@ -10606,7 +9442,7 @@ namespace echo { * ONLY when you are NOT mutating them anywhere in your code after Echo * server has started. */ - echo(): Echo | undefined + echo(): Echo } // @ts-ignore import stdContext = context @@ -10868,14 +9704,14 @@ namespace echo { * Host creates a new router group for the provided host and optional * host-level middleware. */ - host(name: string, ...m: MiddlewareFunc[]): Group | undefined + host(name: string, ...m: MiddlewareFunc[]): Group } interface Echo { /** * Group creates a new router group with prefix and optional group-level * middleware. */ - group(prefix: string, ...m: MiddlewareFunc[]): Group | undefined + group(prefix: string, ...m: MiddlewareFunc[]): Group } interface Echo { /** @@ -10926,254 +9762,992 @@ namespace echo { } /** - * Package blob provides an easy and portable way to interact with blobs within - * a storage location. Subpackages contain driver implementations of blob for - * supported services. + * Package exec runs external commands. It wraps os.StartProcess to make it + * easier to remap stdin and stdout, connect I/O with pipes, and do other + * adjustments. * - * See https://gocloud.dev/howto/blob/ for a detailed how-to guide. 
+ * Unlike the "system" library call from C and other languages, the os/exec + * package intentionally does not invoke the system shell and does not expand + * any glob patterns or handle other expansions, pipelines, or redirections + * typically done by shells. The package behaves more like C's "exec" family of + * functions. To expand glob patterns, either call the shell directly, taking + * care to escape any dangerous input, or use the path/filepath package's Glob + * function. To expand environment variables, use package os's ExpandEnv. * - * *blob.Bucket implements io/fs.FS and io/fs.SubFS, so it can be used with - * functions in that package. + * Note that the examples in this package assume a Unix system. They may not run + * on Windows, and they do not run in the Go Playground used by golang.org and + * godoc.org. * - * # Errors + * # Executables in the current directory * - * The errors returned from this package can be inspected in several ways: + * The functions Command and LookPath look for a program in the directories + * listed in the current path, following the conventions of the host operating + * system. Operating systems have for decades included the current directory in + * this search, sometimes implicitly and sometimes configured explicitly that + * way by default. Modern practice is that including the current directory is + * usually unexpected and often leads to security problems. * - * The Code function from gocloud.dev/gcerrors will return an error code, also - * defined in that package, when invoked on an error. + * To avoid those security problems, as of Go 1.19, this package will not + * resolve a program using an implicit or explicit path entry relative to the + * current directory. That is, if you run exec.LookPath("go"), it will not + * successfully return ./go on Unix nor .\go.exe on Windows, no matter how the + * path is configured. 
Instead, if the usual path algorithms would result in + * that answer, these functions return an error err satisfying errors.Is(err, + * ErrDot). * - * The Bucket.ErrorAs method can retrieve the driver error underlying the - * returned error. + * For example, consider these two program snippets: * - * # OpenCensus Integration + * path, err := exec.LookPath("prog") + * if err != nil { + * log.Fatal(err) + * } + * use(path) * - * OpenCensus supports tracing and metric collection for multiple languages and - * backend providers. See https://opencensus.io. + * And * - * This API collects OpenCensus traces and metrics for the following methods: + * cmd := exec.Command("prog") + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } * - * - Attributes - * - Copy - * - Delete - * - ListPage - * - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll - * are included because they call NewRangeReader.) - * - NewWriter, from creation until the call to Close. + * These will not find and run ./prog or .\prog.exe, no matter how the current + * path is configured. * - * All trace and metric names begin with the package import path. The traces add - * the method name. For example, "gocloud.dev/blob/Attributes". The metrics are - * "completed_calls", a count of completed method calls by driver, method and - * status (error code); and "latency", a distribution of method latency by - * driver and method. For example, "gocloud.dev/blob/latency". + * Code that always wants to run a program from the current directory can be + * rewritten to say "./prog" instead of "prog". * - * It also collects the following metrics: + * Code that insists on including results from relative path entries can instead + * override the error using an errors.Is check: * - * - gocloud.dev/blob/bytes_read: the total number of bytes read, by driver. - * - gocloud.dev/blob/bytes_written: the total number of bytes written, by driver. 
+ * path, err := exec.LookPath("prog") + * if errors.Is(err, exec.ErrDot) { + * err = nil + * } + * if err != nil { + * log.Fatal(err) + * } + * use(path) * - * To enable trace collection in your application, see "Configure Exporter" at - * https://opencensus.io/quickstart/go/tracing. To enable metric collection in - * your application, see "Exporting stats" at - * https://opencensus.io/quickstart/go/metrics. + * And + * + * cmd := exec.Command("prog") + * if errors.Is(cmd.Err, exec.ErrDot) { + * cmd.Err = nil + * } + * if err := cmd.Run(); err != nil { + * log.Fatal(err) + * } + * + * Setting the environment variable GODEBUG=execerrdot=0 disables generation of + * ErrDot entirely, temporarily restoring the pre-Go 1.19 behavior for programs + * that are unable to apply more targeted fixes. A future version of Go may + * remove support for this variable. + * + * Before adding such overrides, make sure you understand the security + * implications of doing so. See https://go.dev/blog/path-security for more + * information. */ -namespace blob { +namespace exec { /** - * Reader reads bytes from a blob. It implements io.ReadSeekCloser, and must - * be closed after reads are finished. + * Cmd represents an external command being prepared or run. + * + * A Cmd cannot be reused after calling its Run, Output or CombinedOutput + * methods. */ - interface Reader {} - interface Reader { - /** Read implements io.Reader (https://golang.org/pkg/io/#Reader). */ - read(p: string): number - } - interface Reader { - /** Seek implements io.Seeker (https://golang.org/pkg/io/#Seeker). */ - seek(offset: number, whence: number): number - } - interface Reader { - /** Close implements io.Closer (https://golang.org/pkg/io/#Closer). */ - close(): void - } - interface Reader { - /** ContentType returns the MIME type of the blob. */ - contentType(): string - } - interface Reader { - /** ModTime returns the time the blob was last modified. 
*/ - modTime(): time.Time - } - interface Reader { - /** Size returns the size of the blob content in bytes. */ - size(): number - } - interface Reader { + interface Cmd { /** - * As converts i to driver-specific types. See - * https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. - */ - as(i: {}): boolean - } - interface Reader { - /** - * WriteTo reads from r and writes to w until there's no more data or an - * error occurs. The return value is the number of bytes written to w. + * Path is the path of the command to run. * - * It implements the io.WriterTo interface. + * This is the only field that must be set to a non-zero value. If Path is + * relative, it is evaluated relative to Dir. */ - writeTo(w: io.Writer): number + path: string + /** + * Args holds command line arguments, including the command as Args[0]. If + * the Args field is empty or nil, Run uses {Path}. + * + * In typical use, both Path and Args are set by calling Command. + */ + args: Array + /** + * Env specifies the environment of the process. Each entry is of the form + * "key=value". If Env is nil, the new process uses the current process's + * environment. If Env contains duplicate environment keys, only the last + * value in the slice for each duplicate key is used. As a special case on + * Windows, SYSTEMROOT is always added if missing and not explicitly set to + * the empty string. + */ + env: Array + /** + * Dir specifies the working directory of the command. If Dir is the empty + * string, Run runs the command in the calling process's current directory. + */ + dir: string + /** + * Stdin specifies the process's standard input. + * + * If Stdin is nil, the process reads from the null device (os.DevNull). + * + * If Stdin is an *os.File, the process's standard input is connected + * directly to that file. 
+ * + * Otherwise, during the execution of the command a separate goroutine reads + * from Stdin and delivers that data to the command over a pipe. In this + * case, Wait does not complete until the goroutine stops copying, either + * because it has reached the end of Stdin (EOF or a read error), or because + * writing to the pipe returned an error, or because a nonzero WaitDelay was + * set and expired. + */ + stdin: io.Reader + /** + * Stdout and Stderr specify the process's standard output and error. + * + * If either is nil, Run connects the corresponding file descriptor to the + * null device (os.DevNull). + * + * If either is an *os.File, the corresponding output from the process is + * connected directly to that file. + * + * Otherwise, during the execution of the command a separate goroutine reads + * from the process over a pipe and delivers that data to the corresponding + * Writer. In this case, Wait does not complete until the goroutine reaches + * EOF or encounters an error or a nonzero WaitDelay expires. + * + * If Stdout and Stderr are the same writer, and have a type that can be + * compared with ==, at most one goroutine at a time will call Write. + */ + stdout: io.Writer + stderr: io.Writer + /** + * ExtraFiles specifies additional open files to be inherited by the new + * process. It does not include standard input, standard output, or standard + * error. If non-nil, entry i becomes file descriptor 3+i. + * + * ExtraFiles is not supported on Windows. + */ + extraFiles: Array + /** + * SysProcAttr holds optional, operating system-specific attributes. Run + * passes it to os.StartProcess as the os.ProcAttr's Sys field. + */ + sysProcAttr?: syscall.SysProcAttr + /** Process is the underlying process, once started. */ + process?: os.Process + /** + * ProcessState contains information about an exited process. If the process + * was started successfully, Wait or Run will populate its ProcessState when + * the command completes. 
+ */ + processState?: os.ProcessState + err: Error // LookPath error, if any. + /** + * If Cancel is non-nil, the command must have been created with + * CommandContext and Cancel will be called when the command's Context is + * done. By default, CommandContext sets Cancel to call the Kill method on + * the command's Process. + * + * Typically a custom Cancel will send a signal to the command's Process, + * but it may instead take other actions to initiate cancellation, such as + * closing a stdin or stdout pipe or sending a shutdown request on a network + * socket. + * + * If the command exits with a success status after Cancel is called, and + * Cancel does not return an error equivalent to os.ErrProcessDone, then + * Wait and similar methods will return a non-nil error: either an error + * wrapping the one returned by Cancel, or the error from the Context. (If + * the command exits with a non-success status, or Cancel returns an error + * that wraps os.ErrProcessDone, Wait and similar methods continue to return + * the command's usual exit status.) + * + * If Cancel is set to nil, nothing will happen immediately when the + * command's Context is done, but a nonzero WaitDelay will still take + * effect. That may be useful, for example, to work around deadlocks in + * commands that do not support shutdown signals but are expected to always + * finish quickly. + * + * Cancel will not be called if Start returns a non-nil error. + */ + cancel: () => void + /** + * If WaitDelay is non-zero, it bounds the time spent waiting on two sources + * of unexpected delay in Wait: a child process that fails to exit after the + * associated Context is canceled, and a child process that exits but leaves + * its I/O pipes unclosed. + * + * The WaitDelay timer starts when either the associated Context is done or + * a call to Wait observes that the child process has exited, whichever + * occurs first. 
When the delay has elapsed, the command shuts down the + * child process and/or its I/O pipes. + * + * If the child process has failed to exit — perhaps because it ignored or + * failed to receive a shutdown signal from a Cancel function, or because no + * Cancel function was set — then it will be terminated using + * os.Process.Kill. + * + * Then, if the I/O pipes communicating with the child process are still + * open, those pipes are closed in order to unblock any goroutines currently + * blocked on Read or Write calls. + * + * If pipes are closed due to WaitDelay, no Cancel call has occurred, and + * the command has otherwise exited with a successful status, Wait and + * similar methods will return ErrWaitDelay instead of nil. + * + * If WaitDelay is zero (the default), I/O pipes will be read until EOF, + * which might not occur until orphaned subprocesses of the command have + * also closed their descriptors for the pipes. + */ + waitDelay: time.Duration } - /** Attributes contains attributes about a blob. */ - interface Attributes { + interface Cmd { /** - * CacheControl specifies caching attributes that services may use when - * serving the blob. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control + * String returns a human-readable description of c. It is intended only for + * debugging. In particular, it is not suitable for use as input to a shell. + * The output of String may vary across Go releases. */ - cacheControl: string - /** - * ContentDisposition specifies whether the blob content is expected to be - * displayed inline or as an attachment. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition - */ - contentDisposition: string - /** - * ContentEncoding specifies the encoding used for the blob's content, if - * any. 
- * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding - */ - contentEncoding: string - /** - * ContentLanguage specifies the language used in the blob's content, if - * any. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language - */ - contentLanguage: string - /** - * ContentType is the MIME type of the blob. It will not be empty. - * https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type - */ - contentType: string - /** - * Metadata holds key/value pairs associated with the blob. Keys are - * guaranteed to be in lowercase, even if the backend service has - * case-sensitive keys (although note that Metadata written via this package - * will always be lowercased). If there are duplicate case-insensitive keys - * (e.g., "foo" and "FOO"), only one value will be kept, and it is undefined - * which one. - */ - metadata: _TygojaDict - /** - * CreateTime is the time the blob was created, if available. If not - * available, CreateTime will be the zero time. - */ - createTime: time.Time - /** ModTime is the time the blob was last modified. */ - modTime: time.Time - /** Size is the size of the blob's content in bytes. */ - size: number - /** MD5 is an MD5 hash of the blob contents or nil if not available. */ - md5: string - /** ETag for the blob; see https://en.wikipedia.org/wiki/HTTP_ETag. */ - eTag: string + string(): string } - interface Attributes { + interface Cmd { /** - * As converts i to driver-specific types. See - * https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. + * Run starts the specified command and waits for it to complete. + * + * The returned error is nil if the command runs, has no problems copying + * stdin, stdout, and stderr, and exits with a zero exit status. 
+ * + * If the command starts but does not complete successfully, the error is of + * type *ExitError. Other error types may be returned for other situations. + * + * If the calling goroutine has locked the operating system thread with + * runtime.LockOSThread and modified any inheritable OS-level thread state + * (for example, Linux or Plan 9 name spaces), the new process will inherit + * the caller's thread state. */ - as(i: {}): boolean + run(): void } - /** ListObject represents a single blob returned from List. */ - interface ListObject { - /** Key is the key for this blob. */ - key: string - /** ModTime is the time the blob was last modified. */ - modTime: time.Time - /** Size is the size of the blob's content in bytes. */ - size: number - /** MD5 is an MD5 hash of the blob contents or nil if not available. */ - md5: string + interface Cmd { /** - * IsDir indicates that this result represents a "directory" in the - * hierarchical namespace, ending in ListOptions.Delimiter. Key can be - * passed as ListOptions.Prefix to list items in the "directory". Fields - * other than Key and IsDir will not be set if IsDir is true. + * Start starts the specified command but does not wait for it to complete. + * + * If Start returns successfully, the c.Process field will be set. + * + * After a successful call to Start the Wait method must be called in order + * to release associated system resources. */ - isDir: boolean + start(): void } - interface ListObject { + interface Cmd { /** - * As converts i to driver-specific types. See - * https://gocloud.dev/concepts/as/ for background information, the "As" - * examples in this package for examples, and the driver package - * documentation for the specific types supported for that driver. + * Wait waits for the command to exit and waits for any copying to stdin or + * copying from stdout or stderr to complete. + * + * The command must have been started by Start. 
+ * + * The returned error is nil if the command runs, has no problems copying + * stdin, stdout, and stderr, and exits with a zero exit status. + * + * If the command fails to run or doesn't complete successfully, the error + * is of type *ExitError. Other error types may be returned for I/O + * problems. + * + * If any of c.Stdin, c.Stdout or c.Stderr are not an *os.File, Wait also + * waits for the respective I/O loop copying to or from the process to + * complete. + * + * Wait releases any resources associated with the Cmd. */ - as(i: {}): boolean + wait(): void + } + interface Cmd { + /** + * Output runs the command and returns its standard output. Any returned + * error will usually be of type *ExitError. If c.Stderr was nil, Output + * populates ExitError.Stderr. + */ + output(): string | Array + } + interface Cmd { + /** + * CombinedOutput runs the command and returns its combined standard output + * and standard error. + */ + combinedOutput(): string | Array + } + interface Cmd { + /** + * StdinPipe returns a pipe that will be connected to the command's standard + * input when the command starts. The pipe will be closed automatically + * after Wait sees the command exit. A caller need only call Close to force + * the pipe to close sooner. For example, if the command being run will not + * exit until standard input is closed, the caller must close the pipe. + */ + stdinPipe(): io.WriteCloser + } + interface Cmd { + /** + * StdoutPipe returns a pipe that will be connected to the command's + * standard output when the command starts. + * + * Wait will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. For the same reason, it is + * incorrect to call Run when using StdoutPipe. See the example for + * idiomatic usage. 
+ */ + stdoutPipe(): io.ReadCloser + } + interface Cmd { + /** + * StderrPipe returns a pipe that will be connected to the command's + * standard error when the command starts. + * + * Wait will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. For the same reason, it is + * incorrect to use Run when using StderrPipe. See the StdoutPipe example + * for idiomatic usage. + */ + stderrPipe(): io.ReadCloser + } + interface Cmd { + /** + * Environ returns a copy of the environment in which the command would be + * run as it is currently configured. + */ + environ(): Array } } /** - * Package types implements some commonly used db serializable types like - * datetime, json, etc. + * Package sql provides a generic interface around SQL (or SQL-like) databases. + * + * The sql package must be used in conjunction with a database driver. See + * https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until after + * the query is completed. + * + * For usage examples, see the wiki page at https://golang.org/s/sqlwiki. */ -namespace types { - /** JsonArray defines a slice that is safe for json and db read/write. */ - interface JsonArray extends Array {} - interface JsonArray { - /** MarshalJSON implements the [json.Marshaler] interface. */ - marshalJSON(): string - } - interface JsonArray { - /** Value implements the [driver.Valuer] interface. */ - value(): any - } - interface JsonArray { +namespace sql { + /** TxOptions holds the transaction options to be used in DB.BeginTx. */ + interface TxOptions { /** - * Scan implements [sql.Scanner] interface to scan the provided value into - * the current JsonArray[T] instance. + * Isolation is the transaction isolation level. If zero, the driver or + * database's default level is used. 
*/ - scan(value: any): void + isolation: IsolationLevel + readOnly: boolean } - /** JsonMap defines a map that is safe for json and db read/write. */ - interface JsonMap extends _TygojaDict {} - interface JsonMap { - /** MarshalJSON implements the [json.Marshaler] interface. */ - marshalJSON(): string - } - interface JsonMap { + /** + * DB is a database handle representing a pool of zero or more underlying + * connections. It's safe for concurrent use by multiple goroutines. + * + * The sql package creates and frees connections automatically; it also + * maintains a free pool of idle connections. If the database has a concept of + * per-connection state, such state can be reliably observed within a + * transaction (Tx) or connection (Conn). Once DB.Begin is called, the + * returned Tx is bound to a single connection. Once Commit or Rollback is + * called on the transaction, that transaction's connection is returned to + * DB's idle connection pool. The pool size can be controlled with + * SetMaxIdleConns. + */ + interface DB {} + interface DB { /** - * Get retrieves a single value from the current JsonMap. + * PingContext verifies a connection to the database is still alive, + * establishing a connection if necessary. + */ + pingContext(ctx: context.Context): void + } + interface DB { + /** + * Ping verifies a connection to the database is still alive, establishing a + * connection if necessary. * - * This helper was added primarily to assist the goja integration since - * custom map types don't have direct access to the map keys - * (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods). + * Ping uses context.Background internally; to specify the context, use + * PingContext. */ - get(key: string): any + ping(): void } - interface JsonMap { + interface DB { /** - * Set sets a single value in the current JsonMap. + * Close closes the database and prevents new queries from starting. 
Close + * then waits for all queries that have started processing on the server to + * finish. * - * This helper was added primarily to assist the goja integration since - * custom map types don't have direct access to the map keys - * (https://pkg.go.dev/github.com/dop251/goja#hdr-Maps_with_methods). + * It is rare to Close a DB, as the DB handle is meant to be long-lived and + * shared between many goroutines. */ - set(key: string, value: any): void + close(): void } - interface JsonMap { - /** Value implements the [driver.Valuer] interface. */ - value(): any - } - interface JsonMap { + interface DB { /** - * Scan implements [sql.Scanner] interface to scan the provided value into - * the current `JsonMap` instance. + * SetMaxIdleConns sets the maximum number of connections in the idle + * connection pool. + * + * If MaxOpenConns is greater than 0 but less than the new MaxIdleConns, + * then the new MaxIdleConns will be reduced to match the MaxOpenConns + * limit. + * + * If n <= 0, no idle connections are retained. + * + * The default max idle connections is currently 2. This may change in a + * future release. */ - scan(value: any): void + setMaxIdleConns(n: number): void + } + interface DB { + /** + * SetMaxOpenConns sets the maximum number of open connections to the + * database. + * + * If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than + * MaxIdleConns, then MaxIdleConns will be reduced to match the new + * MaxOpenConns limit. + * + * If n <= 0, then there is no limit on the number of open connections. The + * default is 0 (unlimited). + */ + setMaxOpenConns(n: number): void + } + interface DB { + /** + * SetConnMaxLifetime sets the maximum amount of time a connection may be + * reused. + * + * Expired connections may be closed lazily before reuse. + * + * If d <= 0, connections are not closed due to a connection's age. 
+ */ + setConnMaxLifetime(d: time.Duration): void + } + interface DB { + /** + * SetConnMaxIdleTime sets the maximum amount of time a connection may be + * idle. + * + * Expired connections may be closed lazily before reuse. + * + * If d <= 0, connections are not closed due to a connection's idle time. + */ + setConnMaxIdleTime(d: time.Duration): void + } + interface DB { + /** Stats returns database statistics. */ + stats(): DBStats + } + interface DB { + /** + * PrepareContext creates a prepared statement for later queries or + * executions. Multiple queries or executions may be run concurrently from + * the returned statement. The caller must call the statement's Close method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not + * for the execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): Stmt + } + interface DB { + /** + * Prepare creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the returned + * statement. The caller must call the statement's Close method when the + * statement is no longer needed. + * + * Prepare uses context.Background internally; to specify the context, use + * PrepareContext. + */ + prepare(query: string): Stmt + } + interface DB { + /** + * ExecContext executes a query without returning any rows. The args are for + * any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface DB { + /** + * Exec executes a query without returning any rows. The args are for any + * placeholder parameters in the query. + * + * Exec uses context.Background internally; to specify the context, use + * ExecContext. + */ + exec(query: string, ...args: any[]): Result + } + interface DB { + /** + * QueryContext executes a query that returns rows, typically a SELECT. 
The + * args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): Rows + } + interface DB { + /** + * Query executes a query that returns rows, typically a SELECT. The args + * are for any placeholder parameters in the query. + * + * Query uses context.Background internally; to specify the context, use + * QueryContext. + */ + query(query: string, ...args: any[]): Rows + } + interface DB { + /** + * QueryRowContext executes a query that is expected to return at most one + * row. QueryRowContext always returns a non-nil value. Errors are deferred + * until Row's Scan method is called. If the query selects no rows, the + * *Row's Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the + * first selected row and discards the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): Row + } + interface DB { + /** + * QueryRow executes a query that is expected to return at most one row. + * QueryRow always returns a non-nil value. Errors are deferred until Row's + * Scan method is called. If the query selects no rows, the *Row's Scan will + * return ErrNoRows. Otherwise, the *Row's Scan scans the first selected row + * and discards the rest. + * + * QueryRow uses context.Background internally; to specify the context, use + * QueryRowContext. + */ + queryRow(query: string, ...args: any[]): Row + } + interface DB { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled + * back. If the context is canceled, the sql package will roll back the + * transaction. Tx.Commit will return an error if the context provided to + * BeginTx is canceled. + * + * The provided TxOptions is optional and may be nil if defaults should be + * used. If a non-default isolation level is used that the driver doesn't + * support, an error will be returned. 
+ */ + beginTx(ctx: context.Context, opts: TxOptions): Tx + } + interface DB { + /** + * Begin starts a transaction. The default isolation level is dependent on + * the driver. + * + * Begin uses context.Background internally; to specify the context, use + * BeginTx. + */ + begin(): Tx + } + interface DB { + /** Driver returns the database's underlying driver. */ + driver(): any + } + interface DB { + /** + * Conn returns a single connection by either opening a new connection or + * returning an existing connection from the connection pool. Conn will + * block until either a connection is returned or ctx is canceled. Queries + * run on the same Conn will be run in the same database session. + * + * Every Conn must be returned to the database pool after use by calling + * Conn.Close. + */ + conn(ctx: context.Context): Conn + } + /** + * Tx is an in-progress database transaction. + * + * A transaction must end with a call to Commit or Rollback. + * + * After a call to Commit or Rollback, all operations on the transaction fail + * with ErrTxDone. + * + * The statements prepared for a transaction by calling the transaction's + * Prepare or Stmt methods are closed by the call to Commit or Rollback. + */ + interface Tx {} + interface Tx { + /** Commit commits the transaction. */ + commit(): void + } + interface Tx { + /** Rollback aborts the transaction. */ + rollback(): void + } + interface Tx { + /** + * PrepareContext creates a prepared statement for use within a transaction. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * To use an existing prepared statement on this transaction, see Tx.Stmt. + * + * The provided context will be used for the preparation of the context, not + * for the execution of the returned statement. The returned statement will + * run in the transaction context. 
+ */ + prepareContext(ctx: context.Context, query: string): Stmt + } + interface Tx { + /** + * Prepare creates a prepared statement for use within a transaction. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * To use an existing prepared statement on this transaction, see Tx.Stmt. + * + * Prepare uses context.Background internally; to specify the context, use + * PrepareContext. + */ + prepare(query: string): Stmt + } + interface Tx { + /** + * StmtContext returns a transaction-specific prepared statement from an + * existing statement. + * + * Example: + * + * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") + * ... + * tx, err := db.Begin() + * ... + * res, err := tx.StmtContext(ctx, updateMoney).Exec(123.45, 98293203) + * + * The provided context is used for the preparation of the statement, not + * for the execution of the statement. + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + */ + stmtContext(ctx: context.Context, stmt: Stmt): Stmt + } + interface Tx { + /** + * Stmt returns a transaction-specific prepared statement from an existing + * statement. + * + * Example: + * + * updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?") + * ... + * tx, err := db.Begin() + * ... + * res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203) + * + * The returned statement operates within the transaction and will be closed + * when the transaction has been committed or rolled back. + * + * Stmt uses context.Background internally; to specify the context, use + * StmtContext. + */ + stmt(stmt: Stmt): Stmt + } + interface Tx { + /** + * ExecContext executes a query that doesn't return rows. For example: an + * INSERT and UPDATE. 
+ */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Tx { + /** + * Exec executes a query that doesn't return rows. For example: an INSERT + * and UPDATE. + * + * Exec uses context.Background internally; to specify the context, use + * ExecContext. + */ + exec(query: string, ...args: any[]): Result + } + interface Tx { + /** QueryContext executes a query that returns rows, typically a SELECT. */ + queryContext(ctx: context.Context, query: string, ...args: any[]): Rows + } + interface Tx { + /** + * Query executes a query that returns rows, typically a SELECT. + * + * Query uses context.Background internally; to specify the context, use + * QueryContext. + */ + query(query: string, ...args: any[]): Rows + } + interface Tx { + /** + * QueryRowContext executes a query that is expected to return at most one + * row. QueryRowContext always returns a non-nil value. Errors are deferred + * until Row's Scan method is called. If the query selects no rows, the + * *Row's Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the + * first selected row and discards the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): Row + } + interface Tx { + /** + * QueryRow executes a query that is expected to return at most one row. + * QueryRow always returns a non-nil value. Errors are deferred until Row's + * Scan method is called. If the query selects no rows, the *Row's Scan will + * return ErrNoRows. Otherwise, the *Row's Scan scans the first selected row + * and discards the rest. + * + * QueryRow uses context.Background internally; to specify the context, use + * QueryRowContext. + */ + queryRow(query: string, ...args: any[]): Row + } + /** + * Stmt is a prepared statement. A Stmt is safe for concurrent use by multiple + * goroutines. + * + * If a Stmt is prepared on a Tx or Conn, it will be bound to a single + * underlying connection forever. 
If the Tx or Conn closes, the Stmt will + * become unusable and all operations will return an error. If a Stmt is + * prepared on a DB, it will remain usable for the lifetime of the DB. When + * the Stmt needs to execute on a new underlying connection, it will prepare + * itself on the new connection automatically. + */ + interface Stmt {} + interface Stmt { + /** + * ExecContext executes a prepared statement with the given arguments and + * returns a Result summarizing the effect of the statement. + */ + execContext(ctx: context.Context, ...args: any[]): Result + } + interface Stmt { + /** + * Exec executes a prepared statement with the given arguments and returns a + * Result summarizing the effect of the statement. + * + * Exec uses context.Background internally; to specify the context, use + * ExecContext. + */ + exec(...args: any[]): Result + } + interface Stmt { + /** + * QueryContext executes a prepared query statement with the given arguments + * and returns the query results as a *Rows. + */ + queryContext(ctx: context.Context, ...args: any[]): Rows + } + interface Stmt { + /** + * Query executes a prepared query statement with the given arguments and + * returns the query results as a *Rows. + * + * Query uses context.Background internally; to specify the context, use + * QueryContext. + */ + query(...args: any[]): Rows + } + interface Stmt { + /** + * QueryRowContext executes a prepared query statement with the given + * arguments. If an error occurs during the execution of the statement, that + * error will be returned by a call to Scan on the returned *Row, which is + * always non-nil. If the query selects no rows, the *Row's Scan will return + * ErrNoRows. Otherwise, the *Row's Scan scans the first selected row and + * discards the rest. + */ + queryRowContext(ctx: context.Context, ...args: any[]): Row + } + interface Stmt { + /** + * QueryRow executes a prepared query statement with the given arguments. 
If + * an error occurs during the execution of the statement, that error will be + * returned by a call to Scan on the returned *Row, which is always non-nil. + * If the query selects no rows, the *Row's Scan will return ErrNoRows. + * Otherwise, the *Row's Scan scans the first selected row and discards the + * rest. + * + * Example usage: + * + * var name string + * err := nameByUseridStmt.QueryRow(id).Scan(&name) + * + * QueryRow uses context.Background internally; to specify the context, use + * QueryRowContext. + */ + queryRow(...args: any[]): Row + } + interface Stmt { + /** Close closes the statement. */ + close(): void + } + /** + * Rows is the result of a query. Its cursor starts before the first row of + * the result set. Use Next to advance from row to row. + */ + interface Rows {} + interface Rows { + /** + * Next prepares the next result row for reading with the Scan method. It + * returns true on success, or false if there is no next result row or an + * error happened while preparing it. Err should be consulted to distinguish + * between the two cases. + * + * Every call to Scan, even the first one, must be preceded by a call to + * Next. + */ + next(): boolean + } + interface Rows { + /** + * NextResultSet prepares the next result set for reading. It reports + * whether there is further result sets, or false if there is no further + * result set or if there is an error advancing to it. The Err method should + * be consulted to distinguish between the two cases. + * + * After calling NextResultSet, the Next method should always be called + * before scanning. If there are further result sets they may not have rows + * in the result set. + */ + nextResultSet(): boolean + } + interface Rows { + /** + * Err returns the error, if any, that was encountered during iteration. Err + * may be called after an explicit or implicit Close. + */ + err(): void + } + interface Rows { + /** + * Columns returns the column names. 
Columns returns an error if the rows + * are closed. + */ + columns(): Array + } + interface Rows { + /** + * ColumnTypes returns column information such as column type, length, and + * nullable. Some information may not be available from some drivers. + */ + columnTypes(): Array + } + interface Rows { + /** + * Scan copies the columns in the current row into the values pointed at by + * dest. The number of values in dest must be the same as the number of + * columns in Rows. + * + * Scan converts columns read from the database into the following common Go + * types and special types provided by the sql package: + * + * *string + * *[]byte + * *int, *int8, *int16, *int32, *int64 + * *uint, *uint8, *uint16, *uint32, *uint64 + * *bool + * *float32, *float64 + * *interface{} + * *RawBytes + * *Rows (cursor value) + * any type implementing Scanner (see Scanner docs) + * + * In the most simple case, if the type of the value from the source column + * is an integer, bool or string type T and dest is of type *T, Scan simply + * assigns the value through the pointer. + * + * Scan also converts between string and numeric types, as long as no + * information would be lost. While Scan stringifies all numbers scanned + * from numeric database columns into *string, scans into numeric types are + * checked for overflow. For example, a float64 with value 300 or a string + * with value "300" can scan into a uint16, but not into a uint8, though + * float64(255) or "255" can scan into a uint8. One exception is that scans + * of some float64 numbers to strings may lose information when + * stringifying. In general, scan floating point columns into *float64. + * + * If a dest argument has type *[]byte, Scan saves in that argument a copy + * of the corresponding data. The copy is owned by the caller and can be + * modified and held indefinitely. 
The copy can be avoided by using an + * argument of type *RawBytes instead; see the documentation for RawBytes + * for restrictions on its use. + * + * If an argument has type *interface{}, Scan copies the value provided by + * the underlying driver without conversion. When scanning from a source + * value of type []byte to *interface{}, a copy of the slice is made and the + * caller owns the result. + * + * Source values of type time.Time may be scanned into values of type + * *time.Time, *interface{}, *string, or *[]byte. When converting to the + * latter two, time.RFC3339Nano is used. + * + * Source values of type bool may be scanned into types *bool, *interface{}, + * *string, *[]byte, or *RawBytes. + * + * For scanning into *bool, the source may be true, false, 1, 0, or string + * inputs parseable by strconv.ParseBool. + * + * Scan can also convert a cursor returned from a query, such as "select + * cursor(select * from my_table) from dual", into a *Rows value that can + * itself be scanned from. The parent select query will close any cursor + * *Rows if the parent *Rows is closed. + * + * If any of the first arguments implementing Scanner returns an error, that + * error will be wrapped in the returned error. + */ + scan(...dest: any[]): void + } + interface Rows { + /** + * Close closes the Rows, preventing further enumeration. If Next is called + * and returns false and there are no further result sets, the Rows are + * closed automatically and it will suffice to check the result of Err. + * Close is idempotent and does not affect the result of Err. + */ + close(): void + } + /** A Result summarizes an executed SQL command. */ + interface Result { + [key: string]: any + /** + * LastInsertId returns the integer generated by the database in response to + * a command. Typically this will be from an "auto increment" column when + * inserting a new row. Not all databases support this feature, and the + * syntax of such statements varies. 
+ */ + lastInsertId(): number + /** + * RowsAffected returns the number of rows affected by an update, insert, or + * delete. Not every database or database driver may support this. + */ + rowsAffected(): number + } +} + +namespace migrate { + /** MigrationsList defines a list with migration definitions */ + interface MigrationsList {} + interface MigrationsList { + /** Item returns a single migration from the list by its index. */ + item(index: number): Migration + } + interface MigrationsList { + /** Items returns the internal migrations list slice. */ + items(): Array + } + interface MigrationsList { + /** + * Register adds new migration definition to the list. + * + * If `optFilename` is not provided, it will try to get the name from its + * .go file. + * + * The list will be sorted automatically based on the migrations file name. + */ + register( + up: (db: dbx.Builder) => void, + down: (db: dbx.Builder) => void, + ...optFilename: string[] + ): void } } @@ -11218,6 +10792,8 @@ namespace settings { instagramAuth: AuthProviderConfig vkAuth: AuthProviderConfig yandexAuth: AuthProviderConfig + patreonAuth: AuthProviderConfig + mailcowAuth: AuthProviderConfig } interface Settings { /** @@ -11232,14 +10808,14 @@ namespace settings { } interface Settings { /** Clone creates a new deep copy of the current settings. */ - clone(): Settings | undefined + clone(): Settings } interface Settings { /** * RedactClone creates a new deep copy of the current settings, while * replacing the secret values with `******`. */ - redactClone(): Settings | undefined + redactClone(): Settings } interface Settings { /** @@ -11250,6 +10826,1015 @@ namespace settings { } } +/** + * Package cobra is a commander providing a simple interface to create powerful + * modern CLI interfaces. In addition to providing an interface, Cobra + * simultaneously provides a controller to organize your application code. 
+ */ +namespace cobra { + interface Command { + /** + * GenBashCompletion generates bash completion file and writes to the passed + * writer. + */ + genBashCompletion(w: io.Writer): void + } + interface Command { + /** GenBashCompletionFile generates bash completion file. */ + genBashCompletionFile(filename: string): void + } + interface Command { + /** GenBashCompletionFileV2 generates Bash completion version 2. */ + genBashCompletionFileV2(filename: string, includeDesc: boolean): void + } + interface Command { + /** + * GenBashCompletionV2 generates Bash completion file version 2 and writes + * it to the passed writer. + */ + genBashCompletionV2(w: io.Writer, includeDesc: boolean): void + } + // @ts-ignore + import flag = pflag + /** + * Command is just that, a command for your application. E.g. 'go run ...' - + * 'run' is the command. Cobra requires you to define the usage and + * description as part of your command definition to ensure usability. + */ + interface Command { + /** + * Use is the one-line usage message. Recommended syntax is as follows: + * + * [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. + * ... indicates that you can specify multiple values for the previous argument. + * | indicates mutually exclusive information. You can use the argument to the left of the separator or the + * argument to the right of the separator. You cannot use both arguments in a single use of the command. + * { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are + * optional, they are enclosed in brackets ([ ]). + * + * Example: add [-F file | -D dir]... [-f format] profile + */ + use: string + /** + * Aliases is an array of aliases that can be used instead of the first word + * in Use. + */ + aliases: Array + /** + * SuggestFor is an array of command names for which this command will be + * suggested - similar to aliases but only suggests. 
+ */ + suggestFor: Array + /** Short is the short description shown in the 'help' output. */ + short: string + /** + * The group id under which this subcommand is grouped in the 'help' output + * of its parent. + */ + groupID: string + /** Long is the long message shown in the 'help ' output. */ + long: string + /** Example is examples of how to use the command. */ + example: string + /** + * ValidArgs is list of all valid non-flag arguments that are accepted in + * shell completions + */ + validArgs: Array + /** + * ValidArgsFunction is an optional function that provides valid non-flag + * arguments for shell completion. It is a dynamic version of using + * ValidArgs. Only one of ValidArgs and ValidArgsFunction can be used for a + * command. + */ + validArgsFunction: ( + cmd: Command, + args: Array, + toComplete: string, + ) => [Array, ShellCompDirective] + /** Expected arguments */ + args: PositionalArgs + /** + * ArgAliases is List of aliases for ValidArgs. These are not suggested to + * the user in the shell completion, but accepted if entered manually. + */ + argAliases: Array + /** + * BashCompletionFunction is custom bash functions used by the legacy bash + * autocompletion generator. For portability with other shells, it is + * recommended to instead use ValidArgsFunction + */ + bashCompletionFunction: string + /** + * Deprecated defines, if this command is deprecated and should print this + * string when used. + */ + deprecated: string + /** + * Annotations are key/value pairs that can be used by applications to + * identify or group commands or set special options. + */ + annotations: _TygojaDict + /** + * Version defines the version for this command. If this value is non-empty + * and the command does not define a "version" flag, a "version" boolean + * flag will be added to the command and, if specified, will print content + * of the "Version" variable. A shorthand "v" flag will also be added if the + * command does not define one. 
+ */ + version: string + /** + * The *Run functions are executed in the following order: + * + * * PersistentPreRun() + * * PreRun() + * * Run() + * * PostRun() + * * PersistentPostRun() + * + * All functions get the same args, the arguments after the command name. + * The *PreRun and *PostRun functions will only be executed if the Run + * function of the current command has been declared. + * + * PersistentPreRun: children of this command will inherit and execute. + */ + persistentPreRun: (cmd: Command, args: Array) => void + /** PersistentPreRunE: PersistentPreRun but returns an error. */ + persistentPreRunE: (cmd: Command, args: Array) => void + /** PreRun: children of this command will not inherit. */ + preRun: (cmd: Command, args: Array) => void + /** PreRunE: PreRun but returns an error. */ + preRunE: (cmd: Command, args: Array) => void + /** + * Run: Typically the actual work function. Most commands will only + * implement this. + */ + run: (cmd: Command, args: Array) => void + /** RunE: Run but returns an error. */ + runE: (cmd: Command, args: Array) => void + /** PostRun: run after the Run command. */ + postRun: (cmd: Command, args: Array) => void + /** PostRunE: PostRun but returns an error. */ + postRunE: (cmd: Command, args: Array) => void + /** + * PersistentPostRun: children of this command will inherit and execute + * after PostRun. + */ + persistentPostRun: (cmd: Command, args: Array) => void + /** PersistentPostRunE: PersistentPostRun but returns an error. */ + persistentPostRunE: (cmd: Command, args: Array) => void + /** FParseErrWhitelist flag parse errors to be ignored */ + fParseErrWhitelist: FParseErrWhitelist + /** + * CompletionOptions is a set of options to control the handling of shell + * completion + */ + completionOptions: CompletionOptions + /** + * TraverseChildren parses flags on all parents before executing child + * command. 
+ */ + traverseChildren: boolean + /** + * Hidden defines, if this command is hidden and should NOT show up in the + * list of available commands. + */ + hidden: boolean + /** SilenceErrors is an option to quiet errors down stream. */ + silenceErrors: boolean + /** SilenceUsage is an option to silence usage when an error occurs. */ + silenceUsage: boolean + /** + * DisableFlagParsing disables the flag parsing. If this is true all flags + * will be passed to the command as arguments. + */ + disableFlagParsing: boolean + /** + * DisableAutoGenTag defines, if gen tag ("Auto generated by + * spf13/cobra...") will be printed by generating docs for this command. + */ + disableAutoGenTag: boolean + /** + * DisableFlagsInUseLine will disable the addition of [flags] to the usage + * line of a command when printing help or generating docs + */ + disableFlagsInUseLine: boolean + /** + * DisableSuggestions disables the suggestions based on Levenshtein distance + * that go along with 'unknown command' messages. + */ + disableSuggestions: boolean + /** + * SuggestionsMinimumDistance defines minimum levenshtein distance to + * display suggestions. Must be > 0. + */ + suggestionsMinimumDistance: number + } + interface Command { + /** + * Context returns underlying command context. If command was executed with + * ExecuteContext or the context was set with SetContext, the previously set + * context will be returned. Otherwise, nil is returned. + * + * Notice that a call to Execute and ExecuteC will replace a nil context of + * a command with a context.Background, so a background context will be + * returned by Context after one of these functions has been called. + */ + context(): context.Context + } + interface Command { + /** + * SetContext sets context for the command. This context will be overwritten + * by Command.ExecuteContext or Command.ExecuteContextC. + */ + setContext(ctx: context.Context): void + } + interface Command { + /** + * SetArgs sets arguments for the command. 
It is set to os.Args[1:] by + * default, if desired, can be overridden particularly useful when testing. + */ + setArgs(a: Array): void + } + interface Command { + /** + * SetOutput sets the destination for usage and error messages. If output is + * nil, os.Stderr is used. Deprecated: Use SetOut and/or SetErr instead + */ + setOutput(output: io.Writer): void + } + interface Command { + /** + * SetOut sets the destination for usage messages. If newOut is nil, + * os.Stdout is used. + */ + setOut(newOut: io.Writer): void + } + interface Command { + /** + * SetErr sets the destination for error messages. If newErr is nil, + * os.Stderr is used. + */ + setErr(newErr: io.Writer): void + } + interface Command { + /** SetIn sets the source for input data If newIn is nil, os.Stdin is used. */ + setIn(newIn: io.Reader): void + } + interface Command { + /** SetUsageFunc sets usage function. Usage can be defined by application. */ + setUsageFunc(f: (_arg0: Command) => void): void + } + interface Command { + /** SetUsageTemplate sets usage template. Can be defined by Application. */ + setUsageTemplate(s: string): void + } + interface Command { + /** + * SetFlagErrorFunc sets a function to generate an error when flag parsing + * fails. + */ + setFlagErrorFunc(f: (_arg0: Command, _arg1: Error) => void): void + } + interface Command { + /** SetHelpFunc sets help function. Can be defined by Application. */ + setHelpFunc(f: (_arg0: Command, _arg1: Array) => void): void + } + interface Command { + /** SetHelpCommand sets help command. */ + setHelpCommand(cmd: Command): void + } + interface Command { + /** SetHelpCommandGroupID sets the group id of the help command. */ + setHelpCommandGroupID(groupID: string): void + } + interface Command { + /** SetCompletionCommandGroupID sets the group id of the completion command. */ + setCompletionCommandGroupID(groupID: string): void + } + interface Command { + /** + * SetHelpTemplate sets help template to be used. 
Application can use it to + * set custom template. + */ + setHelpTemplate(s: string): void + } + interface Command { + /** + * SetVersionTemplate sets version template to be used. Application can use + * it to set custom template. + */ + setVersionTemplate(s: string): void + } + interface Command { + /** + * SetErrPrefix sets error message prefix to be used. Application can use it + * to set custom prefix. + */ + setErrPrefix(s: string): void + } + interface Command { + /** + * SetGlobalNormalizationFunc sets a normalization function to all flag sets + * and also to child commands. The user should not have a cyclic dependency + * on commands. + */ + setGlobalNormalizationFunc(n: (f: any, name: string) => any): void + } + interface Command { + /** OutOrStdout returns output to stdout. */ + outOrStdout(): io.Writer + } + interface Command { + /** OutOrStderr returns output to stderr */ + outOrStderr(): io.Writer + } + interface Command { + /** ErrOrStderr returns output to stderr */ + errOrStderr(): io.Writer + } + interface Command { + /** InOrStdin returns input to stdin */ + inOrStdin(): io.Reader + } + interface Command { + /** + * UsageFunc returns either the function set by SetUsageFunc for this + * command or a parent, or it returns a default usage function. + */ + usageFunc(): (_arg0: Command) => void + } + interface Command { + /** + * Usage puts out the usage for the command. Used when a user provides + * invalid input. Can be defined by user by overriding UsageFunc. + */ + usage(): void + } + interface Command { + /** + * HelpFunc returns either the function set by SetHelpFunc for this command + * or a parent, or it returns a function with default help behavior. + */ + helpFunc(): (_arg0: Command, _arg1: Array) => void + } + interface Command { + /** + * Help puts out the help for the command. Used when a user calls help + * [command]. Can be defined by user by overriding HelpFunc. 
+ */ + help(): void + } + interface Command { + /** UsageString returns usage string. */ + usageString(): string + } + interface Command { + /** + * FlagErrorFunc returns either the function set by SetFlagErrorFunc for + * this command or a parent, or it returns a function which returns the + * original error. + */ + flagErrorFunc(): (_arg0: Command, _arg1: Error) => void + } + interface Command { + /** UsagePadding return padding for the usage. */ + usagePadding(): number + } + interface Command { + /** CommandPathPadding return padding for the command path. */ + commandPathPadding(): number + } + interface Command { + /** NamePadding returns padding for the name. */ + namePadding(): number + } + interface Command { + /** UsageTemplate returns usage template for the command. */ + usageTemplate(): string + } + interface Command { + /** HelpTemplate return help template for the command. */ + helpTemplate(): string + } + interface Command { + /** VersionTemplate return version template for the command. */ + versionTemplate(): string + } + interface Command { + /** ErrPrefix return error message prefix for the command */ + errPrefix(): string + } + interface Command { + /** + * Find the target command given the args and command tree Meant to be run + * on the highest node. Only searches down. + */ + find(args: Array): [Command, Array] + } + interface Command { + /** + * Traverse the command tree to find the command, and parse args for each + * parent. + */ + traverse(args: Array): [Command, Array] + } + interface Command { + /** SuggestionsFor provides suggestions for the typedName. */ + suggestionsFor(typedName: string): Array + } + interface Command { + /** + * VisitParents visits all parents of the command and invokes fn on each + * parent. + */ + visitParents(fn: (_arg0: Command) => void): void + } + interface Command { + /** Root finds root command. 
*/ + root(): Command + } + interface Command { + /** + * ArgsLenAtDash will return the length of c.Flags().Args at the moment when + * a -- was found during args parsing. + */ + argsLenAtDash(): number + } + interface Command { + /** + * ExecuteContext is the same as Execute(), but sets the ctx on the command. + * Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or + * ValidArgs functions. + */ + executeContext(ctx: context.Context): void + } + interface Command { + /** + * Execute uses the args (os.Args[1:] by default) and run through the + * command tree finding appropriate matches for commands and then + * corresponding flags. + */ + execute(): void + } + interface Command { + /** + * ExecuteContextC is the same as ExecuteC(), but sets the ctx on the + * command. Retrieve ctx by calling cmd.Context() inside your *Run lifecycle + * or ValidArgs functions. + */ + executeContextC(ctx: context.Context): Command + } + interface Command { + /** ExecuteC executes the command. */ + executeC(): Command + } + interface Command { + validateArgs(args: Array): void + } + interface Command { + /** + * ValidateRequiredFlags validates all required flags are present and + * returns an error otherwise + */ + validateRequiredFlags(): void + } + interface Command { + /** + * InitDefaultHelpFlag adds default help flag to c. It is called + * automatically by executing the c or by calling help and usage. If c + * already has help flag, it will do nothing. + */ + initDefaultHelpFlag(): void + } + interface Command { + /** + * InitDefaultVersionFlag adds default version flag to c. It is called + * automatically by executing the c. If c already has a version flag, it + * will do nothing. If c.Version is empty, it will do nothing. + */ + initDefaultVersionFlag(): void + } + interface Command { + /** + * InitDefaultHelpCmd adds default help command to c. It is called + * automatically by executing the c or by calling help and usage. 
If c + * already has help command or c has no subcommands, it will do nothing. + */ + initDefaultHelpCmd(): void + } + interface Command { + /** ResetCommands delete parent, subcommand and help command from c. */ + resetCommands(): void + } + interface Command { + /** Commands returns a sorted slice of child commands. */ + commands(): Array + } + interface Command { + /** AddCommand adds one or more commands to this parent command. */ + addCommand(...cmds: (Command | undefined)[]): void + } + interface Command { + /** Groups returns a slice of child command groups. */ + groups(): Array + } + interface Command { + /** + * AllChildCommandsHaveGroup returns if all subcommands are assigned to a + * group + */ + allChildCommandsHaveGroup(): boolean + } + interface Command { + /** ContainsGroup return if groupID exists in the list of command groups. */ + containsGroup(groupID: string): boolean + } + interface Command { + /** AddGroup adds one or more command groups to this parent command. */ + addGroup(...groups: (Group | undefined)[]): void + } + interface Command { + /** RemoveCommand removes one or more commands from a parent command. */ + removeCommand(...cmds: (Command | undefined)[]): void + } + interface Command { + /** + * Print is a convenience method to Print to the defined output, fallback to + * Stderr if not set. + */ + print(...i: {}[]): void + } + interface Command { + /** + * Println is a convenience method to Println to the defined output, + * fallback to Stderr if not set. + */ + println(...i: {}[]): void + } + interface Command { + /** + * Printf is a convenience method to Printf to the defined output, fallback + * to Stderr if not set. + */ + printf(format: string, ...i: {}[]): void + } + interface Command { + /** + * PrintErr is a convenience method to Print to the defined Err output, + * fallback to Stderr if not set. 
+ */ + printErr(...i: {}[]): void + } + interface Command { + /** + * PrintErrln is a convenience method to Println to the defined Err output, + * fallback to Stderr if not set. + */ + printErrln(...i: {}[]): void + } + interface Command { + /** + * PrintErrf is a convenience method to Printf to the defined Err output, + * fallback to Stderr if not set. + */ + printErrf(format: string, ...i: {}[]): void + } + interface Command { + /** CommandPath returns the full path to this command. */ + commandPath(): string + } + interface Command { + /** UseLine puts out the full usage for a given command (including parents). */ + useLine(): string + } + interface Command { + /** + * DebugFlags used to determine which flags have been assigned to which + * commands and which persist. nolint:goconst + */ + debugFlags(): void + } + interface Command { + /** Name returns the command's name: the first word in the use line. */ + name(): string + } + interface Command { + /** HasAlias determines if a given string is an alias of the command. */ + hasAlias(s: string): boolean + } + interface Command { + /** + * CalledAs returns the command name or alias that was used to invoke this + * command or an empty string if the command has not been called. + */ + calledAs(): string + } + interface Command { + /** NameAndAliases returns a list of the command name and all aliases */ + nameAndAliases(): string + } + interface Command { + /** HasExample determines if the command has example. */ + hasExample(): boolean + } + interface Command { + /** Runnable determines if the command is itself runnable. */ + runnable(): boolean + } + interface Command { + /** HasSubCommands determines if the command has children commands. */ + hasSubCommands(): boolean + } + interface Command { + /** + * IsAvailableCommand determines if a command is available as a non-help + * command (this includes all non deprecated/hidden commands). 
+ */ + isAvailableCommand(): boolean + } + interface Command { + /** + * IsAdditionalHelpTopicCommand determines if a command is an additional + * help topic command; additional help topic command is determined by the + * fact that it is NOT runnable/hidden/deprecated, and has no sub commands + * that are runnable/hidden/deprecated. Concrete example: + * https://github.com/spf13/cobra/issues/393#issuecomment-282741924. + */ + isAdditionalHelpTopicCommand(): boolean + } + interface Command { + /** + * HasHelpSubCommands determines if a command has any available 'help' sub + * commands that need to be shown in the usage/help default template under + * 'additional help topics'. + */ + hasHelpSubCommands(): boolean + } + interface Command { + /** + * HasAvailableSubCommands determines if a command has available sub + * commands that need to be shown in the usage/help default template under + * 'available commands'. + */ + hasAvailableSubCommands(): boolean + } + interface Command { + /** HasParent determines if the command is a child command. */ + hasParent(): boolean + } + interface Command { + /** + * GlobalNormalizationFunc returns the global normalization function or nil + * if it doesn't exist. + */ + globalNormalizationFunc(): (f: any, name: string) => any + } + interface Command { + /** + * Flags returns the complete FlagSet that applies to this command (local + * and persistent declared here and by all parents). + */ + flags(): any + } + interface Command { + /** + * LocalNonPersistentFlags are flags specific to this command which will NOT + * persist to subcommands. + */ + localNonPersistentFlags(): any + } + interface Command { + /** + * LocalFlags returns the local FlagSet specifically set in the current + * command. + */ + localFlags(): any + } + interface Command { + /** + * InheritedFlags returns all flags which were inherited from parent + * commands. 
+ */ + inheritedFlags(): any + } + interface Command { + /** + * NonInheritedFlags returns all flags which were not inherited from parent + * commands. + */ + nonInheritedFlags(): any + } + interface Command { + /** + * PersistentFlags returns the persistent FlagSet specifically set in the + * current command. + */ + persistentFlags(): any + } + interface Command { + /** ResetFlags deletes all flags from command. */ + resetFlags(): void + } + interface Command { + /** + * HasFlags checks if the command contains any flags (local plus persistent + * from the entire structure). + */ + hasFlags(): boolean + } + interface Command { + /** HasPersistentFlags checks if the command contains persistent flags. */ + hasPersistentFlags(): boolean + } + interface Command { + /** + * HasLocalFlags checks if the command has flags specifically declared + * locally. + */ + hasLocalFlags(): boolean + } + interface Command { + /** + * HasInheritedFlags checks if the command has flags inherited from its + * parent command. + */ + hasInheritedFlags(): boolean + } + interface Command { + /** + * HasAvailableFlags checks if the command contains any flags (local plus + * persistent from the entire structure) which are not hidden or + * deprecated. + */ + hasAvailableFlags(): boolean + } + interface Command { + /** + * HasAvailablePersistentFlags checks if the command contains persistent + * flags which are not hidden or deprecated. + */ + hasAvailablePersistentFlags(): boolean + } + interface Command { + /** + * HasAvailableLocalFlags checks if the command has flags specifically + * declared locally which are not hidden or deprecated. + */ + hasAvailableLocalFlags(): boolean + } + interface Command { + /** + * HasAvailableInheritedFlags checks if the command has flags inherited from + * its parent command which are not hidden or deprecated. + */ + hasAvailableInheritedFlags(): boolean + } + interface Command { + /** Flag climbs up the command tree looking for matching flag. 
*/ + flag(name: string): any + } + interface Command { + /** ParseFlags parses persistent flag tree and local flags. */ + parseFlags(args: Array): void + } + interface Command { + /** Parent returns a commands parent command. */ + parent(): Command + } + interface Command { + /** + * RegisterFlagCompletionFunc should be called to register a function to + * provide completion for a flag. + */ + registerFlagCompletionFunc( + flagName: string, + f: ( + cmd: Command, + args: Array, + toComplete: string, + ) => [Array, ShellCompDirective], + ): void + } + interface Command { + /** + * GetFlagCompletionFunc returns the completion function for the given flag + * of the command, if available. + */ + getFlagCompletionFunc( + flagName: string, + ): [ + ( + _arg0: Command, + _arg1: Array, + _arg2: string, + ) => [Array, ShellCompDirective], + boolean, + ] + } + interface Command { + /** + * InitDefaultCompletionCmd adds a default 'completion' command to c. This + * function will do nothing if any of the following is true: + * + * 1. The feature has been explicitly disabled by the program, + * 2. C has no subcommands (to avoid creating one), + * 3. C already has a 'completion' command provided by the program. + */ + initDefaultCompletionCmd(): void + } + interface Command { + /** + * GenFishCompletion generates fish completion file and writes to the passed + * writer. + */ + genFishCompletion(w: io.Writer, includeDesc: boolean): void + } + interface Command { + /** GenFishCompletionFile generates fish completion file. */ + genFishCompletionFile(filename: string, includeDesc: boolean): void + } + interface Command { + /** + * MarkFlagsRequiredTogether marks the given flags with annotations so that + * Cobra errors if the command is invoked with a subset (but not all) of the + * given flags. 
+ */ + markFlagsRequiredTogether(...flagNames: string[]): void + } + interface Command { + /** + * MarkFlagsOneRequired marks the given flags with annotations so that Cobra + * errors if the command is invoked without at least one flag from the given + * set of flags. + */ + markFlagsOneRequired(...flagNames: string[]): void + } + interface Command { + /** + * MarkFlagsMutuallyExclusive marks the given flags with annotations so that + * Cobra errors if the command is invoked with more than one flag from the + * given set of flags. + */ + markFlagsMutuallyExclusive(...flagNames: string[]): void + } + interface Command { + /** + * ValidateFlagGroups validates the + * mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the first + * error encountered. + */ + validateFlagGroups(): void + } + interface Command { + /** + * GenPowerShellCompletionFile generates powershell completion file without + * descriptions. + */ + genPowerShellCompletionFile(filename: string): void + } + interface Command { + /** + * GenPowerShellCompletion generates powershell completion file without + * descriptions and writes it to the passed writer. + */ + genPowerShellCompletion(w: io.Writer): void + } + interface Command { + /** + * GenPowerShellCompletionFileWithDesc generates powershell completion file + * with descriptions. + */ + genPowerShellCompletionFileWithDesc(filename: string): void + } + interface Command { + /** + * GenPowerShellCompletionWithDesc generates powershell completion file with + * descriptions and writes it to the passed writer. + */ + genPowerShellCompletionWithDesc(w: io.Writer): void + } + interface Command { + /** + * MarkFlagRequired instructs the various shell completion implementations + * to prioritize the named flag when performing completion, and causes your + * command to report an error if invoked without the flag. 
+ */ + markFlagRequired(name: string): void + } + interface Command { + /** + * MarkPersistentFlagRequired instructs the various shell completion + * implementations to prioritize the named persistent flag when performing + * completion, and causes your command to report an error if invoked without + * the flag. + */ + markPersistentFlagRequired(name: string): void + } + interface Command { + /** + * MarkFlagFilename instructs the various shell completion implementations + * to limit completions for the named flag to the specified file + * extensions. + */ + markFlagFilename(name: string, ...extensions: string[]): void + } + interface Command { + /** + * MarkFlagCustom adds the BashCompCustom annotation to the named flag, if + * it exists. The bash completion script will call the bash function f for + * the flag. + * + * This will only work for bash completion. It is recommended to instead use + * c.RegisterFlagCompletionFunc(...) which allows to register a Go function + * which will work across all shells. + */ + markFlagCustom(name: string, f: string): void + } + interface Command { + /** + * MarkPersistentFlagFilename instructs the various shell completion + * implementations to limit completions for the named persistent flag to the + * specified file extensions. + */ + markPersistentFlagFilename(name: string, ...extensions: string[]): void + } + interface Command { + /** + * MarkFlagDirname instructs the various shell completion implementations to + * limit completions for the named flag to directory names. + */ + markFlagDirname(name: string): void + } + interface Command { + /** + * MarkPersistentFlagDirname instructs the various shell completion + * implementations to limit completions for the named persistent flag to + * directory names. + */ + markPersistentFlagDirname(name: string): void + } + interface Command { + /** + * GenZshCompletionFile generates zsh completion file including + * descriptions. 
+ */ + genZshCompletionFile(filename: string): void + } + interface Command { + /** + * GenZshCompletion generates zsh completion file including descriptions and + * writes it to the passed writer. + */ + genZshCompletion(w: io.Writer): void + } + interface Command { + /** + * GenZshCompletionFileNoDesc generates zsh completion file without + * descriptions. + */ + genZshCompletionFileNoDesc(filename: string): void + } + interface Command { + /** + * GenZshCompletionNoDesc generates zsh completion file without descriptions + * and writes it to the passed writer. + */ + genZshCompletionNoDesc(w: io.Writer): void + } + interface Command { + /** + * MarkZshCompPositionalArgumentFile only worked for zsh and its behavior + * was not consistent with Bash completion. It has therefore been disabled. + * Instead, when no other completion is specified, file completion is done + * by default for every argument. One can disable file completion on a + * per-argument basis by using ValidArgsFunction and + * ShellCompDirectiveNoFileComp. To achieve file extension filtering, one + * can use ValidArgsFunction and ShellCompDirectiveFilterFileExt. + * + * Deprecated + */ + markZshCompPositionalArgumentFile( + argPosition: number, + ...patterns: string[] + ): void + } + interface Command { + /** + * MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore + * been disabled. To achieve the same behavior across all shells, one can + * use ValidArgs (for the first argument only) or ValidArgsFunction for any + * argument (can include the first one also). + * + * Deprecated + */ + markZshCompPositionalArgumentWords( + argPosition: number, + ...words: string[] + ): void + } +} + /** * Package schema implements custom Schema and SchemaField datatypes for * handling the Collection schema definitions. @@ -11269,7 +11854,7 @@ namespace schema { } interface Schema { /** Clone creates a deep clone of the current schema. 
*/ - clone(): Schema | undefined + clone(): Schema } interface Schema { /** @@ -11280,11 +11865,11 @@ namespace schema { } interface Schema { /** GetFieldById returns a single field by its id. */ - getFieldById(id: string): SchemaField | undefined + getFieldById(id: string): SchemaField } interface Schema { /** GetFieldByName returns a single field by its name. */ - getFieldByName(name: string): SchemaField | undefined + getFieldByName(name: string): SchemaField } interface Schema { /** @@ -11317,7 +11902,7 @@ namespace schema { } interface Schema { /** MarshalJSON implements the [json.Marshaler] interface. */ - marshalJSON(): string + marshalJSON(): string | Array } interface Schema { /** @@ -11325,7 +11910,7 @@ namespace schema { * * On success, all schema field options are auto initialized. */ - unmarshalJSON(data: string): void + unmarshalJSON(data: string | Array): void } interface Schema { /** Value implements the [driver.Valuer] interface. */ @@ -11342,8 +11927,8 @@ namespace schema { /** Package models implements all PocketBase DB models and DTOs. */ namespace models { - type _subAzCSd = BaseModel - interface Admin extends _subAzCSd { + type _subGeaoE = BaseModel + interface Admin extends _subGeaoE { avatar: number email: string tokenKey: string @@ -11373,8 +11958,8 @@ namespace models { } // @ts-ignore import validation = ozzo_validation - type _subdySrd = BaseModel - interface Collection extends _subdySrd { + type _subvYPhz = BaseModel + interface Collection extends _subvYPhz { name: string type: string system: boolean @@ -11410,7 +11995,7 @@ namespace models { } interface Collection { /** MarshalJSON implements the [json.Marshaler] interface. 
*/ - marshalJSON(): string + marshalJSON(): string | Array } interface Collection { /** @@ -11454,8 +12039,8 @@ namespace models { */ setOptions(typedOptions: any): void } - type _subsGIYZ = BaseModel - interface ExternalAuth extends _subsGIYZ { + type _subZrUNb = BaseModel + interface ExternalAuth extends _subZrUNb { collectionId: string recordId: string provider: string @@ -11464,8 +12049,8 @@ namespace models { interface ExternalAuth { tableName(): string } - type _subntRqh = BaseModel - interface Record extends _subntRqh {} + type _subdDjDh = BaseModel + interface Record extends _subdDjDh {} interface Record { /** TableName returns the table name associated to the current Record model. */ tableName(): string @@ -11475,7 +12060,7 @@ namespace models { * Collection returns the Collection model associated to the current Record * model. */ - collection(): Collection | undefined + collection(): Collection } interface Record { /** @@ -11483,14 +12068,14 @@ namespace models { * its ORIGINAL data state (aka. the initially loaded) and everything else * reset to the defaults. */ - originalCopy(): Record | undefined + originalCopy(): Record } interface Record { /** * CleanCopy returns a copy of the current record model populated only with * its LATEST data state and everything else reset to the defaults. */ - cleanCopy(): Record | undefined + cleanCopy(): Record } interface Record { /** Expand returns a shallow copy of the current Record model expand data. */ @@ -11598,7 +12183,7 @@ namespace models { * * Returns nil if there is no such expand relation loaded. */ - expandedOne(relField: string): Record | undefined + expandedOne(relField: string): Record } interface Record { /** @@ -11634,7 +12219,7 @@ namespace models { * FindFileFieldByFile returns the first file type field for which any of * the record's data contains the provided filename. 
*/ - findFileFieldByFile(filename: string): schema.SchemaField | undefined + findFileFieldByFile(filename: string): schema.SchemaField } interface Record { /** Load bulk loads the provided data into the current Record model. */ @@ -11659,11 +12244,11 @@ namespace models { * * Only the data exported by `PublicExport()` will be serialized. */ - marshalJSON(): string + marshalJSON(): string | Array } interface Record { /** UnmarshalJSON implements the [json.Unmarshaler] interface. */ - unmarshalJSON(data: string): void + unmarshalJSON(data: string | Array): void } interface Record { /** @@ -11814,12 +12399,12 @@ namespace models { * `@request.*` filter resolver. */ interface RequestInfo { - method: string query: _TygojaDict data: _TygojaDict headers: _TygojaDict authRecord?: Record admin?: Admin + method: string } interface RequestInfo { /** @@ -11838,26 +12423,23 @@ namespace models { namespace daos { interface Dao { /** AdminQuery returns a new Admin select query. */ - adminQuery(): dbx.SelectQuery | undefined + adminQuery(): dbx.SelectQuery } interface Dao { /** FindAdminById finds the admin with the provided id. */ - findAdminById(id: string): models.Admin | undefined + findAdminById(id: string): models.Admin } interface Dao { /** FindAdminByEmail finds the admin with the provided email address. */ - findAdminByEmail(email: string): models.Admin | undefined + findAdminByEmail(email: string): models.Admin } interface Dao { /** - * FindAdminByToken finds the admin associated with the provided JWT token. + * FindAdminByToken finds the admin associated with the provided JWT. * - * Returns an error if the JWT token is invalid or expired. + * Returns an error if the JWT is invalid or expired. */ - findAdminByToken( - token: string, - baseTokenKey: string, - ): models.Admin | undefined + findAdminByToken(token: string, baseTokenKey: string): models.Admin } interface Dao { /** TotalAdmins returns the number of existing admin records. 
*/ @@ -11954,21 +12536,21 @@ namespace daos { * Clone returns a new Dao with the same configuration options as the * current one. */ - clone(): Dao | undefined + clone(): Dao } interface Dao { /** * WithoutHooks returns a new Dao with the same configuration options as the * current one, but without create/update/delete hooks. */ - withoutHooks(): Dao | undefined + withoutHooks(): Dao } interface Dao { /** * ModelQuery creates a new preconfigured select query with preset SELECT, * FROM and other common fields based on the provided model. */ - modelQuery(m: models.Model): dbx.SelectQuery | undefined + modelQuery(m: models.Model): dbx.SelectQuery } interface Dao { /** @@ -12001,7 +12583,7 @@ namespace daos { } interface Dao { /** CollectionQuery returns a new Collection select query. */ - collectionQuery(): dbx.SelectQuery | undefined + collectionQuery(): dbx.SelectQuery } interface Dao { /** FindCollectionsByType finds all collections by the given type. */ @@ -12014,7 +12596,7 @@ namespace daos { * FindCollectionByNameOrId finds a single collection by its name (case * insensitive) or id. */ - findCollectionByNameOrId(nameOrId: string): models.Collection | undefined + findCollectionByNameOrId(nameOrId: string): models.Collection } interface Dao { /** @@ -12083,7 +12665,7 @@ namespace daos { } interface Dao { /** ExternalAuthQuery returns a new ExternalAuth select query. */ - externalAuthQuery(): dbx.SelectQuery | undefined + externalAuthQuery(): dbx.SelectQuery } interface Dao { /** @@ -12094,15 +12676,6 @@ namespace daos { authRecord: models.Record, ): Array } - interface Dao { - /** - * FindExternalAuthByProvider returns the first available ExternalAuth model - * for the specified provider and providerId. 
- */ - findExternalAuthByProvider( - provider: string, - ): models.ExternalAuth | undefined - } interface Dao { /** * FindExternalAuthByRecordAndProvider returns the first available @@ -12111,7 +12684,14 @@ namespace daos { findExternalAuthByRecordAndProvider( authRecord: models.Record, provider: string, - ): models.ExternalAuth | undefined + ): models.ExternalAuth + } + interface Dao { + /** + * FindFirstExternalAuthByExpr returns the first available ExternalAuth + * model that satisfies the non-nil expression. + */ + findFirstExternalAuthByExpr(expr: dbx.Expression): models.ExternalAuth } interface Dao { /** SaveExternalAuth upserts the provided ExternalAuth model. */ @@ -12121,13 +12701,33 @@ namespace daos { /** DeleteExternalAuth deletes the provided ExternalAuth model. */ deleteExternalAuth(model: models.ExternalAuth): void } + interface Dao { + /** LogQuery returns a new Log select query. */ + logQuery(): dbx.SelectQuery + } + interface Dao { + /** FindLogById finds a single Log entry by its id. */ + findLogById(id: string): models.Log + } + interface Dao { + /** LogsStats returns hourly grouped requests logs statistics. */ + logsStats(expr: dbx.Expression): Array + } + interface Dao { + /** DeleteOldLogs delete all requests that are created before createdBefore. */ + deleteOldLogs(createdBefore: time.Time): void + } + interface Dao { + /** SaveLog upserts the provided Log model. */ + saveLog(log: models.Log): void + } interface Dao { /** ParamQuery returns a new Param select query. */ - paramQuery(): dbx.SelectQuery | undefined + paramQuery(): dbx.SelectQuery } interface Dao { /** FindParamByKey finds the first Param model with the provided key. */ - findParamByKey(key: string): models.Param | undefined + findParamByKey(key: string): models.Param } interface Dao { /** @@ -12153,7 +12753,7 @@ namespace daos { * context and will fail once an executor (Row(), One(), All(), etc.) is * called. 
*/ - recordQuery(collectionModelOrIdentifier: any): dbx.SelectQuery | undefined + recordQuery(collectionModelOrIdentifier: any): dbx.SelectQuery } interface Dao { /** FindRecordById finds the Record model by its id. */ @@ -12161,7 +12761,7 @@ namespace daos { collectionNameOrId: string, recordId: string, ...optFilters: ((q: dbx.SelectQuery) => void)[] - ): models.Record | undefined + ): models.Record } interface Dao { /** @@ -12205,7 +12805,7 @@ namespace daos { collectionNameOrId: string, key: string, value: any, - ): models.Record | undefined + ): models.Record } interface Dao { /** @@ -12234,9 +12834,9 @@ namespace daos { findRecordsByFilter( collectionNameOrId: string, filter: string, - sort: string, - limit: number, - offset: number, + sort?: string, + limit?: number, + offset?: number, ...params: dbx.Params[] ): Array } @@ -12255,7 +12855,7 @@ namespace daos { collectionNameOrId: string, filter: string, ...params: dbx.Params[] - ): models.Record | undefined + ): models.Record } interface Dao { /** @@ -12280,15 +12880,12 @@ namespace daos { interface Dao { /** * FindAuthRecordByToken finds the auth record associated with the provided - * JWT token. + * JWT. * - * Returns an error if the JWT token is invalid, expired or not associated - * to an auth collection record. + * Returns an error if the JWT is invalid, expired or not associated to an + * auth collection record. 
*/ - findAuthRecordByToken( - token: string, - baseTokenKey: string, - ): models.Record | undefined + findAuthRecordByToken(token: string, baseTokenKey: string): models.Record } interface Dao { /** @@ -12301,7 +12898,7 @@ namespace daos { findAuthRecordByEmail( collectionNameOrId: string, email: string, - ): models.Record | undefined + ): models.Record } interface Dao { /** @@ -12314,7 +12911,7 @@ namespace daos { findAuthRecordByUsername( collectionNameOrId: string, username: string, - ): models.Record | undefined + ): models.Record } interface Dao { /** @@ -12392,7 +12989,7 @@ namespace daos { expandRecord( record: models.Record, expands: Array, - optFetchFunc: ExpandFetchFunc, + optFetchFunc: ExpandFetchFunc | null | undefined, ): _TygojaDict } interface Dao { @@ -12425,29 +13022,6 @@ namespace daos { oldCollection: models.Collection, ): void } - interface Dao { - /** RequestQuery returns a new Request logs select query. */ - requestQuery(): dbx.SelectQuery | undefined - } - interface Dao { - /** FindRequestById finds a single Request log by its id. */ - findRequestById(id: string): models.Request | undefined - } - interface Dao { - /** RequestsStats returns hourly grouped requests logs statistics. */ - requestsStats(expr: dbx.Expression): Array - } - interface Dao { - /** - * DeleteOldRequests delete all requests that are created before - * createdBefore. - */ - deleteOldRequests(createdBefore: time.Time): void - } - interface Dao { - /** SaveRequest upserts the provided Request model. */ - saveRequest(request: models.Request): void - } interface Dao { /** * FindSettings returns and decode the serialized app settings param value. @@ -12458,7 +13032,7 @@ namespace daos { * * Returns an error if it fails to decode the stored serialized param value. 
*/ - findSettings(...optEncryptionKey: string[]): settings.Settings | undefined + findSettings(...optEncryptionKey: string[]): settings.Settings } interface Dao { /** @@ -12556,7 +13130,7 @@ namespace daos { viewCollectionNameOrId: string, fileFieldName: string, filename: string, - ): models.Record | undefined + ): models.Record } } @@ -12568,6 +13142,7 @@ namespace daos { namespace core { /** App defines the main PocketBase app interface. */ interface App { + [key: string]: any /** * Deprecated: This method may get removed in the near future. It is * recommended to access the app db instance from app.Dao().DB() or if you @@ -12576,7 +13151,7 @@ namespace core { * * DB returns the default app database instance. */ - db(): dbx.DB | undefined + db(): dbx.DB /** * Dao returns the default app Dao instance. * @@ -12584,7 +13159,7 @@ namespace core { * default app database. For example, trying to access the request logs * table will result in error. */ - dao(): daos.Dao | undefined + dao(): daos.Dao /** * Deprecated: This method may get removed in the near future. It is * recommended to access the logs db instance from app.LogsDao().DB() or if @@ -12593,7 +13168,7 @@ namespace core { * * LogsDB returns the app logs database instance. */ - logsDB(): dbx.DB | undefined + logsDB(): dbx.DB /** * LogsDao returns the app logs Dao instance. * @@ -12601,7 +13176,9 @@ namespace core { * logs database. For example, trying to access the users table from LogsDao * will result in error. */ - logsDao(): daos.Dao | undefined + logsDao(): daos.Dao + /** Logger returns the active app logger. */ + logger(): slog.Logger /** DataDir returns the app data directory path. */ dataDir(): string /** @@ -12609,20 +13186,19 @@ namespace core { * settings encryption). */ encryptionEnv(): string - /** - * IsDebug returns whether the app is in debug mode (showing more detailed - * error logs, executed sql statements, etc.). - */ - isDebug(): boolean + /** IsDev returns whether the app is in dev mode. 
*/ + isDev(): boolean /** Settings returns the loaded app settings. */ - settings(): settings.Settings | undefined - /** Cache returns the app internal cache store. */ - cache(): store.Store | undefined + settings(): settings.Settings + /** Deprecated: Use app.Store() instead. */ + cache(): store.Store + /** Store returns the app runtime store. */ + store(): store.Store /** * SubscriptionsBroker returns the app realtime subscriptions broker * instance. */ - subscriptionsBroker(): subscriptions.Broker | undefined + subscriptionsBroker(): subscriptions.Broker /** NewMailClient creates and returns a configured app mail client. */ newMailClient(): mailer.Mailer /** @@ -12632,7 +13208,7 @@ namespace core { * NB! Make sure to call Close() on the returned result after you are done * working with it. */ - newFilesystem(): filesystem.System | undefined + newFilesystem(): filesystem.System /** * NewBackupsFilesystem creates and returns a configured filesystem.System * instance for managing app backups. @@ -12640,7 +13216,7 @@ namespace core { * NB! Make sure to call Close() on the returned result after you are done * working with it. */ - newBackupsFilesystem(): filesystem.System | undefined + newBackupsFilesystem(): filesystem.System /** * RefreshSettings reinitializes and reloads the stored application * settings. @@ -12670,7 +13246,7 @@ namespace core { * Backups can be stored on S3 if it is configured in * app.Settings().Backups. * - * Please refer to the godoc of the specific core.App implementation for + * Please refer to the godoc of the specific CoreApp implementation for * details on the backup procedures. */ createBackup(ctx: context.Context, name: string): void @@ -12681,7 +13257,7 @@ namespace core { * The safely perform the restore it is recommended to have free disk space * for at least 2x the size of the restored pb_data backup. 
* - * Please refer to the godoc of the specific core.App implementation for + * Please refer to the godoc of the specific CoreApp implementation for * details on the restore procedures. * * NB! This feature is experimental and currently is expected to work only @@ -12699,35 +13275,35 @@ namespace core { * OnBeforeBootstrap hook is triggered before initializing the main * application resources (eg. before db open and initial settings load). */ - onBeforeBootstrap(): hook.Hook | undefined + onBeforeBootstrap(): hook.Hook /** * OnAfterBootstrap hook is triggered after initializing the main * application resources (eg. after db open and initial settings load). */ - onAfterBootstrap(): hook.Hook | undefined + onAfterBootstrap(): hook.Hook /** * OnBeforeServe hook is triggered before serving the internal router * (echo), allowing you to adjust its options and attach new routes or * middlewares. */ - onBeforeServe(): hook.Hook | undefined + onBeforeServe(): hook.Hook /** * OnBeforeApiError hook is triggered right before sending an error API * response to the client, allowing you to further modify the error data or * to return a completely different API response. */ - onBeforeApiError(): hook.Hook | undefined + onBeforeApiError(): hook.Hook /** * OnAfterApiError hook is triggered right after sending an error API * response to the client. It could be used to log the final API error in * external services. */ - onAfterApiError(): hook.Hook | undefined + onAfterApiError(): hook.Hook /** * OnTerminate hook is triggered when the app is in the process of being * terminated (eg. on SIGTERM signal). */ - onTerminate(): hook.Hook | undefined + onTerminate(): hook.Hook /** * OnModelBeforeCreate hook is triggered before inserting a new model in the * DB, allowing you to modify or validate the stored data. 
@@ -12739,7 +13315,7 @@ namespace core { */ onModelBeforeCreate( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnModelAfterCreate hook is triggered after successfully inserting a new * model in the DB. @@ -12751,7 +13327,7 @@ namespace core { */ onModelAfterCreate( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnModelBeforeUpdate hook is triggered before updating existing model in * the DB, allowing you to modify or validate the stored data. @@ -12763,7 +13339,7 @@ namespace core { */ onModelBeforeUpdate( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnModelAfterUpdate hook is triggered after successfully updating existing * model in the DB. @@ -12775,7 +13351,7 @@ namespace core { */ onModelAfterUpdate( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnModelBeforeDelete hook is triggered before deleting an existing model * from the DB. @@ -12787,7 +13363,7 @@ namespace core { */ onModelBeforeDelete( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnModelAfterDelete hook is triggered after successfully deleting an * existing model from the DB. @@ -12799,22 +13375,22 @@ namespace core { */ onModelAfterDelete( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnMailerBeforeAdminResetPasswordSend hook is triggered right before * sending a password reset email to an admin, allowing you to inspect and * customize the email message that is being sent. */ - onMailerBeforeAdminResetPasswordSend(): - | hook.Hook - | undefined + onMailerBeforeAdminResetPasswordSend(): hook.Hook< + MailerAdminEvent | undefined + > /** * OnMailerAfterAdminResetPasswordSend hook is triggered after admin * password reset email was successfully sent. 
*/ - onMailerAfterAdminResetPasswordSend(): - | hook.Hook - | undefined + onMailerAfterAdminResetPasswordSend(): hook.Hook< + MailerAdminEvent | undefined + > /** * OnMailerBeforeRecordResetPasswordSend hook is triggered right before * sending a password reset email to an auth record, allowing you to inspect @@ -12826,7 +13402,7 @@ namespace core { */ onMailerBeforeRecordResetPasswordSend( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnMailerAfterRecordResetPasswordSend hook is triggered after an auth * record password reset email was successfully sent. @@ -12837,7 +13413,7 @@ namespace core { */ onMailerAfterRecordResetPasswordSend( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnMailerBeforeRecordVerificationSend hook is triggered right before * sending a verification email to an auth record, allowing you to inspect @@ -12849,7 +13425,7 @@ namespace core { */ onMailerBeforeRecordVerificationSend( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnMailerAfterRecordVerificationSend hook is triggered after a * verification email was successfully sent to an auth record. @@ -12860,7 +13436,7 @@ namespace core { */ onMailerAfterRecordVerificationSend( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnMailerBeforeRecordChangeEmailSend hook is triggered right before * sending a confirmation new address email to an auth record, allowing you @@ -12872,7 +13448,7 @@ namespace core { */ onMailerBeforeRecordChangeEmailSend( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnMailerAfterRecordChangeEmailSend hook is triggered after a verification * email was successfully sent to an auth record. 
@@ -12883,53 +13459,47 @@ namespace core { */ onMailerAfterRecordChangeEmailSend( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRealtimeConnectRequest hook is triggered right before establishing the * SSE client connection. */ - onRealtimeConnectRequest(): - | hook.Hook - | undefined + onRealtimeConnectRequest(): hook.Hook /** * OnRealtimeDisconnectRequest hook is triggered on disconnected/interrupted * SSE client connection. */ - onRealtimeDisconnectRequest(): - | hook.Hook - | undefined + onRealtimeDisconnectRequest(): hook.Hook< + RealtimeDisconnectEvent | undefined + > /** - * OnRealtimeBeforeMessage hook is triggered right before sending an SSE + * OnRealtimeBeforeMessageSend hook is triggered right before sending an SSE * message to a client. * * Returning [hook.StopPropagation] will prevent sending the message. * Returning any other non-nil error will close the realtime connection. */ - onRealtimeBeforeMessageSend(): - | hook.Hook - | undefined + onRealtimeBeforeMessageSend(): hook.Hook /** - * OnRealtimeBeforeMessage hook is triggered right after sending an SSE + * OnRealtimeAfterMessageSend hook is triggered right after sending an SSE * message to a client. */ - onRealtimeAfterMessageSend(): - | hook.Hook - | undefined + onRealtimeAfterMessageSend(): hook.Hook /** * OnRealtimeBeforeSubscribeRequest hook is triggered before changing the * client subscriptions, allowing you to further validate and modify the * submitted change. */ - onRealtimeBeforeSubscribeRequest(): - | hook.Hook - | undefined + onRealtimeBeforeSubscribeRequest(): hook.Hook< + RealtimeSubscribeEvent | undefined + > /** * OnRealtimeAfterSubscribeRequest hook is triggered after the client * subscriptions were successfully changed. 
*/ - onRealtimeAfterSubscribeRequest(): - | hook.Hook - | undefined + onRealtimeAfterSubscribeRequest(): hook.Hook< + RealtimeSubscribeEvent | undefined + > /** * OnSettingsListRequest hook is triggered on each successful API Settings * list request. @@ -12937,9 +13507,7 @@ namespace core { * Could be used to validate or modify the response before returning it to * the client. */ - onSettingsListRequest(): - | hook.Hook - | undefined + onSettingsListRequest(): hook.Hook /** * OnSettingsBeforeUpdateRequest hook is triggered before each API Settings * update request (after request data load and before settings @@ -12948,16 +13516,12 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different persistence behavior. */ - onSettingsBeforeUpdateRequest(): - | hook.Hook - | undefined + onSettingsBeforeUpdateRequest(): hook.Hook /** * OnSettingsAfterUpdateRequest hook is triggered after each successful API * Settings update request. */ - onSettingsAfterUpdateRequest(): - | hook.Hook - | undefined + onSettingsAfterUpdateRequest(): hook.Hook /** * OnFileDownloadRequest hook is triggered before each API File download * request. @@ -12967,7 +13531,7 @@ namespace core { */ onFileDownloadRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnFileBeforeTokenRequest hook is triggered before each file token API * request. @@ -12982,7 +13546,7 @@ namespace core { */ onFileBeforeTokenRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnFileAfterTokenRequest hook is triggered after each successful file * token API request. @@ -12993,21 +13557,21 @@ namespace core { */ onFileAfterTokenRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnAdminsListRequest hook is triggered on each API Admins list request. * * Could be used to validate or modify the response before returning it to * the client. 
*/ - onAdminsListRequest(): hook.Hook | undefined + onAdminsListRequest(): hook.Hook /** * OnAdminViewRequest hook is triggered on each API Admin view request. * * Could be used to validate or modify the response before returning it to * the client. */ - onAdminViewRequest(): hook.Hook | undefined + onAdminViewRequest(): hook.Hook /** * OnAdminBeforeCreateRequest hook is triggered before each API Admin create * request (after request data load and before model persistence). @@ -13015,16 +13579,12 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different persistence behavior. */ - onAdminBeforeCreateRequest(): - | hook.Hook - | undefined + onAdminBeforeCreateRequest(): hook.Hook /** * OnAdminAfterCreateRequest hook is triggered after each successful API * Admin create request. */ - onAdminAfterCreateRequest(): - | hook.Hook - | undefined + onAdminAfterCreateRequest(): hook.Hook /** * OnAdminBeforeUpdateRequest hook is triggered before each API Admin update * request (after request data load and before model persistence). @@ -13032,16 +13592,12 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different persistence behavior. */ - onAdminBeforeUpdateRequest(): - | hook.Hook - | undefined + onAdminBeforeUpdateRequest(): hook.Hook /** * OnAdminAfterUpdateRequest hook is triggered after each successful API * Admin update request. */ - onAdminAfterUpdateRequest(): - | hook.Hook - | undefined + onAdminAfterUpdateRequest(): hook.Hook /** * OnAdminBeforeDeleteRequest hook is triggered before each API Admin delete * request (after model load and before actual deletion). @@ -13049,16 +13605,12 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different delete behavior. 
*/ - onAdminBeforeDeleteRequest(): - | hook.Hook - | undefined + onAdminBeforeDeleteRequest(): hook.Hook /** * OnAdminAfterDeleteRequest hook is triggered after each successful API * Admin delete request. */ - onAdminAfterDeleteRequest(): - | hook.Hook - | undefined + onAdminAfterDeleteRequest(): hook.Hook /** * OnAdminAuthRequest hook is triggered on each successful API Admin * authentication request (sign-in, token refresh, etc.). @@ -13066,7 +13618,7 @@ namespace core { * Could be used to additionally validate or modify the authenticated admin * data and token. */ - onAdminAuthRequest(): hook.Hook | undefined + onAdminAuthRequest(): hook.Hook /** * OnAdminBeforeAuthWithPasswordRequest hook is triggered before each Admin * auth with password API request (after request data load and before @@ -13076,16 +13628,16 @@ namespace core { * locate a different Admin identity (by assigning * [AdminAuthWithPasswordEvent.Admin]). */ - onAdminBeforeAuthWithPasswordRequest(): - | hook.Hook - | undefined + onAdminBeforeAuthWithPasswordRequest(): hook.Hook< + AdminAuthWithPasswordEvent | undefined + > /** * OnAdminAfterAuthWithPasswordRequest hook is triggered after each * successful Admin auth with password API request. */ - onAdminAfterAuthWithPasswordRequest(): - | hook.Hook - | undefined + onAdminAfterAuthWithPasswordRequest(): hook.Hook< + AdminAuthWithPasswordEvent | undefined + > /** * OnAdminBeforeAuthRefreshRequest hook is triggered before each Admin auth * refresh API request (right before generating a new auth token). @@ -13093,16 +13645,16 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different auth refresh behavior. 
*/ - onAdminBeforeAuthRefreshRequest(): - | hook.Hook - | undefined + onAdminBeforeAuthRefreshRequest(): hook.Hook< + AdminAuthRefreshEvent | undefined + > /** * OnAdminAfterAuthRefreshRequest hook is triggered after each successful * auth refresh API request (right after generating a new auth token). */ - onAdminAfterAuthRefreshRequest(): - | hook.Hook - | undefined + onAdminAfterAuthRefreshRequest(): hook.Hook< + AdminAuthRefreshEvent | undefined + > /** * OnAdminBeforeRequestPasswordResetRequest hook is triggered before each * Admin request password reset API request (after request data load and @@ -13111,16 +13663,16 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different password reset behavior. */ - onAdminBeforeRequestPasswordResetRequest(): - | hook.Hook - | undefined + onAdminBeforeRequestPasswordResetRequest(): hook.Hook< + AdminRequestPasswordResetEvent | undefined + > /** * OnAdminAfterRequestPasswordResetRequest hook is triggered after each * successful request password reset API request. */ - onAdminAfterRequestPasswordResetRequest(): - | hook.Hook - | undefined + onAdminAfterRequestPasswordResetRequest(): hook.Hook< + AdminRequestPasswordResetEvent | undefined + > /** * OnAdminBeforeConfirmPasswordResetRequest hook is triggered before each * Admin confirm password reset API request (after request data load and @@ -13129,16 +13681,16 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different persistence behavior. */ - onAdminBeforeConfirmPasswordResetRequest(): - | hook.Hook - | undefined + onAdminBeforeConfirmPasswordResetRequest(): hook.Hook< + AdminConfirmPasswordResetEvent | undefined + > /** * OnAdminAfterConfirmPasswordResetRequest hook is triggered after each * successful confirm password reset API request. 
*/ - onAdminAfterConfirmPasswordResetRequest(): - | hook.Hook - | undefined + onAdminAfterConfirmPasswordResetRequest(): hook.Hook< + AdminConfirmPasswordResetEvent | undefined + > /** * OnRecordAuthRequest hook is triggered on each successful API record * authentication request (sign-in, token refresh, etc.). @@ -13152,7 +13704,7 @@ namespace core { */ onRecordAuthRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeAuthWithPasswordRequest hook is triggered before each * Record auth with password API request (after request data load and before @@ -13168,7 +13720,7 @@ namespace core { */ onRecordBeforeAuthWithPasswordRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterAuthWithPasswordRequest hook is triggered after each * successful Record auth with password API request. @@ -13179,7 +13731,7 @@ namespace core { */ onRecordAfterAuthWithPasswordRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeAuthWithOAuth2Request hook is triggered before each Record * OAuth2 sign-in/sign-up API request (after token exchange and before @@ -13197,7 +13749,7 @@ namespace core { */ onRecordBeforeAuthWithOAuth2Request( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterAuthWithOAuth2Request hook is triggered after each * successful Record OAuth2 API request. @@ -13208,7 +13760,7 @@ namespace core { */ onRecordAfterAuthWithOAuth2Request( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeAuthRefreshRequest hook is triggered before each Record * auth refresh API request (right before generating a new auth token). 
@@ -13222,7 +13774,7 @@ namespace core { */ onRecordBeforeAuthRefreshRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterAuthRefreshRequest hook is triggered after each successful * auth refresh API request (right after generating a new auth token). @@ -13233,7 +13785,7 @@ namespace core { */ onRecordAfterAuthRefreshRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordListExternalAuthsRequest hook is triggered on each API record * external auths list request. @@ -13247,7 +13799,7 @@ namespace core { */ onRecordListExternalAuthsRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeUnlinkExternalAuthRequest hook is triggered before each API * record external auth unlink request (after models load and before the @@ -13262,7 +13814,7 @@ namespace core { */ onRecordBeforeUnlinkExternalAuthRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterUnlinkExternalAuthRequest hook is triggered after each * successful API record external auth unlink request. @@ -13273,7 +13825,7 @@ namespace core { */ onRecordAfterUnlinkExternalAuthRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeRequestPasswordResetRequest hook is triggered before each * Record request password reset API request (after request data load and @@ -13288,7 +13840,7 @@ namespace core { */ onRecordBeforeRequestPasswordResetRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterRequestPasswordResetRequest hook is triggered after each * successful request password reset API request. 
@@ -13299,7 +13851,7 @@ namespace core { */ onRecordAfterRequestPasswordResetRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeConfirmPasswordResetRequest hook is triggered before each * Record confirm password reset API request (after request data load and @@ -13314,7 +13866,7 @@ namespace core { */ onRecordBeforeConfirmPasswordResetRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterConfirmPasswordResetRequest hook is triggered after each * successful confirm password reset API request. @@ -13325,7 +13877,7 @@ namespace core { */ onRecordAfterConfirmPasswordResetRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeRequestVerificationRequest hook is triggered before each * Record request verification API request (after request data load and @@ -13340,7 +13892,7 @@ namespace core { */ onRecordBeforeRequestVerificationRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterRequestVerificationRequest hook is triggered after each * successful request verification API request. @@ -13351,7 +13903,7 @@ namespace core { */ onRecordAfterRequestVerificationRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeConfirmVerificationRequest hook is triggered before each * Record confirm verification API request (after request data load and @@ -13366,7 +13918,7 @@ namespace core { */ onRecordBeforeConfirmVerificationRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterConfirmVerificationRequest hook is triggered after each * successful confirm verification API request. 
@@ -13377,7 +13929,7 @@ namespace core { */ onRecordAfterConfirmVerificationRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeRequestEmailChangeRequest hook is triggered before each * Record request email change API request (after request data load and @@ -13392,7 +13944,7 @@ namespace core { */ onRecordBeforeRequestEmailChangeRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterRequestEmailChangeRequest hook is triggered after each * successful request email change API request. @@ -13403,7 +13955,7 @@ namespace core { */ onRecordAfterRequestEmailChangeRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeConfirmEmailChangeRequest hook is triggered before each * Record confirm email change API request (after request data load and @@ -13418,7 +13970,7 @@ namespace core { */ onRecordBeforeConfirmEmailChangeRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterConfirmEmailChangeRequest hook is triggered after each * successful confirm email change API request. @@ -13429,7 +13981,7 @@ namespace core { */ onRecordAfterConfirmEmailChangeRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordsListRequest hook is triggered on each API Records list request. * @@ -13442,7 +13994,7 @@ namespace core { */ onRecordsListRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordViewRequest hook is triggered on each API Record view request. * @@ -13455,7 +14007,7 @@ namespace core { */ onRecordViewRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeCreateRequest hook is triggered before each API Record * create request (after request data load and before model persistence). 
@@ -13469,7 +14021,7 @@ namespace core { */ onRecordBeforeCreateRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterCreateRequest hook is triggered after each successful API * Record create request. @@ -13480,7 +14032,7 @@ namespace core { */ onRecordAfterCreateRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeUpdateRequest hook is triggered before each API Record * update request (after request data load and before model persistence). @@ -13494,7 +14046,7 @@ namespace core { */ onRecordBeforeUpdateRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterUpdateRequest hook is triggered after each successful API * Record update request. @@ -13505,7 +14057,7 @@ namespace core { */ onRecordAfterUpdateRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordBeforeDeleteRequest hook is triggered before each API Record * delete request (after model load and before actual deletion). @@ -13519,7 +14071,7 @@ namespace core { */ onRecordBeforeDeleteRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnRecordAfterDeleteRequest hook is triggered after each successful API * Record delete request. @@ -13530,7 +14082,7 @@ namespace core { */ onRecordAfterDeleteRequest( ...tags: string[] - ): hook.TaggedHook | undefined + ): hook.TaggedHook /** * OnCollectionsListRequest hook is triggered on each API Collections list * request. @@ -13538,9 +14090,7 @@ namespace core { * Could be used to validate or modify the response before returning it to * the client. */ - onCollectionsListRequest(): - | hook.Hook - | undefined + onCollectionsListRequest(): hook.Hook /** * OnCollectionViewRequest hook is triggered on each API Collection view * request. @@ -13548,9 +14098,7 @@ namespace core { * Could be used to validate or modify the response before returning it to * the client. 
*/ - onCollectionViewRequest(): - | hook.Hook - | undefined + onCollectionViewRequest(): hook.Hook /** * OnCollectionBeforeCreateRequest hook is triggered before each API * Collection create request (after request data load and before model @@ -13559,16 +14107,16 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different persistence behavior. */ - onCollectionBeforeCreateRequest(): - | hook.Hook - | undefined + onCollectionBeforeCreateRequest(): hook.Hook< + CollectionCreateEvent | undefined + > /** * OnCollectionAfterCreateRequest hook is triggered after each successful * API Collection create request. */ - onCollectionAfterCreateRequest(): - | hook.Hook - | undefined + onCollectionAfterCreateRequest(): hook.Hook< + CollectionCreateEvent | undefined + > /** * OnCollectionBeforeUpdateRequest hook is triggered before each API * Collection update request (after request data load and before model @@ -13577,16 +14125,16 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different persistence behavior. */ - onCollectionBeforeUpdateRequest(): - | hook.Hook - | undefined + onCollectionBeforeUpdateRequest(): hook.Hook< + CollectionUpdateEvent | undefined + > /** * OnCollectionAfterUpdateRequest hook is triggered after each successful * API Collection update request. */ - onCollectionAfterUpdateRequest(): - | hook.Hook - | undefined + onCollectionAfterUpdateRequest(): hook.Hook< + CollectionUpdateEvent | undefined + > /** * OnCollectionBeforeDeleteRequest hook is triggered before each API * Collection delete request (after model load and before actual deletion). @@ -13594,16 +14142,16 @@ namespace core { * Could be used to additionally validate the request data or implement * completely different delete behavior. 
*/ - onCollectionBeforeDeleteRequest(): - | hook.Hook - | undefined + onCollectionBeforeDeleteRequest(): hook.Hook< + CollectionDeleteEvent | undefined + > /** * OnCollectionAfterDeleteRequest hook is triggered after each successful * API Collection delete request. */ - onCollectionAfterDeleteRequest(): - | hook.Hook - | undefined + onCollectionAfterDeleteRequest(): hook.Hook< + CollectionDeleteEvent | undefined + > /** * OnCollectionsBeforeImportRequest hook is triggered before each API * collections import request (after request data load and before the actual @@ -13612,16 +14160,16 @@ namespace core { * Could be used to additionally validate the imported collections or to * implement completely different import behavior. */ - onCollectionsBeforeImportRequest(): - | hook.Hook - | undefined + onCollectionsBeforeImportRequest(): hook.Hook< + CollectionsImportEvent | undefined + > /** * OnCollectionsAfterImportRequest hook is triggered after each successful * API collections import request. */ - onCollectionsAfterImportRequest(): - | hook.Hook - | undefined + onCollectionsAfterImportRequest(): hook.Hook< + CollectionsImportEvent | undefined + > } } @@ -13636,10 +14184,29 @@ namespace core { * they are safe for parallel execution. */ namespace io { + /** + * Writer is the interface that wraps the basic Write method. + * + * Write writes len(p) bytes from p to the underlying data stream. It returns + * the number of bytes written from p (0 <= n <= len(p)) and any error + * encountered that caused the write to stop early. Write must return a + * non-nil error if it returns n < len(p). Write must not modify the slice + * data, even temporarily. + * + * Implementations must not retain p. + */ + interface Writer { + [key: string]: any + write(p: string | Array): number + } /** ReadCloser is the interface that groups the basic Read and Close methods. 
*/ - interface ReadCloser {} + interface ReadCloser { + [key: string]: any + } /** WriteCloser is the interface that groups the basic Write and Close methods. */ - interface WriteCloser {} + interface WriteCloser { + [key: string]: any + } } /** @@ -13703,7 +14270,7 @@ namespace syscall { * The calendrical calculations always assume a Gregorian calendar, with no leap * seconds. * - * Monotonic Clocks + * # Monotonic Clocks * * Operating systems provide both a “wall clock,” which is subject to changes * for clock synchronization, and a “monotonic clock,” which is not. The general @@ -13741,10 +14308,10 @@ namespace syscall { * t.Round(0). * * If Times t and u both contain monotonic clock readings, the operations - * t.After(u), t.Before(u), t.Equal(u), and t.Sub(u) are carried out using the - * monotonic clock readings alone, ignoring the wall clock readings. If either t - * or u contains no monotonic clock reading, these operations fall back to using - * the wall clock readings. + * t.After(u), t.Before(u), t.Equal(u), t.Compare(u), and t.Sub(u) are carried + * out using the monotonic clock readings alone, ignoring the wall clock + * readings. If either t or u contains no monotonic clock reading, these + * operations fall back to using the wall clock readings. * * On some systems the monotonic clock will stop if the computer goes to sleep. * On such a system, t.Sub(u) may not accurately reflect the actual time that @@ -13758,6 +14325,9 @@ namespace syscall { * t.GobDecode, t.UnmarshalBinary. t.UnmarshalJSON, and t.UnmarshalText always * create times with no monotonic clock reading. * + * The monotonic clock reading exists only in Time values. It is not a part of + * Duration values or the Unix times returned by t.Unix and friends. + * * Note that the Go == operator compares not just the time instant but also the * Location and the monotonic clock reading. See the documentation for the Time * type for a discussion of equality testing for Time values. 
@@ -13804,6 +14374,56 @@ namespace time { */ namespace fs {} +/** + * Package context defines the Context type, which carries deadlines, + * cancellation signals, and other request-scoped values across API boundaries + * and between processes. + * + * Incoming requests to a server should create a [Context], and outgoing calls + * to servers should accept a Context. The chain of function calls between them + * must propagate the Context, optionally replacing it with a derived Context + * created using [WithCancel], [WithDeadline], [WithTimeout], or [WithValue]. + * When a Context is canceled, all Contexts derived from it are also canceled. + * + * The [WithCancel], [WithDeadline], and [WithTimeout] functions take a Context + * (the parent) and return a derived Context (the child) and a [CancelFunc]. + * Calling the CancelFunc cancels the child and its children, removes the + * parent's reference to the child, and stops any associated timers. Failing to + * call the CancelFunc leaks the child and its children until the parent is + * canceled or the timer fires. The go vet tool checks that CancelFuncs are used + * on all control-flow paths. + * + * The [WithCancelCause] function returns a [CancelCauseFunc], which takes an + * error and records it as the cancellation cause. Calling [Cause] on the + * canceled context or any of its children retrieves the cause. If no cause is + * specified, Cause(ctx) returns the same value as ctx.Err(). + * + * Programs that use Contexts should follow these rules to keep interfaces + * consistent across packages and enable static analysis tools to check context + * propagation: + * + * Do not store Contexts inside a struct type; instead, pass a Context + * explicitly to each function that needs it. The Context should be the first + * parameter, typically named ctx: + * + * func DoSomething(ctx context.Context, arg Arg) error { + * // ... use ctx ... + * } + * + * Do not pass a nil [Context], even if a function permits it. 
Pass + * [context.TODO] if you are unsure about which Context to use. + * + * Use context Values only for request-scoped data that transits processes and + * APIs, not for passing optional parameters to functions. + * + * The same Context may be passed to functions running in different goroutines; + * Contexts are safe for simultaneous use by multiple goroutines. + * + * See https://blog.golang.org/context for example code for a server that uses + * Contexts. + */ +namespace context {} + /** Package url parses URLs and implements query escaping. */ namespace url { /** @@ -13820,11 +14440,14 @@ namespace url { * Note that the Path field is stored in decoded form: /%47%6f%2f becomes * /Go/. A consequence is that it is impossible to tell which slashes in the * Path were slashes in the raw URL and which were %2f. This distinction is - * rarely important, but when it is, the code should use RawPath, an optional - * field which only gets set if the default encoding is different from Path. + * rarely important, but when it is, the code should use the EscapedPath + * method, which preserves the original encoding of Path. * - * URL's String method uses the EscapedPath method to obtain the path. See the - * EscapedPath method for more details. + * The RawPath field is an optional field which is only set when the default + * encoding of Path is different from the escaped path. See the EscapedPath + * method for more details. + * + * URL's String method uses the EscapedPath method to obtain the path. */ interface URL { scheme: string @@ -13833,6 +14456,7 @@ namespace url { host: string // host or host:port path: string // path (relative paths may omit leading slash) rawPath: string // encoded path hint (see EscapedPath method) + omitHost: boolean // do not emit empty host (authority) forceQuery: boolean // append a query ('?') even if RawQuery is empty rawQuery: string // encoded query values, without '?' 
fragment: string // fragment for references, without '#' @@ -13876,22 +14500,22 @@ namespace url { * * In the second form, the following rules apply: * - * - if u.Scheme is empty, scheme: is omitted. - * - if u.User is nil, userinfo@ is omitted. - * - if u.Host is empty, host/ is omitted. - * - if u.Scheme and u.Host are empty and u.User is nil, - * the entire scheme://userinfo@host/ is omitted. - * - if u.Host is non-empty and u.Path begins with a /, - * the form host/path does not add its own /. - * - if u.RawQuery is empty, ?query is omitted. - * - if u.Fragment is empty, #fragment is omitted. + * - if u.Scheme is empty, scheme: is omitted. + * - if u.User is nil, userinfo@ is omitted. + * - if u.Host is empty, host/ is omitted. + * - if u.Scheme and u.Host are empty and u.User is nil, + * the entire scheme://userinfo@host/ is omitted. + * - if u.Host is non-empty and u.Path begins with a /, + * the form host/path does not add its own /. + * - if u.RawQuery is empty, ?query is omitted. + * - if u.Fragment is empty, #fragment is omitted. */ string(): string } interface URL { /** * Redacted is like String but replaces any password with "xxxxx". Only the - * password in u.URL is redacted. + * password in u.User is redacted. */ redacted(): string } @@ -13930,7 +14554,7 @@ namespace url { } interface Values { /** - * Encode encodes the values into ``URL encoded'' form ("bar=baz&foo=quux") + * Encode encodes the values into “URL encoded” form ("bar=baz&foo=quux") * sorted by key. */ encode(): string @@ -13948,7 +14572,7 @@ namespace url { * be relative or absolute. Parse returns nil, err on parse failure, * otherwise its return value is the same as ResolveReference. */ - parse(ref: string): URL | undefined + parse(ref: string): URL } interface URL { /** @@ -13959,7 +14583,7 @@ namespace url { * ref is an absolute URL, then ResolveReference ignores base and returns a * copy of ref. 
*/ - resolveReference(ref: URL): URL | undefined + resolveReference(ref: URL): URL } interface URL { /** @@ -13994,260 +14618,18 @@ namespace url { port(): string } interface URL { - marshalBinary(): string + marshalBinary(): string | Array } interface URL { - unmarshalBinary(text: string): void + unmarshalBinary(text: string | Array): void } -} - -/** - * Package context defines the Context type, which carries deadlines, - * cancellation signals, and other request-scoped values across API boundaries - * and between processes. - * - * Incoming requests to a server should create a Context, and outgoing calls to - * servers should accept a Context. The chain of function calls between them - * must propagate the Context, optionally replacing it with a derived Context - * created using WithCancel, WithDeadline, WithTimeout, or WithValue. When a - * Context is canceled, all Contexts derived from it are also canceled. - * - * The WithCancel, WithDeadline, and WithTimeout functions take a Context (the - * parent) and return a derived Context (the child) and a CancelFunc. Calling - * the CancelFunc cancels the child and its children, removes the parent's - * reference to the child, and stops any associated timers. Failing to call the - * CancelFunc leaks the child and its children until the parent is canceled or - * the timer fires. The go vet tool checks that CancelFuncs are used on all - * control-flow paths. - * - * Programs that use Contexts should follow these rules to keep interfaces - * consistent across packages and enable static analysis tools to check context - * propagation: - * - * Do not store Contexts inside a struct type; instead, pass a Context - * explicitly to each function that needs it. The Context should be the first - * parameter, typically named ctx: - * - * func DoSomething(ctx context.Context, arg Arg) error { - * // ... use ctx ... - * } - * - * Do not pass a nil Context, even if a function permits it. 
Pass context.TODO - * if you are unsure about which Context to use. - * - * Use context Values only for request-scoped data that transits processes and - * APIs, not for passing optional parameters to functions. - * - * The same Context may be passed to functions running in different goroutines; - * Contexts are safe for simultaneous use by multiple goroutines. - * - * See https://blog.golang.org/context for example code for a server that uses - * Contexts. - */ -namespace context {} - -/** - * Package sql provides a generic interface around SQL (or SQL-like) databases. - * - * The sql package must be used in conjunction with a database driver. See - * https://golang.org/s/sqldrivers for a list of drivers. - * - * Drivers that do not support context cancellation will not return until after - * the query is completed. - * - * For usage examples, see the wiki page at https://golang.org/s/sqlwiki. - */ -namespace sql { - /** IsolationLevel is the transaction isolation level used in TxOptions. */ - interface IsolationLevel extends Number {} - interface IsolationLevel { - /** String returns the name of the transaction isolation level. */ - string(): string - } - /** DBStats contains database statistics. */ - interface DBStats { - maxOpenConnections: number // Maximum number of open connections to the database. - /** Pool Status */ - openConnections: number // The number of established connections both in use and idle. - inUse: number // The number of connections currently in use. - idle: number // The number of idle connections. - /** Counters */ - waitCount: number // The total number of connections waited for. - waitDuration: time.Duration // The total time blocked waiting for a new connection. - maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. - maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. 
- maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. - } - /** - * Conn represents a single database connection rather than a pool of database - * connections. Prefer running queries from DB unless there is a specific need - * for a continuous single database connection. - * - * A Conn must call Close to return the connection to the database pool and - * may do so concurrently with a running query. - * - * After a call to Close, all operations on the connection fail with - * ErrConnDone. - */ - interface Conn {} - interface Conn { - /** PingContext verifies the connection to the database is still alive. */ - pingContext(ctx: context.Context): void - } - interface Conn { + interface URL { /** - * ExecContext executes a query without returning any rows. The args are for - * any placeholder parameters in the query. + * JoinPath returns a new URL with the provided path elements joined to any + * existing path and the resulting path cleaned of any ./ or ../ elements. + * Any sequences of multiple / characters will be reduced to a single /. */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Conn { - /** - * QueryContext executes a query that returns rows, typically a SELECT. The - * args are for any placeholder parameters in the query. - */ - queryContext( - ctx: context.Context, - query: string, - ...args: any[] - ): Rows | undefined - } - interface Conn { - /** - * QueryRowContext executes a query that is expected to return at most one - * row. QueryRowContext always returns a non-nil value. Errors are deferred - * until Row's Scan method is called. If the query selects no rows, the - * *Row's Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the - * first selected row and discards the rest. 
- */ - queryRowContext( - ctx: context.Context, - query: string, - ...args: any[] - ): Row | undefined - } - interface Conn { - /** - * PrepareContext creates a prepared statement for later queries or - * executions. Multiple queries or executions may be run concurrently from - * the returned statement. The caller must call the statement's Close method - * when the statement is no longer needed. - * - * The provided context is used for the preparation of the statement, not - * for the execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): Stmt | undefined - } - interface Conn { - /** - * Raw executes f exposing the underlying driver connection for the duration - * of f. The driverConn must not be used outside of f. - * - * Once f returns and err is not driver.ErrBadConn, the Conn will continue - * to be usable until Conn.Close is called. - */ - raw(f: (driverConn: any) => void): void - } - interface Conn { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled - * back. If the context is canceled, the sql package will roll back the - * transaction. Tx.Commit will return an error if the context provided to - * BeginTx is canceled. - * - * The provided TxOptions is optional and may be nil if defaults should be - * used. If a non-default isolation level is used that the driver doesn't - * support, an error will be returned. - */ - beginTx(ctx: context.Context, opts: TxOptions): Tx | undefined - } - interface Conn { - /** - * Close returns the connection to the connection pool. All operations after - * a Close will return with ErrConnDone. Close is safe to call concurrently - * with other operations and will block until all other operations finish. - * It may be useful to first cancel any used context and then call close - * directly after. - */ - close(): void - } - /** ColumnType contains the name and type of a column. 
*/ - interface ColumnType {} - interface ColumnType { - /** Name returns the name or alias of the column. */ - name(): string - } - interface ColumnType { - /** - * Length returns the column type length for variable length column types - * such as text and binary field types. If the type length is unbounded the - * value will be math.MaxInt64 (any database limits will still apply). If - * the column type is not variable length, such as an int, or if not - * supported by the driver ok is false. - */ - length(): [number, boolean] - } - interface ColumnType { - /** - * DecimalSize returns the scale and precision of a decimal type. If not - * applicable or if not supported ok is false. - */ - decimalSize(): [number, boolean] - } - interface ColumnType { - /** - * ScanType returns a Go type suitable for scanning into using Rows.Scan. If - * a driver does not support this property ScanType will return the type of - * an empty interface. - */ - scanType(): any - } - interface ColumnType { - /** - * Nullable reports whether the column may be null. If a driver does not - * support this property ok will be false. - */ - nullable(): boolean - } - interface ColumnType { - /** - * DatabaseTypeName returns the database system name of the column type. If - * an empty string is returned, then the driver type name is not supported. - * Consult your driver documentation for a list of driver data types. Length - * specifiers are not included. Common type names include "VARCHAR", "TEXT", - * "NVARCHAR", "DECIMAL", "BOOL", "INT", and "BIGINT". - */ - databaseTypeName(): string - } - /** Row is the result of calling QueryRow to select a single row. */ - interface Row {} - interface Row { - /** - * Scan copies the columns from the matched row into the values pointed at - * by dest. See the documentation on Rows.Scan for details. If more than one - * row matches the query, Scan uses the first row and discards the rest. If - * no row matches the query, Scan returns ErrNoRows. 
- */ - scan(...dest: any[]): void - } - interface Row { - /** - * Err provides a way for wrapping packages to check for query errors - * without calling Scan. Err returns the error, if any, that was encountered - * while running the query. If this error is not nil, this error will also - * be returned from Scan. - */ - err(): void - } -} - -namespace migrate { - interface Migration { - file: string - up: (db: dbx.Builder) => void - down: (db: dbx.Builder) => void + joinPath(...elem: string[]): URL } } @@ -14285,7 +14667,7 @@ namespace migrate { * go handleConnection(conn) * } * - * Name Resolution + * # Name Resolution * * The method for resolving domain names, whether indirectly with functions like * Dial or directly with functions like LookupHost and LookupAddr, varies by @@ -14311,7 +14693,7 @@ namespace migrate { * GODEBUG environment variable (see package runtime) to go or cgo, as in: * * export GODEBUG=netdns=go # force pure Go resolver - * export GODEBUG=netdns=cgo # force cgo resolver + * export GODEBUG=netdns=cgo # force native resolver (cgo, win32) * * The decision can also be forced while building the Go source tree by setting * the netgo or netcgo build tag. @@ -14321,10 +14703,14 @@ namespace migrate { * resolver while also printing debugging information, join the two settings by * a plus sign, as in GODEBUG=netdns=go+1. * + * On macOS, if Go code that uses the net package is built with + * -buildmode=c-archive, linking the resulting archive into a C program requires + * passing -lresolv when linking the C code. + * * On Plan 9, the resolver always accesses /net/cs and /net/dns. * - * On Windows, the resolver always uses C library functions, such as GetAddrInfo - * and DnsQuery. + * On Windows, in Go 1.18.x and earlier, the resolver always used C library + * functions, such as GetAddrInfo and DnsQuery. */ namespace net { /** @@ -14333,18 +14719,19 @@ namespace net { * Multiple goroutines may invoke methods on a Conn simultaneously. 
*/ interface Conn { + [key: string]: any /** * Read reads data from the connection. Read can be made to time out and * return an error after a fixed time limit; see SetDeadline and * SetReadDeadline. */ - read(b: string): number + read(b: string | Array): number /** * Write writes data to the connection. Write can be made to time out and * return an error after a fixed time limit; see SetDeadline and * SetWriteDeadline. */ - write(b: string): number + write(b: string | Array): number /** * Close closes the connection. Any blocked Read or Write operations will be * unblocked and return errors. @@ -14398,6 +14785,7 @@ namespace net { * Multiple goroutines may invoke methods on a Listener simultaneously. */ interface Listener { + [key: string]: any /** Accept waits for and returns the next connection to the listener. */ accept(): Conn /** @@ -14411,47 +14799,50 @@ namespace net { } /** - * Package cobra is a commander providing a simple interface to create powerful - * modern CLI interfaces. In addition to providing an interface, Cobra - * simultaneously provides a controller to organize your application code. + * Package types implements some commonly used db serializable types like + * datetime, json, etc. */ -namespace cobra { - interface PositionalArgs { - (cmd: Command, args: Array): void - } - // @ts-ignore - import flag = pflag - /** FParseErrWhitelist configures Flag parse errors to be ignored */ - interface FParseErrWhitelist extends _TygojaAny {} - /** Group Structure to manage groups for commands */ - interface Group { - id: string - title: string - } +namespace types { /** - * ShellCompDirective is a bit map representing the different behaviors the - * shell can be instructed to have once completions have been provided. + * DateTime represents a [time.Time] instance in UTC that is wrapped and + * serialized using the app default date layout. 
*/ - interface ShellCompDirective extends Number {} - /** CompletionOptions are the options to control shell completion */ - interface CompletionOptions { + interface DateTime {} + interface DateTime { + /** Time returns the internal [time.Time] instance. */ + time(): time.Time + } + interface DateTime { + /** IsZero checks whether the current DateTime instance has zero time value. */ + isZero(): boolean + } + interface DateTime { /** - * DisableDefaultCmd prevents Cobra from creating a default 'completion' - * command + * String serializes the current DateTime instance into a formatted UTC date + * string. + * + * The zero value is serialized to an empty string. */ - disableDefaultCmd: boolean + string(): string + } + interface DateTime { + /** MarshalJSON implements the [json.Marshaler] interface. */ + marshalJSON(): string | Array + } + interface DateTime { + /** UnmarshalJSON implements the [json.Unmarshaler] interface. */ + unmarshalJSON(b: string | Array): void + } + interface DateTime { + /** Value implements the [driver.Valuer] interface. */ + value(): any + } + interface DateTime { /** - * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' - * flag for shells that support completion descriptions + * Scan implements [sql.Scanner] interface to scan the provided value into + * the current DateTime instance. */ - disableNoDescFlag: boolean - /** - * DisableDescriptions turns off all completion descriptions for shells that - * support them - */ - disableDescriptions: boolean - /** HiddenDefaultCmd makes the default 'completion' command hidden */ - hiddenDefaultCmd: boolean + scan(value: any): void } } @@ -14520,6 +14911,23 @@ namespace textproto { * * The implementation is sufficient for HTTP (RFC 2388) and the multipart bodies * generated by popular browsers. + * + * # Limits + * + * To protect against malicious inputs, this package sets limits on the size of + * the MIME data it processes. 
+ * + * Reader.NextPart and Reader.NextRawPart limit the number of headers in a part + * to 10000 and Reader.ReadForm limits the total number of headers in all + * FileHeaders to 10000. These limits may be adjusted with the + * GODEBUG=multipartmaxheaders= setting. + * + * Reader.ReadForm further limits the number of parts in a form to 1000. This + * limit may be adjusted with the GODEBUG=multipartmaxparts= setting. + */ +/** + * Copyright 2023 The Go Authors. All rights reserved. Use of this source code + * is governed by a BSD-style license that can be found in the LICENSE file. */ namespace multipart { interface Reader { @@ -14530,7 +14938,7 @@ namespace multipart { * stored in memory will be stored on disk in temporary files. It returns * ErrMessageTooLarge if all non-file parts can't be stored in memory. */ - readForm(maxMemory: number): Form | undefined + readForm(maxMemory: number): Form } /** * Form is a parsed multipart form. Its File parts are stored either in memory @@ -14550,7 +14958,9 @@ namespace multipart { * contents may be either stored in memory or on disk. If stored on disk, the * File's underlying concrete type will be an *os.File. */ - interface File {} + interface File { + [key: string]: any + } /** * Reader is an iterator over parts in a MIME multipart body. Reader's * underlying parser consumes its input as needed. Seeking isn't supported. @@ -14565,7 +14975,7 @@ namespace multipart { * of "quoted-printable", that header is instead hidden and the body is * transparently decoded during Read calls. */ - nextPart(): Part | undefined + nextPart(): Part } interface Reader { /** @@ -14575,7 +14985,7 @@ namespace multipart { * Unlike NextPart, it does not have special handling for * "Content-Transfer-Encoding: quoted-printable". 
*/ - nextRawPart(): Part | undefined + nextRawPart(): Part } } @@ -14591,7 +15001,7 @@ namespace multipart { * resp, err := http.PostForm("http://example.com/form", * url.Values{"key": {"Value"}, "id": {"123"}}) * - * The client must close the response body when finished with it: + * The caller must close the response body when finished with it: * * resp, err := http.Get("http://example.com/") * if err != nil { @@ -14601,6 +15011,8 @@ namespace multipart { * body, err := io.ReadAll(resp.Body) * // ... * + * # Clients and Transports + * * For control over HTTP client headers, redirect policy, and other settings, * create a Client: * @@ -14631,6 +15043,8 @@ namespace multipart { * Clients and Transports are safe for concurrent use by multiple goroutines and * for efficiency should only be created once and re-used. * + * # Servers + * * ListenAndServe starts an HTTP server with a given address and handler. The * handler is usually nil, which means to use DefaultServeMux. Handle and * HandleFunc add handlers to DefaultServeMux: @@ -14655,18 +15069,19 @@ namespace multipart { * } * log.Fatal(s.ListenAndServe()) * + * # HTTP/2 + * * Starting with Go 1.6, the http package has transparent support for the HTTP/2 * protocol when using HTTPS. Programs that must disable HTTP/2 can do so by * setting Transport.TLSNextProto (for clients) or Server.TLSNextProto (for * servers) to a non-nil, empty map. Alternatively, the following GODEBUG - * environment variables are currently supported: + * settings are currently supported: * * GODEBUG=http2client=0 # disable HTTP/2 client support * GODEBUG=http2server=0 # disable HTTP/2 server support * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs * GODEBUG=http2debug=2 # ... even more verbose, with frame dumps * - * The GODEBUG variables are not covered by Go's API compatibility promise. 
* Please report any issues before disabling HTTP/2 support: * https://golang.org/s/http2bug * @@ -14748,7 +15163,8 @@ namespace http { * Get gets the first value associated with the given key. If there are no * values associated with the key, Get returns "". It is case insensitive; * textproto.CanonicalMIMEHeaderKey is used to canonicalize the provided - * key. To use non-canonical keys, access the map directly. + * key. Get assumes that all keys are stored in canonical form. To use + * non-canonical keys, access the map directly. */ get(key: string): string } @@ -14895,7 +15311,7 @@ namespace http { * Relative redirects are resolved relative to the Response's Request. * ErrNoLocation is returned if no Location header is present. */ - location(): url.URL | undefined + location(): url.URL } interface Response { /** @@ -14911,9 +15327,15 @@ namespace http { * * This method consults the following fields of the response r: * - * StatusCode ProtoMajor ProtoMinor Request.Method TransferEncoding Trailer - * Body ContentLength Header, values for non-canonical keys will have - * unpredictable behavior + * StatusCode + * ProtoMajor + * ProtoMinor + * Request.Method + * TransferEncoding + * Trailer + * Body + * ContentLength + * Header, values for non-canonical keys will have unpredictable behavior * * The Response Body is closed after it is sent. */ @@ -14943,6 +15365,7 @@ namespace http { * the server doesn't log an error, panic with the value ErrAbortHandler. */ interface Handler { + [key: string]: any serveHTTP(_arg0: ResponseWriter, _arg1: Request): void } /** @@ -14955,6 +15378,88 @@ namespace http { } } +/** + * Package oauth2 provides support for making OAuth2 authorized and + * authenticated HTTP requests, as specified in RFC 6749. It can additionally + * grant authorization with Bearer JWT. + */ +/** + * Copyright 2023 The Go Authors. All rights reserved. Use of this source code + * is governed by a BSD-style license that can be found in the LICENSE file. 
+ */ +namespace oauth2 { + /** An AuthCodeOption is passed to Config.AuthCodeURL. */ + interface AuthCodeOption { + [key: string]: any + } + /** + * Token represents the credentials used to authorize the requests to access + * protected resources on the OAuth 2.0 provider's backend. + * + * Most users of this package should not access fields of Token directly. + * They're exported mostly for use by related packages implementing derivative + * OAuth2 flows. + */ + interface Token { + /** AccessToken is the token that authorizes and authenticates the requests. */ + accessToken: string + /** + * TokenType is the type of token. The Type method returns either this or + * "Bearer", the default. + */ + tokenType: string + /** + * RefreshToken is a token that's used by the application (as opposed to the + * user) to refresh the access token if it expires. + */ + refreshToken: string + /** + * Expiry is the optional expiration time of the access token. + * + * If zero, TokenSource implementations will reuse the same token forever + * and RefreshToken or equivalent mechanisms for that TokenSource will not + * be used. + */ + expiry: time.Time + } + interface Token { + /** Type returns t.TokenType if non-empty, else "Bearer". */ + type(): string + } + interface Token { + /** + * SetAuthHeader sets the Authorization header to r using the access token + * in t. + * + * This method is unnecessary when using Transport or an HTTP Client + * returned by this package. + */ + setAuthHeader(r: http.Request): void + } + interface Token { + /** + * WithExtra returns a new Token that's a clone of t, but using the provided + * raw extra map. This is only intended for use by packages implementing + * derivative OAuth2 flows. + */ + withExtra(extra: {}): Token + } + interface Token { + /** + * Extra returns an extra field. Extra fields are key-value pairs returned + * by the server as a part of the token retrieval response. 
+ */ + extra(key: string): {} + } + interface Token { + /** + * Valid reports whether t is non-nil, has an AccessToken, and is not + * expired. + */ + valid(): boolean + } +} + namespace store { /** Store defines a concurrent safe in memory key-value data store. */ interface Store {} @@ -15018,370 +15523,10 @@ namespace store { } } -/** - * Package types implements some commonly used db serializable types like - * datetime, json, etc. - */ -namespace types { - /** - * DateTime represents a [time.Time] instance in UTC that is wrapped and - * serialized using the app default date layout. - */ - interface DateTime {} - interface DateTime { - /** Time returns the internal [time.Time] instance. */ - time(): time.Time - } - interface DateTime { - /** IsZero checks whether the current DateTime instance has zero time value. */ - isZero(): boolean - } - interface DateTime { - /** - * String serializes the current DateTime instance into a formatted UTC date - * string. - * - * The zero value is serialized to an empty string. - */ - string(): string - } - interface DateTime { - /** MarshalJSON implements the [json.Marshaler] interface. */ - marshalJSON(): string - } - interface DateTime { - /** UnmarshalJSON implements the [json.Unmarshaler] interface. */ - unmarshalJSON(b: string): void - } - interface DateTime { - /** Value implements the [driver.Valuer] interface. */ - value(): any - } - interface DateTime { - /** - * Scan implements [sql.Scanner] interface to scan the provided value into - * the current DateTime instance. - */ - scan(value: any): void - } -} - -/** - * Package schema implements custom Schema and SchemaField datatypes for - * handling the Collection schema definitions. - */ -namespace schema { - // @ts-ignore - import validation = ozzo_validation - /** SchemaField defines a single schema field structure. 
*/ - interface SchemaField { - system: boolean - id: string - name: string - type: string - required: boolean - /** - * Presentable indicates whether the field is suitable for visualization - * purposes (eg. in the Admin UI relation views). - */ - presentable: boolean - /** - * Deprecated: This field is no-op and will be removed in future versions. - * Please use the collection.Indexes field to define a unique constraint. - */ - unique: boolean - options: any - } - interface SchemaField { - /** ColDefinition returns the field db column type definition as string. */ - colDefinition(): string - } - interface SchemaField { - /** String serializes and returns the current field as string. */ - string(): string - } - interface SchemaField { - /** MarshalJSON implements the [json.Marshaler] interface. */ - marshalJSON(): string - } - interface SchemaField { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - * - * The schema field options are auto initialized on success. - */ - unmarshalJSON(data: string): void - } - interface SchemaField { - /** - * Validate makes `SchemaField` validatable by implementing - * [validation.Validatable] interface. - */ - validate(): void - } - interface SchemaField { - /** - * InitOptions initializes the current field options based on its type. - * - * Returns error on unknown field type. - */ - initOptions(): void - } - interface SchemaField { - /** PrepareValue returns normalized and properly formatted field value. */ - prepareValue(value: any): any - } - interface SchemaField { - /** - * PrepareValueWithModifier returns normalized and properly formatted field - * value by "merging" baseValue with the modifierValue based on the - * specified modifier (+ or -). - */ - prepareValueWithModifier( - baseValue: any, - modifier: string, - modifierValue: any, - ): any - } -} - -/** Package models implements all PocketBase DB models and DTOs. 
*/ -namespace models { - /** - * Model defines an interface with common methods that all db models should - * have. - */ - interface Model { - tableName(): string - isNew(): boolean - markAsNew(): void - markAsNotNew(): void - hasId(): boolean - getId(): string - setId(id: string): void - getCreated(): types.DateTime - getUpdated(): types.DateTime - refreshId(): void - refreshCreated(): void - refreshUpdated(): void - } - /** BaseModel defines common fields and methods used by all other models. */ - interface BaseModel { - id: string - created: types.DateTime - updated: types.DateTime - } - interface BaseModel { - /** HasId returns whether the model has a nonzero id. */ - hasId(): boolean - } - interface BaseModel { - /** GetId returns the model id. */ - getId(): string - } - interface BaseModel { - /** SetId sets the model id to the provided string value. */ - setId(id: string): void - } - interface BaseModel { - /** MarkAsNew marks the model as "new" (aka. enforces m.IsNew() to be true). */ - markAsNew(): void - } - interface BaseModel { - /** - * MarkAsNotNew marks the model as "not new" (aka. enforces m.IsNew() to be - * false) - */ - markAsNotNew(): void - } - interface BaseModel { - /** - * IsNew indicates what type of db query (insert or update) should be used - * with the model instance. - */ - isNew(): boolean - } - interface BaseModel { - /** GetCreated returns the model Created datetime. */ - getCreated(): types.DateTime - } - interface BaseModel { - /** GetUpdated returns the model Updated datetime. */ - getUpdated(): types.DateTime - } - interface BaseModel { - /** - * RefreshId generates and sets a new model id. - * - * The generated id is a cryptographically random 15 characters length - * string. - */ - refreshId(): void - } - interface BaseModel { - /** RefreshCreated updates the model Created field with the current datetime. 
*/ - refreshCreated(): void - } - interface BaseModel { - /** RefreshUpdated updates the model Updated field with the current datetime. */ - refreshUpdated(): void - } - interface BaseModel { - /** - * PostScan implements the [dbx.PostScanner] interface. - * - * It is executed right after the model was populated with the db row - * values. - */ - postScan(): void - } - // @ts-ignore - import validation = ozzo_validation - /** CollectionBaseOptions defines the "base" Collection.Options fields. */ - interface CollectionBaseOptions {} - interface CollectionBaseOptions { - /** Validate implements [validation.Validatable] interface. */ - validate(): void - } - /** CollectionAuthOptions defines the "auth" Collection.Options fields. */ - interface CollectionAuthOptions { - manageRule?: string - allowOAuth2Auth: boolean - allowUsernameAuth: boolean - allowEmailAuth: boolean - requireEmail: boolean - exceptEmailDomains: Array - onlyEmailDomains: Array - minPasswordLength: number - } - interface CollectionAuthOptions { - /** Validate implements [validation.Validatable] interface. */ - validate(): void - } - /** CollectionViewOptions defines the "view" Collection.Options fields. */ - interface CollectionViewOptions { - query: string - } - interface CollectionViewOptions { - /** Validate implements [validation.Validatable] interface. 
*/ - validate(): void - } - type _subztSzL = BaseModel - interface Param extends _subztSzL { - key: string - value: types.JsonRaw - } - interface Param { - tableName(): string - } - type _subOyDRN = BaseModel - interface Request extends _subOyDRN { - url: string - method: string - status: number - auth: string - userIp: string - remoteIp: string - referer: string - userAgent: string - meta: types.JsonMap - } - interface Request { - tableName(): string - } - interface TableInfoRow { - /** - * The `db:"pk"` tag has special semantic so we cannot rename the original - * field without specifying a custom mapper - */ - pk: number - index: number - name: string - type: string - notNull: boolean - defaultValue: types.JsonRaw - } -} - -/** - * Package oauth2 provides support for making OAuth2 authorized and - * authenticated HTTP requests, as specified in RFC 6749. It can additionally - * grant authorization with Bearer JWT. - */ -namespace oauth2 { - /** An AuthCodeOption is passed to Config.AuthCodeURL. */ - interface AuthCodeOption {} - /** - * Token represents the credentials used to authorize the requests to access - * protected resources on the OAuth 2.0 provider's backend. - * - * Most users of this package should not access fields of Token directly. - * They're exported mostly for use by related packages implementing derivative - * OAuth2 flows. - */ - interface Token { - /** AccessToken is the token that authorizes and authenticates the requests. */ - accessToken: string - /** - * TokenType is the type of token. The Type method returns either this or - * "Bearer", the default. - */ - tokenType: string - /** - * RefreshToken is a token that's used by the application (as opposed to the - * user) to refresh the access token if it expires. - */ - refreshToken: string - /** - * Expiry is the optional expiration time of the access token. 
- * - * If zero, TokenSource implementations will reuse the same token forever - * and RefreshToken or equivalent mechanisms for that TokenSource will not - * be used. - */ - expiry: time.Time - } - interface Token { - /** Type returns t.TokenType if non-empty, else "Bearer". */ - type(): string - } - interface Token { - /** - * SetAuthHeader sets the Authorization header to r using the access token - * in t. - * - * This method is unnecessary when using Transport or an HTTP Client - * returned by this package. - */ - setAuthHeader(r: http.Request): void - } - interface Token { - /** - * WithExtra returns a new Token that's a clone of t, but using the provided - * raw extra map. This is only intended for use by packages implementing - * derivative OAuth2 flows. - */ - withExtra(extra: {}): Token | undefined - } - interface Token { - /** - * Extra returns an extra field. Extra fields are key-value pairs returned - * by the server as a part of the token retrieval response. - */ - extra(key: string): {} - } - interface Token { - /** - * Valid reports whether t is non-nil, has an AccessToken, and is not - * expired. - */ - valid(): boolean - } -} - namespace mailer { /** Mailer defines a base mail client interface. */ interface Mailer { + [key: string]: any /** Send sends an email with the provided Message. */ send(message: Message): void } @@ -15428,6 +15573,7 @@ namespace mailer { namespace echo { /** Binder is the interface that wraps the Bind method. */ interface Binder { + [key: string]: any bind(c: Context, i: {}): void } /** @@ -15436,6 +15582,7 @@ namespace echo { * Router. */ interface ServableContext { + [key: string]: any /** * Reset resets the context after request completes. It must be called along * with `Echo#AcquireContext()` and `Echo#ReleaseContext()`. See @@ -15450,6 +15597,7 @@ namespace echo { * interfaces. 
*/ interface JSONSerializer { + [key: string]: any serialize(c: Context, i: {}, indent: string): void deserialize(c: Context, i: {}): void } @@ -15459,10 +15607,12 @@ namespace echo { } /** Validator is the interface that wraps the Validate function. */ interface Validator { + [key: string]: any validate(i: {}): void } /** Renderer is the interface that wraps the Render function. */ interface Renderer { + [key: string]: any render(_arg0: io.Writer, _arg1: string, _arg2: {}, _arg3: Context): void } /** @@ -15573,7 +15723,7 @@ namespace echo { * allowed) cases. If this kind of behaviour is needed then add a catch-all * route `/*` for the group which handler returns always 404 */ - group(prefix: string, ...middleware: MiddlewareFunc[]): Group | undefined + group(prefix: string, ...middleware: MiddlewareFunc[]): Group } interface Group { /** Static implements `Echo#Static()` for sub-routes within the Group. */ @@ -15645,6 +15795,7 @@ namespace echo { * choice. */ interface Logger { + [key: string]: any /** * Write provides writer interface for http.Server `ErrorLog` and for * logging startup messages. `http.Server.ErrorLog` logs errors from @@ -15652,7 +15803,7 @@ namespace echo { * FileSystem errors. `logger` middleware will use this method to write its * JSON payload. */ - write(p: string): number + write(p: string | Array): number /** Error logs the error */ error(err: Error): void } @@ -15704,7 +15855,7 @@ namespace echo { } interface Response { /** Write writes the data to the connection as part of an HTTP reply. */ - write(b: string): number + write(b: string | Array): number } interface Response { /** @@ -15720,7 +15871,7 @@ namespace echo { * take over the connection. 
See * [http.Hijacker](https://golang.org/pkg/net/http/#Hijacker) */ - hijack(): [net.Conn, bufio.ReadWriter | undefined] + hijack(): [net.Conn, bufio.ReadWriter] } interface Response { /** @@ -15767,6 +15918,7 @@ namespace echo { * And optionally can set additional information to Context with RoutableContext.Set */ interface Router { + [key: string]: any /** Add registers Routable with the Router and returns registered RouteInfo */ add(routable: Routable): RouteInfo /** Remove removes route from the Router */ @@ -15789,6 +15941,7 @@ namespace echo { * privileges used with route etc.) */ interface Routable { + [key: string]: any /** * ToRouteInfo converts Routable to RouteInfo * @@ -15821,6 +15974,7 @@ namespace echo { * identifies the Route. Name can have duplicates. */ interface RouteInfo { + [key: string]: any method(): string path(): string name(): string @@ -15842,6 +15996,195 @@ namespace echo { } } +/** + * Package sql provides a generic interface around SQL (or SQL-like) databases. + * + * The sql package must be used in conjunction with a database driver. See + * https://golang.org/s/sqldrivers for a list of drivers. + * + * Drivers that do not support context cancellation will not return until after + * the query is completed. + * + * For usage examples, see the wiki page at https://golang.org/s/sqlwiki. + */ +namespace sql { + /** IsolationLevel is the transaction isolation level used in TxOptions. */ + interface IsolationLevel extends Number {} + interface IsolationLevel { + /** String returns the name of the transaction isolation level. */ + string(): string + } + /** DBStats contains database statistics. */ + interface DBStats { + maxOpenConnections: number // Maximum number of open connections to the database. + /** Pool Status */ + openConnections: number // The number of established connections both in use and idle. + inUse: number // The number of connections currently in use. + idle: number // The number of idle connections. 
+ /** Counters */ + waitCount: number // The total number of connections waited for. + waitDuration: time.Duration // The total time blocked waiting for a new connection. + maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. + maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. + maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. + } + /** + * Conn represents a single database connection rather than a pool of database + * connections. Prefer running queries from DB unless there is a specific need + * for a continuous single database connection. + * + * A Conn must call Close to return the connection to the database pool and + * may do so concurrently with a running query. + * + * After a call to Close, all operations on the connection fail with + * ErrConnDone. + */ + interface Conn {} + interface Conn { + /** PingContext verifies the connection to the database is still alive. */ + pingContext(ctx: context.Context): void + } + interface Conn { + /** + * ExecContext executes a query without returning any rows. The args are for + * any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Conn { + /** + * QueryContext executes a query that returns rows, typically a SELECT. The + * args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): Rows + } + interface Conn { + /** + * QueryRowContext executes a query that is expected to return at most one + * row. QueryRowContext always returns a non-nil value. Errors are deferred + * until Row's Scan method is called. If the query selects no rows, the + * *Row's Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the + * first selected row and discards the rest. 
+ */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): Row + } + interface Conn { + /** + * PrepareContext creates a prepared statement for later queries or + * executions. Multiple queries or executions may be run concurrently from + * the returned statement. The caller must call the statement's Close method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not + * for the execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): Stmt + } + interface Conn { + /** + * Raw executes f exposing the underlying driver connection for the duration + * of f. The driverConn must not be used outside of f. + * + * Once f returns and err is not driver.ErrBadConn, the Conn will continue + * to be usable until Conn.Close is called. + */ + raw(f: (driverConn: any) => void): void + } + interface Conn { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled + * back. If the context is canceled, the sql package will roll back the + * transaction. Tx.Commit will return an error if the context provided to + * BeginTx is canceled. + * + * The provided TxOptions is optional and may be nil if defaults should be + * used. If a non-default isolation level is used that the driver doesn't + * support, an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): Tx + } + interface Conn { + /** + * Close returns the connection to the connection pool. All operations after + * a Close will return with ErrConnDone. Close is safe to call concurrently + * with other operations and will block until all other operations finish. + * It may be useful to first cancel any used context and then call close + * directly after. + */ + close(): void + } + /** ColumnType contains the name and type of a column. 
*/ + interface ColumnType {} + interface ColumnType { + /** Name returns the name or alias of the column. */ + name(): string + } + interface ColumnType { + /** + * Length returns the column type length for variable length column types + * such as text and binary field types. If the type length is unbounded the + * value will be math.MaxInt64 (any database limits will still apply). If + * the column type is not variable length, such as an int, or if not + * supported by the driver ok is false. + */ + length(): [number, boolean] + } + interface ColumnType { + /** + * DecimalSize returns the scale and precision of a decimal type. If not + * applicable or if not supported ok is false. + */ + decimalSize(): [number, boolean] + } + interface ColumnType { + /** + * ScanType returns a Go type suitable for scanning into using Rows.Scan. If + * a driver does not support this property ScanType will return the type of + * an empty interface. + */ + scanType(): any + } + interface ColumnType { + /** + * Nullable reports whether the column may be null. If a driver does not + * support this property ok will be false. + */ + nullable(): boolean + } + interface ColumnType { + /** + * DatabaseTypeName returns the database system name of the column type. If + * an empty string is returned, then the driver type name is not supported. + * Consult your driver documentation for a list of driver data types. Length + * specifiers are not included. Common type names include "VARCHAR", "TEXT", + * "NVARCHAR", "DECIMAL", "BOOL", "INT", and "BIGINT". + */ + databaseTypeName(): string + } + /** Row is the result of calling QueryRow to select a single row. */ + interface Row {} + interface Row { + /** + * Scan copies the columns from the matched row into the values pointed at + * by dest. See the documentation on Rows.Scan for details. If more than one + * row matches the query, Scan uses the first row and discards the rest. If + * no row matches the query, Scan returns ErrNoRows. 
+ */ + scan(...dest: any[]): void + } + interface Row { + /** + * Err provides a way for wrapping packages to check for query errors + * without calling Scan. Err returns the error, if any, that was encountered + * while running the query. If this error is not nil, this error will also + * be returned from Scan. + */ + err(): void + } +} + namespace settings { // @ts-ignore import validation = ozzo_validation @@ -15948,6 +16291,8 @@ namespace settings { } interface LogsConfig { maxDays: number + minLevel: number + logIp: boolean } interface LogsConfig { /** @@ -15963,6 +16308,8 @@ namespace settings { authUrl: string tokenUrl: string userApiUrl: string + displayName: string + pkce?: boolean } interface AuthProviderConfig { /** @@ -15991,12 +16338,249 @@ namespace settings { } } +/** + * Package schema implements custom Schema and SchemaField datatypes for + * handling the Collection schema definitions. + */ +namespace schema { + // @ts-ignore + import validation = ozzo_validation + /** SchemaField defines a single schema field structure. */ + interface SchemaField { + system: boolean + id: string + name: string + type: string + required: boolean + /** + * Presentable indicates whether the field is suitable for visualization + * purposes (eg. in the Admin UI relation views). + */ + presentable: boolean + /** + * Deprecated: This field is no-op and will be removed in future versions. + * Please use the collection.Indexes field to define a unique constraint. + */ + unique: boolean + options: any + } + interface SchemaField { + /** ColDefinition returns the field db column type definition as string. */ + colDefinition(): string + } + interface SchemaField { + /** String serializes and returns the current field as string. */ + string(): string + } + interface SchemaField { + /** MarshalJSON implements the [json.Marshaler] interface. */ + marshalJSON(): string | Array + } + interface SchemaField { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. 
+ * + * The schema field options are auto initialized on success. + */ + unmarshalJSON(data: string | Array): void + } + interface SchemaField { + /** + * Validate makes `SchemaField` validatable by implementing + * [validation.Validatable] interface. + */ + validate(): void + } + interface SchemaField { + /** + * InitOptions initializes the current field options based on its type. + * + * Returns error on unknown field type. + */ + initOptions(): void + } + interface SchemaField { + /** PrepareValue returns normalized and properly formatted field value. */ + prepareValue(value: any): any + } + interface SchemaField { + /** + * PrepareValueWithModifier returns normalized and properly formatted field + * value by "merging" baseValue with the modifierValue based on the + * specified modifier (+ or -). + */ + prepareValueWithModifier( + baseValue: any, + modifier: string, + modifierValue: any, + ): any + } +} + +/** Package models implements all PocketBase DB models and DTOs. */ +namespace models { + /** + * Model defines an interface with common methods that all db models should + * have. + */ + interface Model { + [key: string]: any + tableName(): string + isNew(): boolean + markAsNew(): void + markAsNotNew(): void + hasId(): boolean + getId(): string + setId(id: string): void + getCreated(): types.DateTime + getUpdated(): types.DateTime + refreshId(): void + refreshCreated(): void + refreshUpdated(): void + } + /** BaseModel defines common fields and methods used by all other models. */ + interface BaseModel { + id: string + created: types.DateTime + updated: types.DateTime + } + interface BaseModel { + /** HasId returns whether the model has a nonzero id. */ + hasId(): boolean + } + interface BaseModel { + /** GetId returns the model id. */ + getId(): string + } + interface BaseModel { + /** SetId sets the model id to the provided string value. */ + setId(id: string): void + } + interface BaseModel { + /** MarkAsNew marks the model as "new" (aka. 
enforces m.IsNew() to be true). */ + markAsNew(): void + } + interface BaseModel { + /** + * MarkAsNotNew marks the model as "not new" (aka. enforces m.IsNew() to be + * false) + */ + markAsNotNew(): void + } + interface BaseModel { + /** + * IsNew indicates what type of db query (insert or update) should be used + * with the model instance. + */ + isNew(): boolean + } + interface BaseModel { + /** GetCreated returns the model Created datetime. */ + getCreated(): types.DateTime + } + interface BaseModel { + /** GetUpdated returns the model Updated datetime. */ + getUpdated(): types.DateTime + } + interface BaseModel { + /** + * RefreshId generates and sets a new model id. + * + * The generated id is a cryptographically random 15 characters length + * string. + */ + refreshId(): void + } + interface BaseModel { + /** RefreshCreated updates the model Created field with the current datetime. */ + refreshCreated(): void + } + interface BaseModel { + /** RefreshUpdated updates the model Updated field with the current datetime. */ + refreshUpdated(): void + } + interface BaseModel { + /** + * PostScan implements the [dbx.PostScanner] interface. + * + * It is executed right after the model was populated with the db row + * values. + */ + postScan(): void + } + // @ts-ignore + import validation = ozzo_validation + /** CollectionBaseOptions defines the "base" Collection.Options fields. */ + interface CollectionBaseOptions {} + interface CollectionBaseOptions { + /** Validate implements [validation.Validatable] interface. */ + validate(): void + } + /** CollectionAuthOptions defines the "auth" Collection.Options fields. 
*/ + interface CollectionAuthOptions { + manageRule?: string + allowOAuth2Auth: boolean + allowUsernameAuth: boolean + allowEmailAuth: boolean + requireEmail: boolean + exceptEmailDomains: Array + onlyVerified: boolean + onlyEmailDomains: Array + minPasswordLength: number + } + interface CollectionAuthOptions { + /** Validate implements [validation.Validatable] interface. */ + validate(): void + } + /** CollectionViewOptions defines the "view" Collection.Options fields. */ + interface CollectionViewOptions { + query: string + } + interface CollectionViewOptions { + /** Validate implements [validation.Validatable] interface. */ + validate(): void + } + type _subhrWXI = BaseModel + interface Log extends _subhrWXI { + data: types.JsonMap + message: string + level: number + } + interface Log { + tableName(): string + } + type _subGSloU = BaseModel + interface Param extends _subGSloU { + key: string + value: types.JsonRaw + } + interface Param { + tableName(): string + } + interface TableInfoRow { + /** + * The `db:"pk"` tag has special semantic so we cannot rename the original + * field without specifying a custom mapper + */ + pk: number + index: number + name: string + type: string + notNull: boolean + defaultValue: types.JsonRaw + } +} + /** * Package daos handles common PocketBase DB model manipulations. * * Think of daos as DB repository and service layer in one. */ namespace daos { + interface LogsStatsItem { + total: number + date: types.DateTime + } /** * ExpandFetchFunc defines the function that is used to fetch the expanded * relation records. @@ -16009,42 +16593,6 @@ namespace daos { } // @ts-ignore import validation = ozzo_validation - interface RequestsStatsItem { - total: number - date: types.DateTime - } -} - -namespace subscriptions { - /** Broker defines a struct for managing subscriptions clients. */ - interface Broker {} - interface Broker { - /** - * Clients returns a shallow copy of all registered clients indexed with - * their connection id. 
- */ - clients(): _TygojaDict - } - interface Broker { - /** - * ClientById finds a registered client by its id. - * - * Returns non-nil error when client with clientId is not registered. - */ - clientById(clientId: string): Client - } - interface Broker { - /** Register adds a new client to the broker instance. */ - register(client: Client): void - } - interface Broker { - /** - * Unregister removes a single client by its id. - * - * If client with clientId doesn't exist, this method does nothing. - */ - unregister(clientId: string): void - } } namespace hook { @@ -16101,8 +16649,8 @@ namespace hook { * only if the TaggedHook.tags are empty or includes at least one of the event * data tag(s). */ - type _subUCeYn = mainHook - interface TaggedHook extends _subUCeYn {} + type _subWIsLS = mainHook + interface TaggedHook extends _subWIsLS {} interface TaggedHook { /** * CanTriggerOn checks if the current TaggedHook can be triggered with the @@ -16132,6 +16680,450 @@ namespace hook { } } +/** + * Package slog provides structured logging, in which log records include a + * message, a severity level, and various other attributes expressed as + * key-value pairs. + * + * It defines a type, [Logger], which provides several methods (such as + * [Logger.Info] and [Logger.Error]) for reporting events of interest. + * + * Each Logger is associated with a [Handler]. A Logger output method creates a + * [Record] from the method arguments and passes it to the Handler, which + * decides how to handle it. There is a default Logger accessible through + * top-level functions (such as [Info] and [Error]) that call the corresponding + * Logger methods. + * + * A log record consists of a time, a level, a message, and a set of key-value + * pairs, where the keys are strings and the values may be of any type. 
As an + * example, + * + * slog.Info('hello', 'count', 3) + * + * Creates a record containing the time of the call, a level of Info, the + * message "hello", and a single pair with key "count" and value 3. + * + * The [Info] top-level function calls the [Logger.Info] method on the default + * Logger. In addition to [Logger.Info], there are methods for Debug, Warn and + * Error levels. Besides these convenience methods for common levels, there is + * also a [Logger.Log] method which takes the level as an argument. Each of + * these methods has a corresponding top-level function that uses the default + * logger. + * + * The default handler formats the log record's message, time, level, and + * attributes as a string and passes it to the [log] package. + * + * 2022/11/08 15:28:26 INFO hello count=3 + * + * For more control over the output format, create a logger with a different + * handler. This statement uses [New] to create a new logger with a TextHandler + * that writes structured records in text form to standard error: + * + * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + * + * [TextHandler] output is a sequence of key=value pairs, easily and + * unambiguously parsed by machine. This statement: + * + * logger.Info('hello', 'count', 3) + * + * Produces this output: + * + * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + * + * The package also provides [JSONHandler], whose output is line-delimited JSON: + * + * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + * logger.Info("hello", "count", 3) + * + * Produces this output: + * + * { + * "time": "2022-11-08T15:28:26.000000000-05:00", + * "level": "INFO", + * "msg": "hello", + * "count": 3 + * } + * + * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. + * There are options for setting the minimum level (see Levels, below), + * displaying the source file and line of the log call, and modifying attributes + * before they are logged. 
+ * + * Setting a logger as the default with + * + * slog.SetDefault(logger) + * + * Will cause the top-level functions like [Info] to use it. [SetDefault] also + * updates the default logger used by the [log] package, so that existing + * applications that use [log.Printf] and related functions will send log + * records to the logger's handler without needing to be rewritten. + * + * Some attributes are common to many log calls. For example, you may wish to + * include the URL or trace identifier of a server request with all log events + * arising from the request. Rather than repeat the attribute with every log + * call, you can use [Logger.With] to construct a new Logger containing the + * attributes: + * + * logger2 := logger.With("url", r.URL) + * + * The arguments to With are the same key-value pairs used in [Logger.Info]. The + * result is a new Logger with the same handler as the original, but additional + * attributes that will appear in the output of every call. + * + * # Levels + * + * A [Level] is an integer representing the importance or severity of a log + * event. The higher the level, the more severe the event. This package defines + * constants for the most common levels, but any int can be used as a level. + * + * In an application, you may wish to log messages only at a certain level or + * greater. One common configuration is to log messages at Info or higher + * levels, suppressing debug logging until it is needed. The built-in handlers + * can be configured with the minimum level to output by setting + * [HandlerOptions.Level]. The program's `main` function typically does this. + * The default value is LevelInfo. + * + * Setting the [HandlerOptions.Level] field to a [Level] value fixes the + * handler's minimum level throughout its lifetime. Setting it to a [LevelVar] + * allows the level to be varied dynamically. A LevelVar holds a Level and is + * safe to read or write from multiple goroutines. 
To vary the level dynamically + * for an entire program, first initialize a global LevelVar: + * + * var programLevel = new slog.LevelVar() // Info by default + * + * Then use the LevelVar to construct a handler, and make it the default: + * + * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + * slog.SetDefault(slog.New(h)) + * + * Now the program can change its logging level with a single statement: + * + * programLevel.Set(slog.LevelDebug) + * + * # Groups + * + * Attributes can be collected into groups. A group has a name that is used to + * qualify the names of its attributes. How this qualification is displayed + * depends on the handler. [TextHandler] separates the group and attribute names + * with a dot. [JSONHandler] treats each group as a separate JSON object, with + * the group name as the key. + * + * Use [Group] to create a Group attribute from a name and a list of key-value + * pairs: + * + * slog.Group('request', 'method', r.Method, 'url', r.URL) + * + * TextHandler would display this group as + * + * request.method=GET request.url=http://example.com + * + * JSONHandler would display it as + * + * "request":{"method":"GET","url":"http://example.com"} + * + * Use [Logger.WithGroup] to qualify all of a Logger's output with a group name. + * Calling WithGroup on a Logger results in a new Logger with the same Handler + * as the original, but with all its attributes qualified by the group name. + * + * This can help prevent duplicate attribute keys in large systems, where + * subsystems might use the same keys. 
Pass each subsystem a different Logger + * with its own group name so that potential duplicates are qualified: + * + * logger := slog.Default().With("id", systemID) + * parserLogger := logger.WithGroup("parser") + * parseInput(input, parserLogger) + * + * When parseInput logs with parserLogger, its keys will be qualified with + * "parser", so even if it uses the common key "id", the log line will have + * distinct keys. + * + * # Contexts + * + * Some handlers may wish to include information from the [context.Context] that + * is available at the call site. One example of such information is the + * identifier for the current span when tracing is enabled. + * + * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first + * argument, as do their corresponding top-level functions. + * + * Although the convenience methods on Logger (Info and so on) and the + * corresponding top-level functions do not take a context, the alternatives + * ending in "Context" do. For example, + * + * slog.InfoContext(ctx, 'message') + * + * It is recommended to pass a context to an output method if one is available. + * + * # Attrs and Values + * + * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well + * as alternating keys and values. The statement + * + * slog.Info('hello', slog.Int('count', 3)) + * + * Behaves the same as + * + * slog.Info('hello', 'count', 3) + * + * There are convenience constructors for [Attr] such as [Int], [String], and + * [Bool] for common types, as well as the function [Any] for constructing Attrs + * of any type. + * + * The value part of an Attr is a type called [Value]. Like an [any], a Value + * can hold any Go value, but it can represent typical values, including all + * numbers and strings, without an allocation. + * + * For the most efficient log output, use [Logger.LogAttrs]. It is similar to + * [Logger.Log] but accepts only Attrs, not alternating keys and values; this + * allows it, too, to avoid allocation. 
+ * + * The call + * + * logger.LogAttrs(ctx, slog.LevelInfo, 'hello', slog.Int('count', 3)) + * + * Is the most efficient way to achieve the same output as + * + * slog.Info('hello', 'count', 3) + * + * # Customizing a type's logging behavior + * + * If a type implements the [LogValuer] interface, the [Value] returned from its + * LogValue method is used for logging. You can use this to control how values + * of the type appear in logs. For example, you can redact secret information + * like passwords, or gather a struct's fields in a Group. See the examples + * under [LogValuer] for details. + * + * A LogValue method may return a Value that itself implements [LogValuer]. The + * [Value.Resolve] method handles these cases carefully, avoiding infinite loops + * and unbounded recursion. Handler authors and others may wish to use + * Value.Resolve instead of calling LogValue directly. + * + * # Wrapping output methods + * + * The logger functions use reflection over the call stack to find the file name + * and line number of the logging call within the application. This can produce + * incorrect source information for functions that wrap slog. For instance, if + * you define this function in file mylog.go: + * + * func Infof(format string, args ...any) { + * slog.Default().Info(fmt.Sprintf(format, args...)) + * } + * + * And you call it like this in main.go: + * + * Infof(slog.Default(), 'hello, %s', 'world') + * + * Then slog will report the source file as mylog.go, not main.go. + * + * A correct implementation of Infof will obtain the source location (pc) and + * pass it to NewRecord. The Infof function in the package-level example called + * "wrapping" demonstrates how to do this. + * + * # Working with Records + * + * Sometimes a Handler will need to modify a Record before passing it on to + * another Handler or backend. A Record contains a mixture of simple public + * fields (e.g. 
Time, Level, Message) and hidden fields that refer to state + * (such as attributes) indirectly. This means that modifying a simple copy of a + * Record (e.g. by calling [Record.Add] or [Record.AddAttrs] to add attributes) + * may have unexpected effects on the original. Before modifying a Record, use + * [Record.Clone] to create a copy that shares no state with the original, or + * create a new Record with [NewRecord] and build up its Attrs by traversing the + * old ones with [Record.Attrs]. + * + * # Performance considerations + * + * If profiling your application demonstrates that logging is taking significant + * time, the following suggestions may help. + * + * If many log lines have a common attribute, use [Logger.With] to create a + * Logger with that attribute. The built-in handlers will format that attribute + * only once, at the call to [Logger.With]. The [Handler] interface is designed + * to allow that optimization, and a well-written Handler should take advantage + * of it. + * + * The arguments to a log call are always evaluated, even if the log event is + * discarded. If possible, defer computation so that it happens only if the + * value is actually logged. For example, consider the call + * + * slog.Info('starting request', 'url', r.URL.String()) // may compute String unnecessarily + * + * The URL.String method will be called even if the logger discards Info-level + * events. Instead, pass the URL directly: + * + * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed + * + * The built-in [TextHandler] will call its String method, but only if the log + * event is enabled. Avoiding the call to String also preserves the structure of + * the underlying value. For example [JSONHandler] emits the components of the + * parsed URL as a JSON object. 
If you want to avoid eagerly paying the cost of + * the String call without causing the handler to potentially inspect the + * structure of the value, wrap the value in a fmt.Stringer implementation that + * hides its Marshal methods. + * + * You can also use the [LogValuer] interface to avoid unnecessary work in + * disabled log calls. Say you need to log some expensive value: + * + * slog.Debug('frobbing', 'value', computeExpensiveValue(arg)) + * + * Even if this line is disabled, computeExpensiveValue will be called. To avoid + * that, define a type implementing LogValuer: + * + * type expensive struct { arg int } + * + * func (e expensive) LogValue() slog.Value { + * return slog.AnyValue(computeExpensiveValue(e.arg)) + * } + * + * Then use a value of that type in log calls: + * + * slog.Debug("frobbing", "value", expensive{arg}) + * + * Now computeExpensiveValue will only be called when the line is enabled. + * + * The built-in handlers acquire a lock before calling [io.Writer.Write] to + * ensure that each record is written in one piece. User-defined handlers are + * responsible for their own locking. + * + * # Writing a handler + * + * For a guide to writing a custom handler, see + * https://golang.org/s/slog-handler-guide. + */ +namespace slog { + // @ts-ignore + import loginternal = internal + /** + * A Logger records structured information about each call to its Log, Debug, + * Info, Warn, and Error methods. For each call, it creates a Record and + * passes it to a Handler. + * + * To create a new Logger, call [New] or a Logger method that begins "With". + */ + interface Logger {} + interface Logger { + /** Handler returns l's Handler. */ + handler(): Handler + } + interface Logger { + /** + * With returns a Logger that includes the given attributes in each output + * operation. Arguments are converted to attributes as if by [Logger.Log]. 
+ */ + with(...args: any[]): Logger + } + interface Logger { + /** + * WithGroup returns a Logger that starts a group, if name is non-empty. The + * keys of all attributes added to the Logger will be qualified by the given + * name. (How that qualification happens depends on the [Handler.WithGroup] + * method of the Logger's Handler.) + * + * If name is empty, WithGroup returns the receiver. + */ + withGroup(name: string): Logger + } + interface Logger { + /** + * Enabled reports whether l emits log records at the given context and + * level. + */ + enabled(ctx: context.Context, level: Level): boolean + } + interface Logger { + /** + * Log emits a log record with the current time and the given level and + * message. The Record's Attrs consist of the Logger's attributes followed + * by the Attrs specified by args. + * + * The attribute arguments are processed as follows: + * + * - If an argument is an Attr, it is used as is. + * - If an argument is a string and this is not the last argument, + * the following argument is treated as the value and the two are combined + * into an Attr. + * - Otherwise, the argument is treated as a value with key "!BADKEY". + */ + log(ctx: context.Context, level: Level, msg: string, ...args: any[]): void + } + interface Logger { + /** + * LogAttrs is a more efficient version of [Logger.Log] that accepts only + * Attrs. + */ + logAttrs( + ctx: context.Context, + level: Level, + msg: string, + ...attrs: Attr[] + ): void + } + interface Logger { + /** Debug logs at LevelDebug. */ + debug(msg: string, ...args: any[]): void + } + interface Logger { + /** DebugContext logs at LevelDebug with the given context. */ + debugContext(ctx: context.Context, msg: string, ...args: any[]): void + } + interface Logger { + /** Info logs at LevelInfo. */ + info(msg: string, ...args: any[]): void + } + interface Logger { + /** InfoContext logs at LevelInfo with the given context. 
*/ + infoContext(ctx: context.Context, msg: string, ...args: any[]): void + } + interface Logger { + /** Warn logs at LevelWarn. */ + warn(msg: string, ...args: any[]): void + } + interface Logger { + /** WarnContext logs at LevelWarn with the given context. */ + warnContext(ctx: context.Context, msg: string, ...args: any[]): void + } + interface Logger { + /** Error logs at LevelError. */ + error(msg: string, ...args: any[]): void + } + interface Logger { + /** ErrorContext logs at LevelError with the given context. */ + errorContext(ctx: context.Context, msg: string, ...args: any[]): void + } +} + +namespace subscriptions { + /** Broker defines a struct for managing subscriptions clients. */ + interface Broker {} + interface Broker { + /** + * Clients returns a shallow copy of all registered clients indexed with + * their connection id. + */ + clients(): _TygojaDict + } + interface Broker { + /** + * ClientById finds a registered client by its id. + * + * Returns non-nil error when client with clientId is not registered. + */ + clientById(clientId: string): Client + } + interface Broker { + /** Register adds a new client to the broker instance. */ + register(client: Client): void + } + interface Broker { + /** + * Unregister removes a single client by its id. + * + * If client with clientId doesn't exist, this method does nothing. + */ + unregister(clientId: string): void + } +} + /** * Package core is the backbone of PocketBase. 
* @@ -16143,6 +17135,7 @@ namespace core { } interface TerminateEvent { app: App + isRestart: boolean } interface ServeEvent { app: App @@ -16154,12 +17147,12 @@ namespace core { httpContext: echo.Context error: Error } - type _subKXAIm = BaseModelEvent - interface ModelEvent extends _subKXAIm { + type _subNQtmT = BaseModelEvent + interface ModelEvent extends _subNQtmT { dao?: daos.Dao } - type _subALxYL = BaseCollectionEvent - interface MailerRecordEvent extends _subALxYL { + type _subPreQB = BaseCollectionEvent + interface MailerRecordEvent extends _subPreQB { mailClient: mailer.Mailer message?: mailer.Message record?: models.Record @@ -16199,50 +17192,50 @@ namespace core { oldSettings?: settings.Settings newSettings?: settings.Settings } - type _subqgWfZ = BaseCollectionEvent - interface RecordsListEvent extends _subqgWfZ { + type _subWhnvy = BaseCollectionEvent + interface RecordsListEvent extends _subWhnvy { httpContext: echo.Context records: Array result?: search.Result } - type _subaGZvN = BaseCollectionEvent - interface RecordViewEvent extends _subaGZvN { + type _subRsnOH = BaseCollectionEvent + interface RecordViewEvent extends _subRsnOH { httpContext: echo.Context record?: models.Record } - type _subnDhIp = BaseCollectionEvent - interface RecordCreateEvent extends _subnDhIp { + type _subMxLdO = BaseCollectionEvent + interface RecordCreateEvent extends _subMxLdO { httpContext: echo.Context record?: models.Record uploadedFiles: _TygojaDict } - type _subMTcMC = BaseCollectionEvent - interface RecordUpdateEvent extends _subMTcMC { + type _subYqTMc = BaseCollectionEvent + interface RecordUpdateEvent extends _subYqTMc { httpContext: echo.Context record?: models.Record uploadedFiles: _TygojaDict } - type _subCcvUq = BaseCollectionEvent - interface RecordDeleteEvent extends _subCcvUq { + type _subHEvOB = BaseCollectionEvent + interface RecordDeleteEvent extends _subHEvOB { httpContext: echo.Context record?: models.Record } - type _subXtzhW = BaseCollectionEvent 
- interface RecordAuthEvent extends _subXtzhW { + type _subNJbGa = BaseCollectionEvent + interface RecordAuthEvent extends _subNJbGa { httpContext: echo.Context record?: models.Record token: string meta: any } - type _subPbBjK = BaseCollectionEvent - interface RecordAuthWithPasswordEvent extends _subPbBjK { + type _subYIAsl = BaseCollectionEvent + interface RecordAuthWithPasswordEvent extends _subYIAsl { httpContext: echo.Context record?: models.Record identity: string password: string } - type _subNhxHA = BaseCollectionEvent - interface RecordAuthWithOAuth2Event extends _subNhxHA { + type _subqPBfc = BaseCollectionEvent + interface RecordAuthWithOAuth2Event extends _subqPBfc { httpContext: echo.Context providerName: string providerClient: auth.Provider @@ -16250,49 +17243,49 @@ namespace core { oAuth2User?: auth.AuthUser isNewRecord: boolean } - type _subbCsYs = BaseCollectionEvent - interface RecordAuthRefreshEvent extends _subbCsYs { + type _subfHqNn = BaseCollectionEvent + interface RecordAuthRefreshEvent extends _subfHqNn { httpContext: echo.Context record?: models.Record } - type _subszMYh = BaseCollectionEvent - interface RecordRequestPasswordResetEvent extends _subszMYh { + type _subEEQvo = BaseCollectionEvent + interface RecordRequestPasswordResetEvent extends _subEEQvo { httpContext: echo.Context record?: models.Record } - type _subnRVNn = BaseCollectionEvent - interface RecordConfirmPasswordResetEvent extends _subnRVNn { + type _subOnHAd = BaseCollectionEvent + interface RecordConfirmPasswordResetEvent extends _subOnHAd { httpContext: echo.Context record?: models.Record } - type _subJpyCD = BaseCollectionEvent - interface RecordRequestVerificationEvent extends _subJpyCD { + type _subyQkZe = BaseCollectionEvent + interface RecordRequestVerificationEvent extends _subyQkZe { httpContext: echo.Context record?: models.Record } - type _subHNoIf = BaseCollectionEvent - interface RecordConfirmVerificationEvent extends _subHNoIf { + type _subEYLJV = 
BaseCollectionEvent + interface RecordConfirmVerificationEvent extends _subEYLJV { httpContext: echo.Context record?: models.Record } - type _subCPOmq = BaseCollectionEvent - interface RecordRequestEmailChangeEvent extends _subCPOmq { + type _subUiZui = BaseCollectionEvent + interface RecordRequestEmailChangeEvent extends _subUiZui { httpContext: echo.Context record?: models.Record } - type _subNMhOI = BaseCollectionEvent - interface RecordConfirmEmailChangeEvent extends _subNMhOI { + type _subORfFa = BaseCollectionEvent + interface RecordConfirmEmailChangeEvent extends _subORfFa { httpContext: echo.Context record?: models.Record } - type _subJkPWJ = BaseCollectionEvent - interface RecordListExternalAuthsEvent extends _subJkPWJ { + type _subyksbK = BaseCollectionEvent + interface RecordListExternalAuthsEvent extends _subyksbK { httpContext: echo.Context record?: models.Record externalAuths: Array } - type _submsDbp = BaseCollectionEvent - interface RecordUnlinkExternalAuthEvent extends _submsDbp { + type _subgmReu = BaseCollectionEvent + interface RecordUnlinkExternalAuthEvent extends _subgmReu { httpContext: echo.Context record?: models.Record externalAuth?: models.ExternalAuth @@ -16346,33 +17339,33 @@ namespace core { collections: Array result?: search.Result } - type _subqyaHS = BaseCollectionEvent - interface CollectionViewEvent extends _subqyaHS { + type _subNKoJU = BaseCollectionEvent + interface CollectionViewEvent extends _subNKoJU { httpContext: echo.Context } - type _subhPtnc = BaseCollectionEvent - interface CollectionCreateEvent extends _subhPtnc { + type _subIJGdN = BaseCollectionEvent + interface CollectionCreateEvent extends _subIJGdN { httpContext: echo.Context } - type _subYyOGv = BaseCollectionEvent - interface CollectionUpdateEvent extends _subYyOGv { + type _subvMBZb = BaseCollectionEvent + interface CollectionUpdateEvent extends _subvMBZb { httpContext: echo.Context } - type _subHHXdk = BaseCollectionEvent - interface CollectionDeleteEvent 
extends _subHHXdk { + type _subYEbNP = BaseCollectionEvent + interface CollectionDeleteEvent extends _subYEbNP { httpContext: echo.Context } interface CollectionsImportEvent { httpContext: echo.Context collections: Array } - type _subHVSrj = BaseModelEvent - interface FileTokenEvent extends _subHVSrj { + type _subFqOIc = BaseModelEvent + interface FileTokenEvent extends _subFqOIc { httpContext: echo.Context token: string } - type _subGAPsI = BaseCollectionEvent - interface FileDownloadEvent extends _subGAPsI { + type _subUxgNn = BaseCollectionEvent + interface FileDownloadEvent extends _subUxgNn { httpContext: echo.Context record?: models.Record fileField?: schema.SchemaField @@ -16381,6 +17374,59 @@ namespace core { } } +/** + * Package cobra is a commander providing a simple interface to create powerful + * modern CLI interfaces. In addition to providing an interface, Cobra + * simultaneously provides a controller to organize your application code. + */ +namespace cobra { + interface PositionalArgs { + (cmd: Command, args: Array): void + } + // @ts-ignore + import flag = pflag + /** FParseErrWhitelist configures Flag parse errors to be ignored */ + interface FParseErrWhitelist extends _TygojaAny {} + /** Group Structure to manage groups for commands */ + interface Group { + id: string + title: string + } + /** + * ShellCompDirective is a bit map representing the different behaviors the + * shell can be instructed to have once completions have been provided. 
+ */ + interface ShellCompDirective extends Number {} + /** CompletionOptions are the options to control shell completion */ + interface CompletionOptions { + /** + * DisableDefaultCmd prevents Cobra from creating a default 'completion' + * command + */ + disableDefaultCmd: boolean + /** + * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' + * flag for shells that support completion descriptions + */ + disableNoDescFlag: boolean + /** + * DisableDescriptions turns off all completion descriptions for shells that + * support them + */ + disableDescriptions: boolean + /** HiddenDefaultCmd makes the default 'completion' command hidden */ + hiddenDefaultCmd: boolean + } +} + +namespace migrate { + interface Migration { + file: string + up: (db: dbx.Builder) => void + down: (db: dbx.Builder) => void + } +} + /** * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer * object, creating another object (Reader or Writer) that also implements the @@ -16391,8 +17437,8 @@ namespace bufio { * ReadWriter stores pointers to a Reader and a Writer. It implements * io.ReadWriter. */ - type _subLYkix = Reader & Writer - interface ReadWriter extends _subLYkix {} + type _subnROWM = Reader & Writer + interface ReadWriter extends _subnROWM {} } /** @@ -16429,7 +17475,7 @@ namespace bufio { * go handleConnection(conn) * } * - * Name Resolution + * # Name Resolution * * The method for resolving domain names, whether indirectly with functions like * Dial or directly with functions like LookupHost and LookupAddr, varies by @@ -16455,7 +17501,7 @@ namespace bufio { * GODEBUG environment variable (see package runtime) to go or cgo, as in: * * export GODEBUG=netdns=go # force pure Go resolver - * export GODEBUG=netdns=cgo # force cgo resolver + * export GODEBUG=netdns=cgo # force native resolver (cgo, win32) * * The decision can also be forced while building the Go source tree by setting * the netgo or netcgo build tag. 
@@ -16465,10 +17511,14 @@ namespace bufio { * resolver while also printing debugging information, join the two settings by * a plus sign, as in GODEBUG=netdns=go+1. * + * On macOS, if Go code that uses the net package is built with + * -buildmode=c-archive, linking the resulting archive into a C program requires + * passing -lresolv when linking the C code. + * * On Plan 9, the resolver always accesses /net/cs and /net/dns. * - * On Windows, the resolver always uses C library functions, such as GetAddrInfo - * and DnsQuery. + * On Windows, in Go 1.18.x and earlier, the resolver always used C library + * functions, such as GetAddrInfo and DnsQuery. */ namespace net { /** @@ -16479,6 +17529,7 @@ namespace net { * strings is up to the implementation. */ interface Addr { + [key: string]: any network(): string // name of the network (for example, "tcp", "udp") string(): string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80") } @@ -16515,6 +17566,23 @@ namespace url { * * The implementation is sufficient for HTTP (RFC 2388) and the multipart bodies * generated by popular browsers. + * + * # Limits + * + * To protect against malicious inputs, this package sets limits on the size of + * the MIME data it processes. + * + * Reader.NextPart and Reader.NextRawPart limit the number of headers in a part + * to 10000 and Reader.ReadForm limits the total number of headers in all + * FileHeaders to 10000. These limits may be adjusted with the + * GODEBUG=multipartmaxheaders= setting. + * + * Reader.ReadForm further limits the number of parts in a form to 1000. This + * limit may be adjusted with the GODEBUG=multipartmaxparts= setting. + */ +/** + * Copyright 2023 The Go Authors. All rights reserved. Use of this source code + * is governed by a BSD-style license that can be found in the LICENSE file. */ namespace multipart { /** A Part represents a single part in a multipart body. 
*/ @@ -16546,7 +17614,7 @@ namespace multipart { * Read reads the body of a part, after its headers and before the next part * (if any) begins. */ - read(d: string): number + read(d: string | Array): number } interface Part { close(): void @@ -16565,7 +17633,7 @@ namespace multipart { * resp, err := http.PostForm("http://example.com/form", * url.Values{"key": {"Value"}, "id": {"123"}}) * - * The client must close the response body when finished with it: + * The caller must close the response body when finished with it: * * resp, err := http.Get("http://example.com/") * if err != nil { @@ -16575,6 +17643,8 @@ namespace multipart { * body, err := io.ReadAll(resp.Body) * // ... * + * # Clients and Transports + * * For control over HTTP client headers, redirect policy, and other settings, * create a Client: * @@ -16605,6 +17675,8 @@ namespace multipart { * Clients and Transports are safe for concurrent use by multiple goroutines and * for efficiency should only be created once and re-used. * + * # Servers + * * ListenAndServe starts an HTTP server with a given address and handler. The * handler is usually nil, which means to use DefaultServeMux. Handle and * HandleFunc add handlers to DefaultServeMux: @@ -16629,18 +17701,19 @@ namespace multipart { * } * log.Fatal(s.ListenAndServe()) * + * # HTTP/2 + * * Starting with Go 1.6, the http package has transparent support for the HTTP/2 * protocol when using HTTPS. Programs that must disable HTTP/2 can do so by * setting Transport.TLSNextProto (for clients) or Server.TLSNextProto (for * servers) to a non-nil, empty map. Alternatively, the following GODEBUG - * environment variables are currently supported: + * settings are currently supported: * * GODEBUG=http2client=0 # disable HTTP/2 client support * GODEBUG=http2server=0 # disable HTTP/2 server support * GODEBUG=http2debug=1 # enable verbose HTTP/2 debug logs * GODEBUG=http2debug=2 # ... 
even more verbose, with frame dumps * - * The GODEBUG variables are not covered by Go's API compatibility promise. * Please report any issues before disabling HTTP/2 support: * https://golang.org/s/http2bug * @@ -16669,6 +17742,67 @@ namespace http { import urlpkg = url } +/** + * Package types implements some commonly used db serializable types like + * datetime, json, etc. + */ +namespace types { + /** JsonRaw defines a json value type that is safe for db read/write. */ + interface JsonRaw extends Array {} + interface JsonRaw { + /** String returns the current JsonRaw instance as a json encoded string. */ + string(): string + } + interface JsonRaw { + /** MarshalJSON implements the [json.Marshaler] interface. */ + marshalJSON(): string | Array + } + interface JsonRaw { + /** UnmarshalJSON implements the [json.Unmarshaler] interface. */ + unmarshalJSON(b: string | Array): void + } + interface JsonRaw { + /** Value implements the [driver.Valuer] interface. */ + value(): any + } + interface JsonRaw { + /** + * Scan implements [sql.Scanner] interface to scan the provided value into + * the current JsonRaw instance. + */ + scan(value: any): void + } +} + +namespace store {} + +namespace mailer { + /** Message defines a generic email message struct. */ + interface Message { + from: mail.Address + to: Array + bcc: Array + cc: Array + subject: string + html: string + text: string + headers: _TygojaDict + attachments: _TygojaDict + } + + interface Message_In extends Message { + from: mail.Address_In + to: Array + bcc: Array + cc: Array + subject: string + html: string + text: string + headers: _TygojaDict + attachments: _TygojaDict + } +} + /** * Package echo implements high performance, minimalist Go web framework. * @@ -16746,13 +17880,14 @@ namespace echo { * routing purposes and should not be used in middlewares. */ interface RoutableContext { + [key: string]: any /** Request returns `*http.Request`. 
*/ - request(): http.Request | undefined + request(): http.Request /** * RawPathParams returns raw path pathParams value. Allocation of PathParams * is handled by Context. */ - rawPathParams(): PathParams | undefined + rawPathParams(): PathParams /** * SetRawPathParams replaces any existing param values with new values for * this context lifetime (request). Do not set any other value than what you @@ -16778,55 +17913,6 @@ namespace echo { } } -namespace store {} - -/** - * Package types implements some commonly used db serializable types like - * datetime, json, etc. - */ -namespace types { - /** JsonRaw defines a json value type that is safe for db read/write. */ - interface JsonRaw extends String {} - interface JsonRaw { - /** String returns the current JsonRaw instance as a json encoded string. */ - string(): string - } - interface JsonRaw { - /** MarshalJSON implements the [json.Marshaler] interface. */ - marshalJSON(): string - } - interface JsonRaw { - /** UnmarshalJSON implements the [json.Unmarshaler] interface. */ - unmarshalJSON(b: string): void - } - interface JsonRaw { - /** Value implements the [driver.Valuer] interface. */ - value(): any - } - interface JsonRaw { - /** - * Scan implements [sql.Scanner] interface to scan the provided value into - * the current JsonRaw instance. - */ - scan(value: {}): void - } -} - -namespace mailer { - /** Message defines a generic email message struct. */ - interface Message { - from: mail.Address - to: Array - bcc: Array - cc: Array - subject: string - html: string - text: string - headers: _TygojaDict - attachments: _TygojaDict - } -} - namespace search { /** Result defines the returned search result structure. */ interface Result { @@ -16862,34 +17948,38 @@ namespace settings { } } -namespace hook { - /** Handler defines a hook handler function. */ - interface Handler { - (e: T): void - } - /** Wrapped local Hook embedded struct to limit the public API surface. 
*/ - type _subvTioz = Hook - interface mainHook extends _subvTioz {} -} - namespace subscriptions { /** Message defines a client's channel data. */ interface Message { name: string - data: string + data: string | Array } /** Client is an interface for a generic subscription client. */ interface Client { + [key: string]: any /** Id Returns the unique id of the client. */ id(): string /** Channel returns the client's communication channel. */ channel(): undefined /** - * Subscriptions returns all subscriptions to which the client has - * subscribed to. + * Subscriptions returns a shallow copy of the the client subscriptions + * matching the prefixes. If no prefix is specified, returns all + * subscriptions. + */ + subscriptions(...prefixes: string[]): _TygojaDict + /** + * Subscribe subscribes the client to the provided subscriptions list. + * + * Each subscription can also have "options" (json serialized + * SubscriptionOptions) as query parameter. + * + * Example: + * + * Subscribe( + * 'subscriptionA', + * `subscriptionB?options={"query":{"a":1},"headers":{"x_token":"abc"}}`, + * ) */ - subscriptions(): _TygojaDict - /** Subscribe subscribes the client to the provided subscriptions list. */ subscribe(...subs: string[]): void /** Unsubscribe unsubscribes the client from the provided subscriptions list. */ unsubscribe(...subs: string[]): void @@ -16921,6 +18011,475 @@ namespace subscriptions { } } +/** + * Package slog provides structured logging, in which log records include a + * message, a severity level, and various other attributes expressed as + * key-value pairs. + * + * It defines a type, [Logger], which provides several methods (such as + * [Logger.Info] and [Logger.Error]) for reporting events of interest. + * + * Each Logger is associated with a [Handler]. A Logger output method creates a + * [Record] from the method arguments and passes it to the Handler, which + * decides how to handle it. 
There is a default Logger accessible through + * top-level functions (such as [Info] and [Error]) that call the corresponding + * Logger methods. + * + * A log record consists of a time, a level, a message, and a set of key-value + * pairs, where the keys are strings and the values may be of any type. As an + * example, + * + * slog.Info('hello', 'count', 3) + * + * Creates a record containing the time of the call, a level of Info, the + * message "hello", and a single pair with key "count" and value 3. + * + * The [Info] top-level function calls the [Logger.Info] method on the default + * Logger. In addition to [Logger.Info], there are methods for Debug, Warn and + * Error levels. Besides these convenience methods for common levels, there is + * also a [Logger.Log] method which takes the level as an argument. Each of + * these methods has a corresponding top-level function that uses the default + * logger. + * + * The default handler formats the log record's message, time, level, and + * attributes as a string and passes it to the [log] package. + * + * 2022/11/08 15:28:26 INFO hello count=3 + * + * For more control over the output format, create a logger with a different + * handler. This statement uses [New] to create a new logger with a TextHandler + * that writes structured records in text form to standard error: + * + * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + * + * [TextHandler] output is a sequence of key=value pairs, easily and + * unambiguously parsed by machine. 
This statement: + * + * logger.Info('hello', 'count', 3) + * + * Produces this output: + * + * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + * + * The package also provides [JSONHandler], whose output is line-delimited JSON: + * + * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + * logger.Info("hello", "count", 3) + * + * Produces this output: + * + * { + * "time": "2022-11-08T15:28:26.000000000-05:00", + * "level": "INFO", + * "msg": "hello", + * "count": 3 + * } + * + * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. + * There are options for setting the minimum level (see Levels, below), + * displaying the source file and line of the log call, and modifying attributes + * before they are logged. + * + * Setting a logger as the default with + * + * slog.SetDefault(logger) + * + * Will cause the top-level functions like [Info] to use it. [SetDefault] also + * updates the default logger used by the [log] package, so that existing + * applications that use [log.Printf] and related functions will send log + * records to the logger's handler without needing to be rewritten. + * + * Some attributes are common to many log calls. For example, you may wish to + * include the URL or trace identifier of a server request with all log events + * arising from the request. Rather than repeat the attribute with every log + * call, you can use [Logger.With] to construct a new Logger containing the + * attributes: + * + * logger2 := logger.With("url", r.URL) + * + * The arguments to With are the same key-value pairs used in [Logger.Info]. The + * result is a new Logger with the same handler as the original, but additional + * attributes that will appear in the output of every call. + * + * # Levels + * + * A [Level] is an integer representing the importance or severity of a log + * event. The higher the level, the more severe the event. 
This package defines + * constants for the most common levels, but any int can be used as a level. + * + * In an application, you may wish to log messages only at a certain level or + * greater. One common configuration is to log messages at Info or higher + * levels, suppressing debug logging until it is needed. The built-in handlers + * can be configured with the minimum level to output by setting + * [HandlerOptions.Level]. The program's `main` function typically does this. + * The default value is LevelInfo. + * + * Setting the [HandlerOptions.Level] field to a [Level] value fixes the + * handler's minimum level throughout its lifetime. Setting it to a [LevelVar] + * allows the level to be varied dynamically. A LevelVar holds a Level and is + * safe to read or write from multiple goroutines. To vary the level dynamically + * for an entire program, first initialize a global LevelVar: + * + * var programLevel = new slog.LevelVar() // Info by default + * + * Then use the LevelVar to construct a handler, and make it the default: + * + * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + * slog.SetDefault(slog.New(h)) + * + * Now the program can change its logging level with a single statement: + * + * programLevel.Set(slog.LevelDebug) + * + * # Groups + * + * Attributes can be collected into groups. A group has a name that is used to + * qualify the names of its attributes. How this qualification is displayed + * depends on the handler. [TextHandler] separates the group and attribute names + * with a dot. [JSONHandler] treats each group as a separate JSON object, with + * the group name as the key. 
+ * + * Use [Group] to create a Group attribute from a name and a list of key-value + * pairs: + * + * slog.Group('request', 'method', r.Method, 'url', r.URL) + * + * TextHandler would display this group as + * + * request.method=GET request.url=http://example.com + * + * JSONHandler would display it as + * + * "request":{"method":"GET","url":"http://example.com"} + * + * Use [Logger.WithGroup] to qualify all of a Logger's output with a group name. + * Calling WithGroup on a Logger results in a new Logger with the same Handler + * as the original, but with all its attributes qualified by the group name. + * + * This can help prevent duplicate attribute keys in large systems, where + * subsystems might use the same keys. Pass each subsystem a different Logger + * with its own group name so that potential duplicates are qualified: + * + * logger := slog.Default().With("id", systemID) + * parserLogger := logger.WithGroup("parser") + * parseInput(input, parserLogger) + * + * When parseInput logs with parserLogger, its keys will be qualified with + * "parser", so even if it uses the common key "id", the log line will have + * distinct keys. + * + * # Contexts + * + * Some handlers may wish to include information from the [context.Context] that + * is available at the call site. One example of such information is the + * identifier for the current span when tracing is enabled. + * + * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first + * argument, as do their corresponding top-level functions. + * + * Although the convenience methods on Logger (Info and so on) and the + * corresponding top-level functions do not take a context, the alternatives + * ending in "Context" do. For example, + * + * slog.InfoContext(ctx, 'message') + * + * It is recommended to pass a context to an output method if one is available. + * + * # Attrs and Values + * + * An [Attr] is a key-value pair. 
The Logger output methods accept Attrs as well + * as alternating keys and values. The statement + * + * slog.Info('hello', slog.Int('count', 3)) + * + * Behaves the same as + * + * slog.Info('hello', 'count', 3) + * + * There are convenience constructors for [Attr] such as [Int], [String], and + * [Bool] for common types, as well as the function [Any] for constructing Attrs + * of any type. + * + * The value part of an Attr is a type called [Value]. Like an [any], a Value + * can hold any Go value, but it can represent typical values, including all + * numbers and strings, without an allocation. + * + * For the most efficient log output, use [Logger.LogAttrs]. It is similar to + * [Logger.Log] but accepts only Attrs, not alternating keys and values; this + * allows it, too, to avoid allocation. + * + * The call + * + * logger.LogAttrs(ctx, slog.LevelInfo, 'hello', slog.Int('count', 3)) + * + * Is the most efficient way to achieve the same output as + * + * slog.Info('hello', 'count', 3) + * + * # Customizing a type's logging behavior + * + * If a type implements the [LogValuer] interface, the [Value] returned from its + * LogValue method is used for logging. You can use this to control how values + * of the type appear in logs. For example, you can redact secret information + * like passwords, or gather a struct's fields in a Group. See the examples + * under [LogValuer] for details. + * + * A LogValue method may return a Value that itself implements [LogValuer]. The + * [Value.Resolve] method handles these cases carefully, avoiding infinite loops + * and unbounded recursion. Handler authors and others may wish to use + * Value.Resolve instead of calling LogValue directly. + * + * # Wrapping output methods + * + * The logger functions use reflection over the call stack to find the file name + * and line number of the logging call within the application. This can produce + * incorrect source information for functions that wrap slog. 
For instance, if + * you define this function in file mylog.go: + * + * func Infof(format string, args ...any) { + * slog.Default().Info(fmt.Sprintf(format, args...)) + * } + * + * And you call it like this in main.go: + * + * Infof(slog.Default(), 'hello, %s', 'world') + * + * Then slog will report the source file as mylog.go, not main.go. + * + * A correct implementation of Infof will obtain the source location (pc) and + * pass it to NewRecord. The Infof function in the package-level example called + * "wrapping" demonstrates how to do this. + * + * # Working with Records + * + * Sometimes a Handler will need to modify a Record before passing it on to + * another Handler or backend. A Record contains a mixture of simple public + * fields (e.g. Time, Level, Message) and hidden fields that refer to state + * (such as attributes) indirectly. This means that modifying a simple copy of a + * Record (e.g. by calling [Record.Add] or [Record.AddAttrs] to add attributes) + * may have unexpected effects on the original. Before modifying a Record, use + * [Record.Clone] to create a copy that shares no state with the original, or + * create a new Record with [NewRecord] and build up its Attrs by traversing the + * old ones with [Record.Attrs]. + * + * # Performance considerations + * + * If profiling your application demonstrates that logging is taking significant + * time, the following suggestions may help. + * + * If many log lines have a common attribute, use [Logger.With] to create a + * Logger with that attribute. The built-in handlers will format that attribute + * only once, at the call to [Logger.With]. The [Handler] interface is designed + * to allow that optimization, and a well-written Handler should take advantage + * of it. + * + * The arguments to a log call are always evaluated, even if the log event is + * discarded. If possible, defer computation so that it happens only if the + * value is actually logged. 
For example, consider the call + * + * slog.Info('starting request', 'url', r.URL.String()) // may compute String unnecessarily + * + * The URL.String method will be called even if the logger discards Info-level + * events. Instead, pass the URL directly: + * + * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed + * + * The built-in [TextHandler] will call its String method, but only if the log + * event is enabled. Avoiding the call to String also preserves the structure of + * the underlying value. For example [JSONHandler] emits the components of the + * parsed URL as a JSON object. If you want to avoid eagerly paying the cost of + * the String call without causing the handler to potentially inspect the + * structure of the value, wrap the value in a fmt.Stringer implementation that + * hides its Marshal methods. + * + * You can also use the [LogValuer] interface to avoid unnecessary work in + * disabled log calls. Say you need to log some expensive value: + * + * slog.Debug('frobbing', 'value', computeExpensiveValue(arg)) + * + * Even if this line is disabled, computeExpensiveValue will be called. To avoid + * that, define a type implementing LogValuer: + * + * type expensive struct { arg int } + * + * func (e expensive) LogValue() slog.Value { + * return slog.AnyValue(computeExpensiveValue(e.arg)) + * } + * + * Then use a value of that type in log calls: + * + * slog.Debug("frobbing", "value", expensive{arg}) + * + * Now computeExpensiveValue will only be called when the line is enabled. + * + * The built-in handlers acquire a lock before calling [io.Writer.Write] to + * ensure that each record is written in one piece. User-defined handlers are + * responsible for their own locking. + * + * # Writing a handler + * + * For a guide to writing a custom handler, see + * https://golang.org/s/slog-handler-guide. + */ +namespace slog { + /** An Attr is a key-value pair. 
*/ + interface Attr { + key: string + value: Value + } + interface Attr { + /** Equal reports whether a and b have equal keys and values. */ + equal(b: Attr): boolean + } + interface Attr { + string(): string + } + /** + * A Handler handles log records produced by a Logger.. + * + * A typical handler may print log records to standard error, or write them to + * a file or database, or perhaps augment them with additional attributes and + * pass them on to another handler. + * + * Any of the Handler's methods may be called concurrently with itself or with + * other methods. It is the responsibility of the Handler to manage this + * concurrency. + * + * Users of the slog package should not invoke Handler methods directly. They + * should use the methods of [Logger] instead. + */ + interface Handler { + [key: string]: any + /** + * Enabled reports whether the handler handles records at the given level. + * The handler ignores records whose level is lower. It is called early, + * before any arguments are processed, to save effort if the log event + * should be discarded. If called from a Logger method, the first argument + * is the context passed to that method, or context.Background() if nil was + * passed or the method does not take a context. The context is passed so + * Enabled can use its values to make a decision. + */ + enabled(_arg0: context.Context, _arg1: Level): boolean + /** + * Handle handles the Record. It will only be called when Enabled returns + * true. The Context argument is as for Enabled. It is present solely to + * provide Handlers access to the context's values. Canceling the context + * should not affect record processing. (Among other things, log messages + * may be necessary to debug a cancellation-related problem.) + * + * Handle methods that produce output should observe the following rules: + * + * - If r.Time is the zero time, ignore the time. + * - If r.PC is zero, ignore it. + * - Attr's values should be resolved. 
+ * - If an Attr's key and value are both the zero value, ignore the Attr. + * This can be tested with attr.Equal(Attr{}). + * - If a group's key is empty, inline the group's Attrs. + * - If a group has no Attrs (even if it has a non-empty key), + * ignore it. + */ + handle(_arg0: context.Context, _arg1: Record): void + /** + * WithAttrs returns a new Handler whose attributes consist of both the + * receiver's attributes and the arguments. The Handler owns the slice: it + * may retain, modify or discard it. + */ + withAttrs(attrs: Array): Handler + /** + * WithGroup returns a new Handler with the given group appended to the + * receiver's existing groups. The keys of all subsequent attributes, + * whether added by With or in a Record, should be qualified by the sequence + * of group names. + * + * How this qualification happens is up to the Handler, so long as this + * Handler's attribute keys differ from those of another Handler with a + * different sequence of group names. + * + * A Handler should treat WithGroup as starting a Group of Attrs that ends + * at the end of the log event. That is, + * + * logger + * .WithGroup('s') + * .LogAttrs(level, msg, slog.Int('a', 1), slog.Int('b', 2)) + * + * Should behave like + * + * logger.LogAttrs( + * level, + * msg, + * slog.Group('s', slog.Int('a', 1), slog.Int('b', 2)), + * ) + * + * If the name is empty, WithGroup returns the receiver. + */ + withGroup(name: string): Handler + } + /** + * A Level is the importance or severity of a log event. The higher the level, + * the more important or severe the event. + */ + interface Level extends Number {} + interface Level { + /** + * String returns a name for the level. If the level has a name, then that + * name in uppercase is returned. If the level is between named values, then + * an integer is appended to the uppercased name. 
Examples: + * + * LevelWarn.String() => "WARN" + * (LevelInfo+2).String() => "INFO+2" + */ + string(): string + } + interface Level { + /** + * MarshalJSON implements [encoding/json.Marshaler] by quoting the output of + * [Level.String]. + */ + marshalJSON(): string | Array + } + interface Level { + /** + * UnmarshalJSON implements [encoding/json.Unmarshaler] It accepts any + * string produced by [Level.MarshalJSON], ignoring case. It also accepts + * numeric offsets that would result in a different string on output. For + * example, "Error-8" would marshal as "INFO". + */ + unmarshalJSON(data: string | Array): void + } + interface Level { + /** + * MarshalText implements [encoding.TextMarshaler] by calling + * [Level.String]. + */ + marshalText(): string | Array + } + interface Level { + /** + * UnmarshalText implements [encoding.TextUnmarshaler]. It accepts any + * string produced by [Level.MarshalText], ignoring case. It also accepts + * numeric offsets that would result in a different string on output. For + * example, "Error-8" would marshal as "INFO". + */ + unmarshalText(data: string | Array): void + } + interface Level { + /** Level returns the receiver. It implements Leveler. */ + level(): Level + } + // @ts-ignore + import loginternal = internal +} + +namespace hook { + /** Handler defines a hook handler function. */ + interface Handler { + (e: T): void + } + /** Wrapped local Hook embedded struct to limit the public API surface. */ + type _subTlzch = Hook + interface mainHook extends _subTlzch {} +} + /** * Package core is the backbone of PocketBase. * @@ -16941,6 +18500,462 @@ namespace core { } } +namespace subscriptions {} + +/** + * Package slog provides structured logging, in which log records include a + * message, a severity level, and various other attributes expressed as + * key-value pairs. + * + * It defines a type, [Logger], which provides several methods (such as + * [Logger.Info] and [Logger.Error]) for reporting events of interest. 
+ * + * Each Logger is associated with a [Handler]. A Logger output method creates a + * [Record] from the method arguments and passes it to the Handler, which + * decides how to handle it. There is a default Logger accessible through + * top-level functions (such as [Info] and [Error]) that call the corresponding + * Logger methods. + * + * A log record consists of a time, a level, a message, and a set of key-value + * pairs, where the keys are strings and the values may be of any type. As an + * example, + * + * slog.Info('hello', 'count', 3) + * + * Creates a record containing the time of the call, a level of Info, the + * message "hello", and a single pair with key "count" and value 3. + * + * The [Info] top-level function calls the [Logger.Info] method on the default + * Logger. In addition to [Logger.Info], there are methods for Debug, Warn and + * Error levels. Besides these convenience methods for common levels, there is + * also a [Logger.Log] method which takes the level as an argument. Each of + * these methods has a corresponding top-level function that uses the default + * logger. + * + * The default handler formats the log record's message, time, level, and + * attributes as a string and passes it to the [log] package. + * + * 2022/11/08 15:28:26 INFO hello count=3 + * + * For more control over the output format, create a logger with a different + * handler. This statement uses [New] to create a new logger with a TextHandler + * that writes structured records in text form to standard error: + * + * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + * + * [TextHandler] output is a sequence of key=value pairs, easily and + * unambiguously parsed by machine. 
This statement: + * + * logger.Info('hello', 'count', 3) + * + * Produces this output: + * + * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + * + * The package also provides [JSONHandler], whose output is line-delimited JSON: + * + * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + * logger.Info("hello", "count", 3) + * + * Produces this output: + * + * { + * "time": "2022-11-08T15:28:26.000000000-05:00", + * "level": "INFO", + * "msg": "hello", + * "count": 3 + * } + * + * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. + * There are options for setting the minimum level (see Levels, below), + * displaying the source file and line of the log call, and modifying attributes + * before they are logged. + * + * Setting a logger as the default with + * + * slog.SetDefault(logger) + * + * Will cause the top-level functions like [Info] to use it. [SetDefault] also + * updates the default logger used by the [log] package, so that existing + * applications that use [log.Printf] and related functions will send log + * records to the logger's handler without needing to be rewritten. + * + * Some attributes are common to many log calls. For example, you may wish to + * include the URL or trace identifier of a server request with all log events + * arising from the request. Rather than repeat the attribute with every log + * call, you can use [Logger.With] to construct a new Logger containing the + * attributes: + * + * logger2 := logger.With("url", r.URL) + * + * The arguments to With are the same key-value pairs used in [Logger.Info]. The + * result is a new Logger with the same handler as the original, but additional + * attributes that will appear in the output of every call. + * + * # Levels + * + * A [Level] is an integer representing the importance or severity of a log + * event. The higher the level, the more severe the event. 
This package defines + * constants for the most common levels, but any int can be used as a level. + * + * In an application, you may wish to log messages only at a certain level or + * greater. One common configuration is to log messages at Info or higher + * levels, suppressing debug logging until it is needed. The built-in handlers + * can be configured with the minimum level to output by setting + * [HandlerOptions.Level]. The program's `main` function typically does this. + * The default value is LevelInfo. + * + * Setting the [HandlerOptions.Level] field to a [Level] value fixes the + * handler's minimum level throughout its lifetime. Setting it to a [LevelVar] + * allows the level to be varied dynamically. A LevelVar holds a Level and is + * safe to read or write from multiple goroutines. To vary the level dynamically + * for an entire program, first initialize a global LevelVar: + * + * var programLevel = new slog.LevelVar() // Info by default + * + * Then use the LevelVar to construct a handler, and make it the default: + * + * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + * slog.SetDefault(slog.New(h)) + * + * Now the program can change its logging level with a single statement: + * + * programLevel.Set(slog.LevelDebug) + * + * # Groups + * + * Attributes can be collected into groups. A group has a name that is used to + * qualify the names of its attributes. How this qualification is displayed + * depends on the handler. [TextHandler] separates the group and attribute names + * with a dot. [JSONHandler] treats each group as a separate JSON object, with + * the group name as the key. 
+ * + * Use [Group] to create a Group attribute from a name and a list of key-value + * pairs: + * + * slog.Group('request', 'method', r.Method, 'url', r.URL) + * + * TextHandler would display this group as + * + * request.method=GET request.url=http://example.com + * + * JSONHandler would display it as + * + * "request":{"method":"GET","url":"http://example.com"} + * + * Use [Logger.WithGroup] to qualify all of a Logger's output with a group name. + * Calling WithGroup on a Logger results in a new Logger with the same Handler + * as the original, but with all its attributes qualified by the group name. + * + * This can help prevent duplicate attribute keys in large systems, where + * subsystems might use the same keys. Pass each subsystem a different Logger + * with its own group name so that potential duplicates are qualified: + * + * logger := slog.Default().With("id", systemID) + * parserLogger := logger.WithGroup("parser") + * parseInput(input, parserLogger) + * + * When parseInput logs with parserLogger, its keys will be qualified with + * "parser", so even if it uses the common key "id", the log line will have + * distinct keys. + * + * # Contexts + * + * Some handlers may wish to include information from the [context.Context] that + * is available at the call site. One example of such information is the + * identifier for the current span when tracing is enabled. + * + * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first + * argument, as do their corresponding top-level functions. + * + * Although the convenience methods on Logger (Info and so on) and the + * corresponding top-level functions do not take a context, the alternatives + * ending in "Context" do. For example, + * + * slog.InfoContext(ctx, 'message') + * + * It is recommended to pass a context to an output method if one is available. + * + * # Attrs and Values + * + * An [Attr] is a key-value pair. 
The Logger output methods accept Attrs as well + * as alternating keys and values. The statement + * + * slog.Info('hello', slog.Int('count', 3)) + * + * Behaves the same as + * + * slog.Info('hello', 'count', 3) + * + * There are convenience constructors for [Attr] such as [Int], [String], and + * [Bool] for common types, as well as the function [Any] for constructing Attrs + * of any type. + * + * The value part of an Attr is a type called [Value]. Like an [any], a Value + * can hold any Go value, but it can represent typical values, including all + * numbers and strings, without an allocation. + * + * For the most efficient log output, use [Logger.LogAttrs]. It is similar to + * [Logger.Log] but accepts only Attrs, not alternating keys and values; this + * allows it, too, to avoid allocation. + * + * The call + * + * logger.LogAttrs(ctx, slog.LevelInfo, 'hello', slog.Int('count', 3)) + * + * Is the most efficient way to achieve the same output as + * + * slog.Info('hello', 'count', 3) + * + * # Customizing a type's logging behavior + * + * If a type implements the [LogValuer] interface, the [Value] returned from its + * LogValue method is used for logging. You can use this to control how values + * of the type appear in logs. For example, you can redact secret information + * like passwords, or gather a struct's fields in a Group. See the examples + * under [LogValuer] for details. + * + * A LogValue method may return a Value that itself implements [LogValuer]. The + * [Value.Resolve] method handles these cases carefully, avoiding infinite loops + * and unbounded recursion. Handler authors and others may wish to use + * Value.Resolve instead of calling LogValue directly. + * + * # Wrapping output methods + * + * The logger functions use reflection over the call stack to find the file name + * and line number of the logging call within the application. This can produce + * incorrect source information for functions that wrap slog. 
For instance, if + * you define this function in file mylog.go: + * + * func Infof(format string, args ...any) { + * slog.Default().Info(fmt.Sprintf(format, args...)) + * } + * + * And you call it like this in main.go: + * + * Infof(slog.Default(), 'hello, %s', 'world') + * + * Then slog will report the source file as mylog.go, not main.go. + * + * A correct implementation of Infof will obtain the source location (pc) and + * pass it to NewRecord. The Infof function in the package-level example called + * "wrapping" demonstrates how to do this. + * + * # Working with Records + * + * Sometimes a Handler will need to modify a Record before passing it on to + * another Handler or backend. A Record contains a mixture of simple public + * fields (e.g. Time, Level, Message) and hidden fields that refer to state + * (such as attributes) indirectly. This means that modifying a simple copy of a + * Record (e.g. by calling [Record.Add] or [Record.AddAttrs] to add attributes) + * may have unexpected effects on the original. Before modifying a Record, use + * [Record.Clone] to create a copy that shares no state with the original, or + * create a new Record with [NewRecord] and build up its Attrs by traversing the + * old ones with [Record.Attrs]. + * + * # Performance considerations + * + * If profiling your application demonstrates that logging is taking significant + * time, the following suggestions may help. + * + * If many log lines have a common attribute, use [Logger.With] to create a + * Logger with that attribute. The built-in handlers will format that attribute + * only once, at the call to [Logger.With]. The [Handler] interface is designed + * to allow that optimization, and a well-written Handler should take advantage + * of it. + * + * The arguments to a log call are always evaluated, even if the log event is + * discarded. If possible, defer computation so that it happens only if the + * value is actually logged. 
For example, consider the call + * + * slog.Info('starting request', 'url', r.URL.String()) // may compute String unnecessarily + * + * The URL.String method will be called even if the logger discards Info-level + * events. Instead, pass the URL directly: + * + * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed + * + * The built-in [TextHandler] will call its String method, but only if the log + * event is enabled. Avoiding the call to String also preserves the structure of + * the underlying value. For example [JSONHandler] emits the components of the + * parsed URL as a JSON object. If you want to avoid eagerly paying the cost of + * the String call without causing the handler to potentially inspect the + * structure of the value, wrap the value in a fmt.Stringer implementation that + * hides its Marshal methods. + * + * You can also use the [LogValuer] interface to avoid unnecessary work in + * disabled log calls. Say you need to log some expensive value: + * + * slog.Debug('frobbing', 'value', computeExpensiveValue(arg)) + * + * Even if this line is disabled, computeExpensiveValue will be called. To avoid + * that, define a type implementing LogValuer: + * + * type expensive struct { arg int } + * + * func (e expensive) LogValue() slog.Value { + * return slog.AnyValue(computeExpensiveValue(e.arg)) + * } + * + * Then use a value of that type in log calls: + * + * slog.Debug("frobbing", "value", expensive{arg}) + * + * Now computeExpensiveValue will only be called when the line is enabled. + * + * The built-in handlers acquire a lock before calling [io.Writer.Write] to + * ensure that each record is written in one piece. User-defined handlers are + * responsible for their own locking. + * + * # Writing a handler + * + * For a guide to writing a custom handler, see + * https://golang.org/s/slog-handler-guide. + */ +namespace slog { + // @ts-ignore + import loginternal = internal + /** + * A Record holds information about a log event. 
Copies of a Record share + * state. Do not modify a Record after handing out a copy to it. Call + * [NewRecord] to create a new Record. Use [Record.Clone] to create a copy + * with no shared state. + */ + interface Record { + /** The time at which the output method (Log, Info, etc.) was called. */ + time: time.Time + /** The log message. */ + message: string + /** The level of the event. */ + level: Level + /** + * The program counter at the time the record was constructed, as determined + * by runtime.Callers. If zero, no program counter is available. + * + * The only valid use for this value is as an argument to + * [runtime.CallersFrames]. In particular, it must not be passed to + * [runtime.FuncForPC]. + */ + pc: number + } + interface Record { + /** + * Clone returns a copy of the record with no shared state. The original + * record and the clone can both be modified without interfering with each + * other. + */ + clone(): Record + } + interface Record { + /** NumAttrs returns the number of attributes in the Record. */ + numAttrs(): number + } + interface Record { + /** + * Attrs calls f on each Attr in the Record. Iteration stops if f returns + * false. + */ + attrs(f: (_arg0: Attr) => boolean): void + } + interface Record { + /** + * AddAttrs appends the given Attrs to the Record's list of Attrs. It omits + * empty groups. + */ + addAttrs(...attrs: Attr[]): void + } + interface Record { + /** + * Add converts the args to Attrs as described in [Logger.Log], then appends + * the Attrs to the Record's list of Attrs. It omits empty groups. + */ + add(...args: any[]): void + } + /** + * A Value can represent any Go value, but unlike type any, it can represent + * most small values without an allocation. The zero Value corresponds to + * nil. + */ + interface Value {} + interface Value { + /** Kind returns v's Kind. */ + kind(): Kind + } + interface Value { + /** Any returns v's value as an any. 
*/ + any(): any + } + interface Value { + /** + * String returns Value's value as a string, formatted like fmt.Sprint. + * Unlike the methods Int64, Float64, and so on, which panic if v is of the + * wrong kind, String never panics. + */ + string(): string + } + interface Value { + /** + * Int64 returns v's value as an int64. It panics if v is not a signed + * integer. + */ + int64(): number + } + interface Value { + /** + * Uint64 returns v's value as a uint64. It panics if v is not an unsigned + * integer. + */ + uint64(): number + } + interface Value { + /** Bool returns v's value as a bool. It panics if v is not a bool. */ + bool(): boolean + } + interface Value { + /** + * Duration returns v's value as a time.Duration. It panics if v is not a + * time.Duration. + */ + duration(): time.Duration + } + interface Value { + /** Float64 returns v's value as a float64. It panics if v is not a float64. */ + float64(): number + } + interface Value { + /** Time returns v's value as a time.Time. It panics if v is not a time.Time. */ + time(): time.Time + } + interface Value { + /** + * LogValuer returns v's value as a LogValuer. It panics if v is not a + * LogValuer. + */ + logValuer(): LogValuer + } + interface Value { + /** + * Group returns v's value as a []Attr. It panics if v's Kind is not + * KindGroup. + */ + group(): Array + } + interface Value { + /** Equal reports whether v and w represent the same Go value. */ + equal(w: Value): boolean + } + interface Value { + /** + * Resolve repeatedly calls LogValue on v while it implements LogValuer, and + * returns the result. If v resolves to a group, the group's attributes' + * values are not recursively resolved. If the number of LogValue calls + * exceeds a threshold, a Value containing an error is returned. Resolve's + * return value is guaranteed not to be of Kind KindLogValuer. + */ + resolve(): Value + } +} + /** * Package bufio implements buffered I/O. 
It wraps an io.Reader or io.Writer * object, creating another object (Reader or Writer) that also implements the @@ -16957,7 +18972,8 @@ namespace bufio { /** * Reset discards any buffered data, resets all state, and switches the * buffered reader to read from r. Calling Reset on the zero value of Reader - * initializes the internal buffer to the default size. + * initializes the internal buffer to the default size. Calling b.Reset(b) + * (that is, resetting a Reader to itself) does nothing. */ reset(r: io.Reader): void } @@ -16971,7 +18987,7 @@ namespace bufio { * Calling Peek prevents a UnreadByte or UnreadRune call from succeeding * until the next read operation. */ - peek(n: number): string + peek(n: number): string | Array } interface Reader { /** @@ -16988,16 +19004,17 @@ namespace bufio { * Read reads data into p. It returns the number of bytes read into p. The * bytes are taken from at most one Read on the underlying Reader, hence n * may be less than len(p). To read exactly len(p) bytes, use io.ReadFull(b, - * p). At EOF, the count will be zero and err will be io.EOF. + * p). If the underlying Reader can return a non-zero count with io.EOF, + * then this Read method can do so as well; see the [io.Reader] docs. */ - read(p: string): number + read(p: string | Array): number } interface Reader { /** * ReadByte reads and returns a single byte. If no byte is available, * returns an error. */ - readByte(): string + readByte(): number } interface Reader { /** @@ -17016,7 +19033,7 @@ namespace bufio { * rune and its size in bytes. If the encoded rune is invalid, it consumes * one byte and returns unicode.ReplacementChar (U+FFFD) with a size of 1. */ - readRune(): [string, number] + readRune(): [number, number] } interface Reader { /** @@ -17046,7 +19063,7 @@ namespace bufio { * ReadBytes or ReadString instead. ReadSlice returns err != nil if and only * if line does not end in delim. 
*/ - readSlice(delim: string): string + readSlice(delim: number): string | Array } interface Reader { /** @@ -17067,7 +19084,7 @@ namespace bufio { * byte read (possibly a character belonging to the line end) even if that * byte is not part of the line returned by ReadLine. */ - readLine(): [string, boolean] + readLine(): [string | Array, boolean] } interface Reader { /** @@ -17078,7 +19095,7 @@ namespace bufio { * ReadBytes returns err != nil if and only if the returned data does not * end in delim. For simple uses, a Scanner may be more convenient. */ - readBytes(delim: string): string + readBytes(delim: number): string | Array } interface Reader { /** @@ -17089,7 +19106,7 @@ namespace bufio { * ReadString returns err != nil if and only if the returned data does not * end in delim. For simple uses, a Scanner may be more convenient. */ - readString(delim: string): string + readString(delim: number): string } interface Reader { /** @@ -17115,7 +19132,8 @@ namespace bufio { /** * Reset discards any unflushed buffered data, clears any error, and resets * b to write its output to w. Calling Reset on the zero value of Writer - * initializes the internal buffer to the default size. + * initializes the internal buffer to the default size. Calling w.Reset(w) + * (that is, resetting a Writer to itself) does nothing. */ reset(w: io.Writer): void } @@ -17134,7 +19152,7 @@ namespace bufio { * succeeding Write call. The buffer is only valid until the next write * operation on b. */ - availableBuffer(): string + availableBuffer(): string | Array } interface Writer { /** @@ -17149,18 +19167,18 @@ namespace bufio { * bytes written. If nn < len(p), it also returns an error explaining why * the write is short. */ - write(p: string): number + write(p: string | Array): number } interface Writer { /** WriteByte writes a single byte. 
*/ - writeByte(c: string): void + writeByte(c: number): void } interface Writer { /** * WriteRune writes a single Unicode code point, returning the number of * bytes written and any error. */ - writeRune(r: string): number + writeRune(r: number): number } interface Writer { /** @@ -17181,7 +19199,11 @@ namespace bufio { } } -namespace subscriptions {} +/** + * Package types implements some commonly used db serializable types like + * datetime, json, etc. + */ +namespace types {} /** * Package mail implements parsing of mail messages. @@ -17189,12 +19211,13 @@ namespace subscriptions {} * For the most part, this package follows the syntax as specified by RFC 5322 * and extended by RFC 6532. Notable divergences: * - * * Obsolete address formats are not parsed, including addresses with - * embedded route information. - * * The full range of spacing (the CFWS syntax element) is not supported, - * such as breaking addresses across lines. - * * No unicode normalization is performed. - * * The special characters ()[]:;@\, are allowed to appear unquoted in names. + * - Obsolete address formats are not parsed, including addresses with + * embedded route information. + * - The full range of spacing (the CFWS syntax element) is not supported, + * such as breaking addresses across lines. + * - No unicode normalization is performed. + * - The special characters ()[]:;@\, are allowed to appear unquoted in names. + * - A leading From line is permitted, as in mbox format (RFC 4155). */ namespace mail { /** @@ -17202,11 +19225,11 @@ namespace mail { * [bg@example.com](mailto:bg@example.com)" is represented as Address{Name: * "Barry Gibbs", Address: "bg@example.com"}. */ - interface Address { - name: string // Proper name; may be empty. + interface Address_In { + name?: string // Proper name; may be empty. address: string // user@domain } - interface Address { + interface Address extends Address_In { /** * String formats the address as a valid RFC 5322 address. 
If the address's * name contains non-ASCII characters the name will be rendered according to @@ -17217,3 +19240,334 @@ namespace mail { } namespace search {} + +/** + * Package slog provides structured logging, in which log records include a + * message, a severity level, and various other attributes expressed as + * key-value pairs. + * + * It defines a type, [Logger], which provides several methods (such as + * [Logger.Info] and [Logger.Error]) for reporting events of interest. + * + * Each Logger is associated with a [Handler]. A Logger output method creates a + * [Record] from the method arguments and passes it to the Handler, which + * decides how to handle it. There is a default Logger accessible through + * top-level functions (such as [Info] and [Error]) that call the corresponding + * Logger methods. + * + * A log record consists of a time, a level, a message, and a set of key-value + * pairs, where the keys are strings and the values may be of any type. As an + * example, + * + * slog.Info('hello', 'count', 3) + * + * Creates a record containing the time of the call, a level of Info, the + * message "hello", and a single pair with key "count" and value 3. + * + * The [Info] top-level function calls the [Logger.Info] method on the default + * Logger. In addition to [Logger.Info], there are methods for Debug, Warn and + * Error levels. Besides these convenience methods for common levels, there is + * also a [Logger.Log] method which takes the level as an argument. Each of + * these methods has a corresponding top-level function that uses the default + * logger. + * + * The default handler formats the log record's message, time, level, and + * attributes as a string and passes it to the [log] package. + * + * 2022/11/08 15:28:26 INFO hello count=3 + * + * For more control over the output format, create a logger with a different + * handler. 
This statement uses [New] to create a new logger with a TextHandler + * that writes structured records in text form to standard error: + * + * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + * + * [TextHandler] output is a sequence of key=value pairs, easily and + * unambiguously parsed by machine. This statement: + * + * logger.Info('hello', 'count', 3) + * + * Produces this output: + * + * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + * + * The package also provides [JSONHandler], whose output is line-delimited JSON: + * + * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + * logger.Info("hello", "count", 3) + * + * Produces this output: + * + * { + * "time": "2022-11-08T15:28:26.000000000-05:00", + * "level": "INFO", + * "msg": "hello", + * "count": 3 + * } + * + * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. + * There are options for setting the minimum level (see Levels, below), + * displaying the source file and line of the log call, and modifying attributes + * before they are logged. + * + * Setting a logger as the default with + * + * slog.SetDefault(logger) + * + * Will cause the top-level functions like [Info] to use it. [SetDefault] also + * updates the default logger used by the [log] package, so that existing + * applications that use [log.Printf] and related functions will send log + * records to the logger's handler without needing to be rewritten. + * + * Some attributes are common to many log calls. For example, you may wish to + * include the URL or trace identifier of a server request with all log events + * arising from the request. Rather than repeat the attribute with every log + * call, you can use [Logger.With] to construct a new Logger containing the + * attributes: + * + * logger2 := logger.With("url", r.URL) + * + * The arguments to With are the same key-value pairs used in [Logger.Info]. 
The + * result is a new Logger with the same handler as the original, but additional + * attributes that will appear in the output of every call. + * + * # Levels + * + * A [Level] is an integer representing the importance or severity of a log + * event. The higher the level, the more severe the event. This package defines + * constants for the most common levels, but any int can be used as a level. + * + * In an application, you may wish to log messages only at a certain level or + * greater. One common configuration is to log messages at Info or higher + * levels, suppressing debug logging until it is needed. The built-in handlers + * can be configured with the minimum level to output by setting + * [HandlerOptions.Level]. The program's `main` function typically does this. + * The default value is LevelInfo. + * + * Setting the [HandlerOptions.Level] field to a [Level] value fixes the + * handler's minimum level throughout its lifetime. Setting it to a [LevelVar] + * allows the level to be varied dynamically. A LevelVar holds a Level and is + * safe to read or write from multiple goroutines. To vary the level dynamically + * for an entire program, first initialize a global LevelVar: + * + * var programLevel = new slog.LevelVar() // Info by default + * + * Then use the LevelVar to construct a handler, and make it the default: + * + * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + * slog.SetDefault(slog.New(h)) + * + * Now the program can change its logging level with a single statement: + * + * programLevel.Set(slog.LevelDebug) + * + * # Groups + * + * Attributes can be collected into groups. A group has a name that is used to + * qualify the names of its attributes. How this qualification is displayed + * depends on the handler. [TextHandler] separates the group and attribute names + * with a dot. [JSONHandler] treats each group as a separate JSON object, with + * the group name as the key. 
+ * + * Use [Group] to create a Group attribute from a name and a list of key-value + * pairs: + * + * slog.Group('request', 'method', r.Method, 'url', r.URL) + * + * TextHandler would display this group as + * + * request.method=GET request.url=http://example.com + * + * JSONHandler would display it as + * + * "request":{"method":"GET","url":"http://example.com"} + * + * Use [Logger.WithGroup] to qualify all of a Logger's output with a group name. + * Calling WithGroup on a Logger results in a new Logger with the same Handler + * as the original, but with all its attributes qualified by the group name. + * + * This can help prevent duplicate attribute keys in large systems, where + * subsystems might use the same keys. Pass each subsystem a different Logger + * with its own group name so that potential duplicates are qualified: + * + * logger := slog.Default().With("id", systemID) + * parserLogger := logger.WithGroup("parser") + * parseInput(input, parserLogger) + * + * When parseInput logs with parserLogger, its keys will be qualified with + * "parser", so even if it uses the common key "id", the log line will have + * distinct keys. + * + * # Contexts + * + * Some handlers may wish to include information from the [context.Context] that + * is available at the call site. One example of such information is the + * identifier for the current span when tracing is enabled. + * + * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first + * argument, as do their corresponding top-level functions. + * + * Although the convenience methods on Logger (Info and so on) and the + * corresponding top-level functions do not take a context, the alternatives + * ending in "Context" do. For example, + * + * slog.InfoContext(ctx, 'message') + * + * It is recommended to pass a context to an output method if one is available. + * + * # Attrs and Values + * + * An [Attr] is a key-value pair. 
The Logger output methods accept Attrs as well + * as alternating keys and values. The statement + * + * slog.Info('hello', slog.Int('count', 3)) + * + * Behaves the same as + * + * slog.Info('hello', 'count', 3) + * + * There are convenience constructors for [Attr] such as [Int], [String], and + * [Bool] for common types, as well as the function [Any] for constructing Attrs + * of any type. + * + * The value part of an Attr is a type called [Value]. Like an [any], a Value + * can hold any Go value, but it can represent typical values, including all + * numbers and strings, without an allocation. + * + * For the most efficient log output, use [Logger.LogAttrs]. It is similar to + * [Logger.Log] but accepts only Attrs, not alternating keys and values; this + * allows it, too, to avoid allocation. + * + * The call + * + * logger.LogAttrs(ctx, slog.LevelInfo, 'hello', slog.Int('count', 3)) + * + * Is the most efficient way to achieve the same output as + * + * slog.Info('hello', 'count', 3) + * + * # Customizing a type's logging behavior + * + * If a type implements the [LogValuer] interface, the [Value] returned from its + * LogValue method is used for logging. You can use this to control how values + * of the type appear in logs. For example, you can redact secret information + * like passwords, or gather a struct's fields in a Group. See the examples + * under [LogValuer] for details. + * + * A LogValue method may return a Value that itself implements [LogValuer]. The + * [Value.Resolve] method handles these cases carefully, avoiding infinite loops + * and unbounded recursion. Handler authors and others may wish to use + * Value.Resolve instead of calling LogValue directly. + * + * # Wrapping output methods + * + * The logger functions use reflection over the call stack to find the file name + * and line number of the logging call within the application. This can produce + * incorrect source information for functions that wrap slog. 
For instance, if + * you define this function in file mylog.go: + * + * func Infof(format string, args ...any) { + * slog.Default().Info(fmt.Sprintf(format, args...)) + * } + * + * And you call it like this in main.go: + * + * Infof('hello, %s', 'world') + * + * Then slog will report the source file as mylog.go, not main.go. + * + * A correct implementation of Infof will obtain the source location (pc) and + * pass it to NewRecord. The Infof function in the package-level example called + * "wrapping" demonstrates how to do this. + * + * # Working with Records + * + * Sometimes a Handler will need to modify a Record before passing it on to + * another Handler or backend. A Record contains a mixture of simple public + * fields (e.g. Time, Level, Message) and hidden fields that refer to state + * (such as attributes) indirectly. This means that modifying a simple copy of a + * Record (e.g. by calling [Record.Add] or [Record.AddAttrs] to add attributes) + * may have unexpected effects on the original. Before modifying a Record, use + * [Record.Clone] to create a copy that shares no state with the original, or + * create a new Record with [NewRecord] and build up its Attrs by traversing the + * old ones with [Record.Attrs]. + * + * # Performance considerations + * + * If profiling your application demonstrates that logging is taking significant + * time, the following suggestions may help. + * + * If many log lines have a common attribute, use [Logger.With] to create a + * Logger with that attribute. The built-in handlers will format that attribute + * only once, at the call to [Logger.With]. The [Handler] interface is designed + * to allow that optimization, and a well-written Handler should take advantage + * of it. + * + * The arguments to a log call are always evaluated, even if the log event is + * discarded. If possible, defer computation so that it happens only if the + * value is actually logged. 
For example, consider the call + * + * slog.Info('starting request', 'url', r.URL.String()) // may compute String unnecessarily + * + * The URL.String method will be called even if the logger discards Info-level + * events. Instead, pass the URL directly: + * + * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed + * + * The built-in [TextHandler] will call its String method, but only if the log + * event is enabled. Avoiding the call to String also preserves the structure of + * the underlying value. For example [JSONHandler] emits the components of the + * parsed URL as a JSON object. If you want to avoid eagerly paying the cost of + * the String call without causing the handler to potentially inspect the + * structure of the value, wrap the value in a fmt.Stringer implementation that + * hides its Marshal methods. + * + * You can also use the [LogValuer] interface to avoid unnecessary work in + * disabled log calls. Say you need to log some expensive value: + * + * slog.Debug('frobbing', 'value', computeExpensiveValue(arg)) + * + * Even if this line is disabled, computeExpensiveValue will be called. To avoid + * that, define a type implementing LogValuer: + * + * type expensive struct { arg int } + * + * func (e expensive) LogValue() slog.Value { + * return slog.AnyValue(computeExpensiveValue(e.arg)) + * } + * + * Then use a value of that type in log calls: + * + * slog.Debug("frobbing", "value", expensive{arg}) + * + * Now computeExpensiveValue will only be called when the line is enabled. + * + * The built-in handlers acquire a lock before calling [io.Writer.Write] to + * ensure that each record is written in one piece. User-defined handlers are + * responsible for their own locking. + * + * # Writing a handler + * + * For a guide to writing a custom handler, see + * https://golang.org/s/slog-handler-guide. + */ +namespace slog { + // @ts-ignore + import loginternal = internal + /** Kind is the kind of a Value. 
*/ + interface Kind extends Number {} + interface Kind { + string(): string + } + /** + * A LogValuer is any Go value that can convert itself into a Value for + * logging. + * + * This mechanism may be used to defer expensive operations until they are + * needed, or to expand a single value into a sequence of components. + */ + interface LogValuer { + [key: string]: any + logValue(): Value + } +} diff --git a/src/services/InstanceService/index.ts b/src/services/InstanceService/index.ts index 11fac426..22e728c1 100644 --- a/src/services/InstanceService/index.ts +++ b/src/services/InstanceService/index.ts @@ -3,7 +3,6 @@ import { EDGE_APEX_DOMAIN, INSTANCE_APP_HOOK_DIR, INSTANCE_APP_MIGRATIONS_DIR, - INSTANCE_DATA_DB, mkAppUrl, mkContainerHomePath, mkDocUrl, @@ -16,7 +15,7 @@ import { PocketbaseService, PortService, proxyService, - SqliteService, + SpawnConfig, } from '$services' import { assertTruthy, @@ -34,6 +33,7 @@ import { asyncExitHook, mkInternalUrl, now } from '$util' import { flatten, map, values } from '@s-libs/micro-dash' import Bottleneck from 'bottleneck' import { globSync } from 'glob' +import stringify from 'json-stringify-safe' import { basename, join } from 'path' import { ClientResponseError } from 'pocketbase' import { AsyncReturnType } from 'type-fest' @@ -248,6 +248,34 @@ export const instanceService = mkSingleton( }) healthyGuard() + /** Create spawn config */ + const spawnArgs: SpawnConfig = { + subdomain: instance.subdomain, + instanceId: instance.id, + port: newPort, + dev: instance.dev, + extraBinds: flatten([ + globSync(join(INSTANCE_APP_MIGRATIONS_DIR(), '*.js')).map( + (file) => + `${file}:${mkContainerHomePath( + `pb_migrations/${basename(file)}`, + )}:ro`, + ), + globSync(join(INSTANCE_APP_HOOK_DIR(), '*.js')).map( + (file) => + `${file}:${mkContainerHomePath( + `pb_hooks/${basename(file)}`, + )}:ro`, + ), + ]), + env: { + ...instance.secrets, + PH_APP_NAME: instance.subdomain, + PH_INSTANCE_URL: mkEdgeUrl(instance.subdomain), + }, + 
version, + } + /** Sync admin account */ if (instance.syncAdmin) { const id = instance.uid @@ -255,36 +283,11 @@ export const instanceService = mkSingleton( const { email, tokenKey, passwordHash } = await client.getUserTokenInfo({ id }) dbg(`Token info is`, { email, tokenKey, passwordHash }) - const sqliteService = await SqliteService() - const db = await sqliteService.getDatabase( - INSTANCE_DATA_DB(instance.id), - ) - userInstanceLogger.info(`Syncing admin login`) - try { - // First, try upserting - await db(`_admins`) - .insert({ id, email, tokenKey, passwordHash }) - .onConflict('id') - .merge({ email, tokenKey, passwordHash }) - - userInstanceLogger.info(`${email} has been successfully sync'd`) - } catch (e) { - // Upsert could fail if the email exists under a different ID - // If that happens, it means they created an admin account with the same email - // manually. In that case, just update the pw hash - try { - userInstanceLogger.info(`Got an error on admin sync upsert. ${e}`) - userInstanceLogger.info( - `${email} may already exist under a different ID, trying to update instead`, - ) - await db(`_admins`) - .update({ tokenKey, passwordHash }) - .where({ email }) - userInstanceLogger.info(`${email} has been successfully sync'd`) - } catch (e) { - userInstanceLogger.error(`Failed to sync admin account: ${e}`) - } - } + spawnArgs.env!.ADMIN_SYNC = stringify({ + email, + tokenKey, + passwordHash, + }) } /* @@ -292,32 +295,7 @@ export const instanceService = mkSingleton( */ const childProcess = await (async () => { try { - const cp = await pbService.spawn({ - subdomain: instance.subdomain, - instanceId: instance.id, - port: newPort, - dev: instance.dev, - extraBinds: flatten([ - globSync(join(INSTANCE_APP_MIGRATIONS_DIR(), '*.js')).map( - (file) => - `${file}:${mkContainerHomePath( - `pb_migrations/${basename(file)}`, - )}:ro`, - ), - globSync(join(INSTANCE_APP_HOOK_DIR(), '*.js')).map( - (file) => - `${file}:${mkContainerHomePath( - 
`pb_hooks/${basename(file)}`, - )}:ro`, - ), - ]), - env: { - ...instance.secrets, - PH_APP_NAME: instance.subdomain, - PH_INSTANCE_URL: mkEdgeUrl(instance.subdomain), - }, - version, - }) + const cp = await pbService.spawn(spawnArgs) return cp } catch (e) { diff --git a/tsconfig.json b/tsconfig.json index 5a3e05b9..bd0e2bae 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -24,6 +24,5 @@ "$shared": ["src/shared"] } }, - "include": ["./src"], - "exclude": ["src/instance-app"] + "include": ["./src"] }