Compare commits

..

No commits in common. "ec6a0d81a98cbbcff71453ebf9d4f2499e46bdcb" and "60db12abe18694f84930d4886415fbced5705185" have entirely different histories.

41 changed files with 594 additions and 2001 deletions

View file

@@ -1,5 +0,0 @@
# .resources
this directory contains internal "template" files, copied by HTTP.sh during first run/init.
editing them directly here won't do much :p

View file

@@ -1,10 +0,0 @@
## app config
## your application-specific config goes here!
# worker_add example 5
cfg[enable_multipart]=false # by default, uploading files is disabled
if [[ "$run_once" ]]; then
# the following will only run once at startup, not with every request
:
fi

View file

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
source templates/head.sh
echo "<h1>Hello from HTTP.sh!</h1><br>To get started with your app, check out $(pwd)/${cfg[namespace]}/
<ul><li>$(pwd)/${cfg[namespace]}/${cfg[root]} - your (public) files go here</li>
<li>$(pwd)/${cfg[namespace]}/workers/ - worker directory, with an example one ready to go</li>
<li>$(pwd)/${cfg[namespace]}/views/ - individual views can be stored there, to be later referenced by routes.sh</li>
<li>$(pwd)/${cfg[namespace]}/templates/ - template files live over there</li>
<li>$(pwd)/${cfg[namespace]}/config.sh - config for everything specific to your app AND workers</li>
<li>$(pwd)/${cfg[namespace]}/routes.sh - config for the HTTP.sh router</li></ul>
Fun things outside of the app directory:
<ul><li>$(pwd)/config/master.sh - master server config</li>
<li>$(pwd)/config/<hostname> - config loaded if a request is made to a specific hostname</li>
<li>$(pwd)/storage/ - directory for storing all and any data your app may produce</li>
<li>$(pwd)/secret/ - user accounts and other secret tokens live here</li>
<li>$(pwd)/src/ - HTTP.sh src, feel free to poke around ;P</li></ul>"

View file

@@ -1,2 +0,0 @@
#!/usr/bin/env bash
date

View file

@@ -1,14 +0,0 @@
## routes - application-specific routes
##
## HTTP.sh supports both serving files using a directory structure (webroot),
## and using routes. The latter may come in handy if you want to create nicer
## paths, e.g.
##
## (webroot) https://example.com/profile.shs?name=asdf
## ... may become ...
## (routes) https://example.com/profile/asdf
##
## To set up routes, define rules in this file (see below for examples)
# router "/test" "app/views/test.shs"
# router "/profile/:user" "app/views/user.shs"

View file

@@ -1,14 +1,15 @@
FROM alpine:3.21 FROM alpine:3.14
RUN apk upgrade -U && apk add bash sed grep nmap-ncat socat file findutils jq curl argon2 RUN apk update \
&& apk add sed xxd grep findutils file nmap-ncat socat jq bash file curl
WORKDIR /app WORKDIR /httpsh
COPY . . COPY . .
EXPOSE 1337 EXPOSE 1337
VOLUME /app/app VOLUME /httpsh/config
VOLUME /app/config VOLUME /httpsh/app
VOLUME /app/storage VOLUME /httpsh/storage
VOLUME /app/secret VOLUME /httpsh/secret
CMD ["/app/http.sh"] CMD ["/httpsh/http.sh"]

View file

@@ -1,11 +1,157 @@
Copyright 2020-2024, sdomi et al. ### GNU LESSER GENERAL PUBLIC LICENSE
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Version 3, 29 June 2007
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Copyright (C) 2007 Free Software Foundation, Inc.
<https://fsf.org/>
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. This version of the GNU Lesser General Public License incorporates the
terms and conditions of version 3 of the GNU General Public License,
supplemented by the additional permissions listed below.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #### 0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the
GNU General Public License.
"The Library" refers to a covered work governed by this License, other
than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
#### 1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
#### 2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
- a) under this License, provided that you make a good faith effort
to ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
- b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
#### 3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from a
header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
- a) Give prominent notice with each copy of the object code that
the Library is used in it and that the Library and its use are
covered by this License.
- b) Accompany the object code with a copy of the GNU GPL and this
license document.
#### 4. Combined Works.
You may convey a Combined Work under terms of your choice that, taken
together, effectively do not restrict modification of the portions of
the Library contained in the Combined Work and reverse engineering for
debugging such modifications, if you also do each of the following:
- a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
- b) Accompany the Combined Work with a copy of the GNU GPL and this
license document.
- c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
- d) Do one of the following:
- 0) Convey the Minimal Corresponding Source under the terms of
this License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
- 1) Use a suitable shared library mechanism for linking with
the Library. A suitable mechanism is one that (a) uses at run
time a copy of the Library already present on the user's
computer system, and (b) will operate properly with a modified
version of the Library that is interface-compatible with the
Linked Version.
- e) Provide Installation Information, but only if you would
otherwise be required to provide such information under section 6
of the GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the Application
with a modified version of the Linked Version. (If you use option
4d0, the Installation Information must accompany the Minimal
Corresponding Source and Corresponding Application Code. If you
use option 4d1, you must provide the Installation Information in
the manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.)
#### 5. Combined Libraries.
You may place library facilities that are a work based on the Library
side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
- a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities, conveyed under the terms of this License.
- b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
#### 6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
as you received it specifies that a certain numbered version of the
GNU Lesser General Public License "or any later version" applies to
it, you have the option of following the terms and conditions either
of that published version or of any later version published by the
Free Software Foundation. If the Library as you received it does not
specify a version number of the GNU Lesser General Public License, you
may choose any version of the GNU Lesser General Public License ever
published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View file

@@ -1,43 +1,68 @@
# HTTP.sh # HTTP.sh
Node.js, but `| sed 's/Node/HTTP/;s/js/sh/'`.
the *coolest* web framework (in Bash) to date. HTTP.sh is (by far) the most extensible attempt at creating a web framework in Bash, and (AFAIK) the only one that's actively maintained. Although I strive for code quality, this is still rather experimental and may contain bugs.
We now have an IRC channel! Join #http.sh @ irc.libera.chat Originally made for Junction Stupidhack 2020; Created by [sdomi](https://sakamoto.pl/), [ptrcnull](https://ptrcnull.me/) and [selfisekai](https://selfisekai.rocks/).
## Documentation ## Quick Start
We have some guides and general documentation in the [docs](docs/) directory. Among them: If you want to build a new webapp from scratch:
- A [quick start](docs/quick-start.md) guide ```
- General [directory structure](docs/directory-structure.md) ./http.sh init
- [CLI usage](docs/running.md) ./http.sh
- [Tests](docs/tests.md) ```
- [HTTP Router](docs/router.md)
- [List of security fixes](docs/sec-fixes/) If you're setting up HTTP.sh for an existing application:
```
git clone https://git.sakamoto.pl/laudom/ocw/ app # example repo :P
./http.sh
```
We also support Docker! Both a Dockerfile and an example docker-compose.yml are included for your convenience. Containerizing your webapp is as easy as `docker-compose up -d`
## Dependencies ## Dependencies
Absolutely necessary: - Bash (4.x should work, but we'll need 5.0 soon)
- [Ncat](https://nmap.org/ncat), not openbsd-nc, not netcat, not nc
- Bash (5.x, not interested in backwards compat) - socat (because the above is slightly broken)
- either [Ncat](https://nmap.org/ncat) (not openbsd-nc, not netcat, not nc) or socat, or a combo of both - pkill
- GNU grep/sed - mktemp
- jq (probably not needed just yet, but it will be in 1.0)
Full list of dependencies: [required](src/dependencies.required), [optional](src/dependencies.optional). - dd (for accounts, multipart/form-data and websockets)
- sha1sum, sha256sum, base64 (for accounts and simple auth)
- curl (for some demos)
## Known faults ## Known faults
- if ncat fails to bind to `[::]`, change the bind to `127.0.0.1` or `0` in `config/master.sh` - can't change the HTTP status code from Shell Server scripts. This could theoretically be done with custom vhost configs and some `if` statements, but this would be a rather nasty solution to that problem.
- `$post_multipart` doesn't keep original names - could be fixed by parsing individual headers from the multipart request instead of skipping them all - `$post_multipart` doesn't keep original names - could be fixed by parsing individual headers from the multipart request instead of skipping them all
- websocket impl isn't properly finished - it won't ever throw a 500, thus it fails silently
- fails with an empty response, instead of throwing 400/500
## Directory structure
- ${cfg[namespace]} (`app` by default)
- ${cfg[root]} (`webroot` by default) - public application root
- workers/ - scripts that execute periodically live there (see examples)
- views/ - for use with HTTP.sh router
- config.sh - application-level config file
- config
- master.sh - main server config file - loaded on boot and with every request
- host:port - if a file matching the Host header is found, HTTP.sh will load it request-wide
- src
- server source files and modules
- response
- files corresponding to specific HTTP status codes
- listing.sh (code 210) is actually HTTP 200, but triggered in a directory with autoindex turned on and without a valid `index.shs` file
- templates - section templates go here
- secret - users, passwords and other Seecret data should be stored here
- storage - random data storage for your webapp
## Variables that we think are cool! ## Variables that we think are cool!
![](https://f.sakamoto.pl/d6584c01-1c48-42b9-935b-d9a89af4e071file_101.jpg) ![](https://f.sakamoto.pl/d6584c01-1c48-42b9-935b-d9a89af4e071file_101.jpg)
(this data may be slightly outdated. Full docs TODO.)
- get_data - holds data from GET parameters - get_data - holds data from GET parameters
- /?test=asdf -> `${get_data[test]}` == `"asdf"` - /?test=asdf -> `${get_data[test]}` == `"asdf"`
- params - holds parsed data from URL router - params - holds parsed data from URL router

1
config/localhost Normal file
View file

@@ -0,0 +1 @@
cfg[title]='Laura is cute :3'

View file

@@ -1,6 +1,6 @@
declare -A cfg declare -A cfg
cfg[ip]=[::] # IP address to bind to - use [::] to bind to all cfg[ip]=127.0.0.1 # IP address to bind to - use 0.0.0.0 to bind to all
cfg[http]=true # enables/disables listening on HTTP cfg[http]=true # enables/disables listening on HTTP
cfg[port]=1337 # HTTP port cfg[port]=1337 # HTTP port
@@ -13,7 +1,7 @@ cfg[index]='index.shs'
cfg[autoindex]=true cfg[autoindex]=true
cfg[auth_required]=false cfg[auth_required]=false
cfg[auth_realm]="asdf" cfg[auth_realm]="Laura is cute <3"
cfg[ssl]=false # enables/disables listening on HTTPS cfg[ssl]=false # enables/disables listening on HTTPS
cfg[ssl_port]=8443 cfg[ssl_port]=8443
@@ -21,25 +21,22 @@ cfg[ssl_cert]=''
cfg[ssl_key]='' cfg[ssl_key]=''
cfg[extension]='shs' cfg[extension]='shs'
#cfg[encoding]='UTF-8' # UTF-8 by default, used by iconv cfg[extra_headers]='server: HTTP.sh/0.95 (devel)'
cfg[extra_headers]="server: HTTP.sh/$HTTPSH_VERSION (devel)"
cfg[title]="HTTP.sh $HTTPSH_VERSION" cfg[title]='HTTP.sh 0.95'
cfg[php_enabled]=false # enable PHP script evalutaion (requires PHP)
cfg[python_enabled]=false # enable Python script evalutaion (requires Python)
cfg[log]='log' # filename cfg[log]='log' # filename
# proxy functionality is very WiP
cfg[proxy]=false
cfg[proxy_url]='http://example.com/'
# mail handler config # mail handler config
cfg[mail]="" cfg[mail]=""
cfg[mail_server]="" cfg[mail_server]=""
cfg[mail_password]="" cfg[mail_password]=""
cfg[mail_ssl]=true cfg[mail_ssl]=true
cfg[mail_ignore_bad_cert]=false cfg[mail_ignore_bad_cert]=false
# unset for legacy sha256sum hashing (not recommended)
cfg[hash]="argon2id"
cfg[cookie_path]="/"
# should registering automatically login the user?
# useful for flows involving a confirmation e-mail
cfg[register_should_login]=true

View file

@@ -1,49 +0,0 @@
# File / Directory structure
(alphabetical order; state for 2024-08-05)
- `config` contains per-vhost configuration settings. `config/master.sh` gets loaded by default,
`config/<hostname>[:port]` gets loaded based on the `Host` header.
- `docs` is what you're reading now. Hi!!
- `secret` is where user data gets stored. think: user accounts and sessions.
- `src` contains the majority of HTTP.sh's code.
- `response/*` are files executed based on computed return code. `response/200.sh` is a bit
special, because it handles the general "success" path. Refactor pending.
- `account.sh` is the middleware for user account management
- `dependencies.*` store the list of required and optional deps, newline delimetered
- `mail.sh` has some crude SMTP code, for sending out mails
- `mime.sh` contains a glue function for handling special cases where `file` command doesn't
return the proper mimetype
- `misc.sh` consists of functions that didn't really fit anywhere else. Of note, `html_encode`,
`url_encode`, `url_decode`, `header` and various cookie functions all live there for now.
- `notORM.sh` is, as I said, not an [ORM](https://en.wikipedia.org/wiki/Object%E2%80%93relational_mapping)
- `route.sh` defines a small function for handling adding the routes
- `server.sh` is where most of the demons live
- `template.sh` is where the rest of the demons live
- `worker.sh` is the literal embodiment of "we have cron at home"; workers are just background
jobs that run every n minutes; you can also start and stop them on will! *fancy*
- `ws.sh` is an incomplete WebSocket implementation
- `storage` is like `secret`, but you can generally use it for whatever
- `templates` will be moved/removed soon (`head.sh` has *nothing* to do with the current templating
system; it has some handlers for remaking things you put into `meta[]` array into HTML `<head>`
fields. Should not be used, at least not in its current form.)
- `tests` is where all the tests live!
The actually important files are:
- `http.sh` - run this and see what happens
- `tst.sh` - [the test suite](tests.md)
## suggested skeleton structure in `app/`
FYI: this is merely a suggestion. `./http.sh init` will create some of those directories for you,
but it's fine to move things around. A lot of it can be changed within `config/master.sh`, even the
directory name itself!
- `src` for various backend code
- `templates` for HTML in our special templating language
- `views` for individual pages / endpoints
- `webroot` for static files, or .shs scripts that don't use the router
- `config.sh` has some general, always-included stuff
- `routes.sh` configures the router; entries should point into `views/`
- `localcfg.sh` may be sourced from `config.sh` and contain only local config (useful for developing
stuff with others through git, for instance; `localcfg.sh` should then be added to `.gitignore`)

View file

@@ -1,101 +0,0 @@
# HTTP.sh: quick start
Welcome to the exciting world of Bash witchery! I'll be your guide on this webdev adventure today.
## about HTTP.sh
HTTP.sh is a very extensive web framework. I use it for quick and dirty hacks, and I "designed" it
in a way where you don't need to write a lot of code to do some basic stuff. I'm also gradually
adding middleware that helps you do more advanced stuff. With some regards, there are already
multiple ways one could implement a web app in HTTP.sh; Thus, I feel like I need this to be heard:
**There are no bad ways to write code here.** You can still write *bad code*, but this is a safe
space where nobody shall tell you "Y is garbage, you should use X instead!";
This strongly applies to specific features of the framework: You can use the templating engine, or
you can just `echo` a bunch of stuff directly from your script. You can use the URL router, or you
could just name your scripts under the webroot in a fancy way. **As long as it works, it's good :3**
## Getting started
First, clone the repository. I'm sure you know how to do that; Afterwards, try running:
```
./http.sh init
./http.sh
```
`init` will lay out some directories, and running it w/o any params will just start the server.
If you're missing any dependencies, you should now see a list of them.
By default, http.sh starts on port 1337; Try going to http://localhost:1337/ - if you see a welcome
page, it's working!!
We have a "debug mode" under `./http.sh debug`. Check [running.md](running.md) for more options.
## Basic scripting
By default, your application lives in `app/`. See [directory-structure.md](directory-structure.md)
for more info on what goes where. For now, go into `app/webroot/` and remove `index.shs`. That
should bring you to an empty directory listing; Static files can be put as-is into `app/webroot/`
and they'll be visible within the directory!
To create a script, make a new file with `.shs` extension, and start writing a script like normal.
All of your `stdout` (aka: everything you `echo`) goes directly to the output. Everything sent to
`stderr` will be shown in the `./http.sh debug` output.
## Parameters
There are a few ways of receiving input; The most basic ones are `get_data` and `post_data`, which
are associative arrays that handle GET params and POST (body) params, respectively. Consider the
following example:
```
#!/bin/bash
echo '<html><head><meta charset="utf-8"></head><body>'
if [[ ! "${get_data[example]}" ]]; then
echo '<form>
<input type="text" name="example">
<input type="submit">
</form>'
else
echo "<p>you sent: $(html_encode "${get_data[example]}")</p>"
fi
echo '</body></html>'
```
When opened in a browser, this example looks like so:
![screenshot of a simple web page. there's a text box, and a button saying Submit Query](https://f.sakamoto.pl/IwIalnWw.png)
... and after submitting data, it looks like that:
![screenshot of another page. it says "you sent: meow!"](https://f.sakamoto.pl/IwIy0thg.png)
## Security
Remember to use sufficient quotes in your scripts, and escape untrusted data (read: ALL data you
didn't write/create yourself. This is especially important when parameter splitting may occur;
For instance, consider:
```
rm storage/${get_data[file]}
```
vs
```
rm -- "storage/$(basename "${get_data[file]}")"
```
The first one can fail due to:
- spaces (if `?file=a+b+c+d`, then it will remove `storage/a`, `b`, `c` and `d`). Hence, you get
arbitrary file deletion.
- unescaped filename (param containing `../` leads to path traversal)
- unterminated parameter expansion (`--` in `rm --` terminates switches; after this point, only
file names can occur)
Furthermore, if you're displaying user-controlled data in your app, remember to use `html_encode`
to prevent cross-site scripting attacks.

View file

@@ -1,36 +0,0 @@
# HTTP.sh: URL router
After running `./http.sh init`, your `app` directory should include a file called `routes.sh` - this
is where you define custom routes. The syntax is as follows:
```
router "/uri/path" "${cfg[namespace]}/views/file.shs"
```
This can be used to remap files that are already in `webroot`, but to prevent confusion, it is
recommended to make a separate directory for routed files. In other HTTP.sh projects, it's usually
`views`.
The router also can be used to pass parameters:
```
router "/user/:username" "${cfg[namespace]}/views/profile.shs"
router "/user/:username/:postid" "${cfg[namespace]}/views/post.shs"
```
All router parameters are available at runtime through `${params[]}` associative array.
A sample `profile.shs` could look like this:
```
#!/bin/bash
echo "$(html_encode "${params[username]}")'s profile"
```
## Limitations
- The param name can only contain the following characters: `[A-Za-z0-9]`
- Currently, the param itself can only contain the following characters: `[A-Za-z0-9.,%:\\-_]`;
Otherwise, the route won't match, and you'll likely get a 404. Support for other special chars
will be added somewhere down the line.
- Router takes precedence over normal file matching; This could allow one to override a file.

View file

@@ -1,11 +0,0 @@
# Running http.sh
## cli args
The arg parsing is a bit rudimentary atm. Assume only one option supported per invocation.
- `init` creates an app skeleton and writes example config. Optional second parameter sets the
namespace (app directory) name.
- `debug` shows stderr (useful for debugging)
- `debuggier` shows stderr and calltrace
- `shell` drops you into an environment practically equivalent to the runtime

View file

@@ -1,9 +0,0 @@
# 2024-12-15 Possible pattern injection in notORM
Prior to commit a00b1b00ee64215dfdd575cf3c51e2f7c387761f, notORM was vulnerable to a pattern
injection attack, which could potentially lead to privilege escalation through the account system.
The vulnerability arose due to an inconsistency with how certain versions of sed handle escaped
hex characters (`\xNN`). GNU sed expands the escaped characters and treats them as a raw part of
the pattern as long as Extended Regex (`-E`) mode is used. This behavior is not present within
busybox sed, which is why it hasn't been caught before.

View file

@@ -1,169 +0,0 @@
# the test framework
We have a small test harness! It lives in `./tst.sh` in the root of the HTTP.sh repo. It's inspired
by some init systems, and a bit influenced by how APKBUILD/PKGBUILDs are structured. A very basic
test is attached below:
```
tst() {
return 0
}
```
A `tst()` function is all you need in a test. Running the test can be done like so:
```
$ ./tst.sh tests/example.sh
OK: tests/example.sh
Testing done!
OK: 1
FAIL: 0
```
If running multiple tests is desired, I recommend calling `./tst.sh tests/*`, and prepending the
filenames with numbers to make sure they run in the correct sequence.
You can also contain multiple tests in a file by grouping them into a function, and then adding the
function names to an array:
```
a() {
tst() {
return 0
}
}
b() {
tst() {
return 1
}
}
subtest_list=(
a
b
)
```
This will yield the following result *(output subject to change)*:
```
--- tests/example.sh ---
OK: a
FAIL: b
(res: )
Testing done!
OK: 1
FAIL: 1
```
Of note: `tst.sh` is designed in a way where *most* functions will fall through; If you'd like to
run the same test against a different set of checks (see below) then you *don't* need to redefine
the `tst()` function, just changing the checks is enough.
---
## return codes
The following return codes are defined:
- 0 as success
- 1 as error (test execution continues)
- 255 as fatal error (cleans up and exits immediately)
## determining success / failure
Besides very simple return-code based matching, `tst.sh` also supports stdout matching with the
following variables:
- `match` (matches the whole string)
- `match_sub` (matches a substring)
- `match_begin` (matches the beginning)
- `match_end` (matches the end)
- `match_not` (inverse substring match)
If any of those are defined, all except fatal return codes are ignored. If more than one of those
is defined, it checks the list above top-to-bottom and picks the first one that is set, ignoring
all others.
## special functions
The framework defines two special functions, plus a few callbacks that can be overriden:
### prepare
`prepare` runs **once** after definition, right before the test itself. As of now, it's the only
function that gets cleaned up after each run (by design; see section `statefullness` below)
By default (undefined state), `prepare` does nothing.
```
prepare() {
echo 'echo meow' > app/webroot/test.shs
}
tst() {
curl localhost:1337/test.shs
}
match="meow"
```
*(note: this test requires tst.sh to be used with http.sh, and for http.sh to be running)*
### cleanup
`cleanup` runs after every test. The name should be self-explanatory. Define as `cleanup() { :; }`
to disable behavior from previous tests.
By default (undefined state), `cleanup` does nothing.
```
prepare() {
echo 'echo meow' > app/webroot/test.shs
}
tst() {
curl localhost:1337/test.shs
}
cleanup() {
rm app/webroot/test.shs
}
match="meow"
```
*(note: same thing as above)*
### on_success, on_error, on_fatal
Called on every success, failure and fatal error. First two call `on_{success,error}_default`,
which increments the counter and outputs the OK/FAIL message. The third one just logs the FATAL,
cleans up and exits. Overloading `on_fatal` is not recommended; While overloading the other two,
make sure to add a call to the `_default` function, or handle the numbers gracefully by yourself.
## statefullness
This framework is designed in a way where a lot of the state is inherited from previous tests. This
is by-design, to make sure that there's less repetition in the tests themselves. It is up to the
author of the tests to remember about cleaning up variables and other state that could affect any
further tests in the chain.
Currently, state is cleaned up under the following circumstances:
- all `match` variables get cleaned up after every test
- `prepare()` function is reset after every test (so, each definition of `prepare` will run
exactly *once*)
- upon switching files, `tst()` and `cleanup()` get reset to initial values. Of note, those two
**do** get inherited between subtests in a single file!
- upon termination of the test harness, it tries to kill all child processes
The following state **is not** cleaned up:
- `tst()` and `cleanup()` between subtests in a single file
- `on_error()`, `on_success()` functions
- any global user-defined variables, also between files
- any started processes
- any modified files (we don't have a way to track those atm, although I may look into this)

211
http.sh
View file

@ -1,101 +1,148 @@
#!/usr/bin/env bash #!/usr/bin/env bash
trap ctrl_c INT trap ctrl_c INT
ctrl_c() {
if [[ ! -f "config/master.sh" ]]; then
mkdir -p config
cat <<EOF > "config/master.sh"
declare -A cfg
cfg[ip]=0.0.0.0 # IP address to bind to - use 0.0.0.0 to bind to all
cfg[http]=true # enables/disables listening on HTTP
cfg[port]=1337 # HTTP port
cfg[socat_only]=false
cfg[namespace]='app'
cfg[root]='webroot/'
cfg[index]='index.shs'
cfg[autoindex]=true
cfg[auth_required]=false
cfg[auth_realm]="asdf"
cfg[ssl]=false # enables/disables listening on HTTPS
cfg[ssl_port]=8443
cfg[ssl_cert]=''
cfg[ssl_key]=''
cfg[extension]='shs'
cfg[extra_headers]='server: HTTP.sh/0.95 (devel)'
cfg[title]='HTTP.sh 0.95'
cfg[php_enabled]=false # enable PHP script evalutaion (requires PHP)
cfg[python_enabled]=false # enable Python script evalutaion (requires Python)
cfg[log]='log' # filename
cfg[proxy]=false # you probably want to configure this per-url
cfg[proxy_url]='' # regexp matching valid URLs to proxy
cfg[proxy_param]='url' # /proxy?url=...
# mail handler config
cfg[mail]=""
cfg[mail_server]=""
cfg[mail_password]=""
cfg[mail_ssl]=true
cfg[mail_ignore_bad_cert]=false
EOF
fi
source config/master.sh
function ctrl_c() {
[[ $socket != '' ]] && rm $socket [[ $socket != '' ]] && rm $socket
pkill -P $$ pkill -P $$
echo -e "Cleaned up, exitting.\nHave an awesome day!!" echo -e "Cleaned up, exitting.\nHave an awesome day!!"
} }
setup_config() { if [[ ! -f "$(pwd)/http.sh" ]]; then
[[ ! "$1" ]] && namespace=app || namespace="$1" echo -e "Please run HTTP.sh inside it's designated directory\nRunning the script from arbitrary locations isn't supported."
mkdir -p config
cp ".resources/primary_config.sh" "config/master.sh"
echo "cfg[namespace]=$namespace # default namespace" >> "config/master.sh"
echo "cfg[init_version]=$HTTPSH_VERSION" >> "config/master.sh"
}
if [[ ! -f "$PWD/http.sh" ]]; then
echo -e "Please run HTTP.sh inside its designated directory\nRunning the script from arbitrary locations isn't supported."
exit 1 exit 1
fi
source src/version.sh
if [[ "$1" == "init" ]]; then # will get replaced with proper parameter parsing in 1.0
[[ ! "$2" ]] && namespace=app || namespace="$2"
if [[ ! -f "config/master.sh" ]]; then
setup_config
elif [[ -d "$namespace" ]]; then
echo -e "ERR: HTTP.sh has been initialized before.\nSpecify a new namespace directory, or perish (remove '$namespace'?)"
exit 1
else
echo "WARN: HTTP.sh has been initialized before. Continuing w/o recreating config."
fi
source config/master.sh
mkdir -p "${cfg[namespace]}/${cfg[root]}" "${cfg[namespace]}/workers/example" "${cfg[namespace]}/views" "${cfg[namespace]}/templates"
touch "${cfg[namespace]}/config.sh" "${cfg[namespace]}/workers/example/control"
cp ".resources/config.sh" "${cfg[namespace]}/config.sh"
cp ".resources/routes.sh" "${cfg[namespace]}/routes.sh"
cp .resources/example_worker/* "${cfg[namespace]}/workers/example/"
cp .resources/example_webroot/* "${cfg[namespace]}/${cfg[root]}/index.shs"
echo -e "Success..?\nTry running \`./http.sh\` now"
exit 0
elif [[ ! -f "config/master.sh" ]]; then
if [[ -d "app" ]]; then # if the de-facto default app dir already exists, copy the cfg
setup_config
else
echo "ERR: Initialize HTTP.sh first! run './http.sh init'"
exit 1
fi
fi
source config/master.sh
if [[ "$HTTPSH_VERSION" != "${cfg[init_version]}" ]]; then
echo "WARN: HTTP.sh was updated since this instance was initialized (config v${cfg[init_version]:-(none)}, runtime v$HTTPSH_VERSION). There may be breaking changes. Edit cfg[init_version] in config/master.sh to remove this warning."
fi fi
while read i; do
if ! which $i > /dev/null 2>&1; then for i in $(cat src/dependencies.required); do
which $i > /dev/null 2>&1
if [[ $? != 0 ]]; then
echo "ERROR: can't find $i" echo "ERROR: can't find $i"
error=true error=true
fi fi
done < src/dependencies.required done
for i in $(cat src/dependencies.optional); do
while read i; do
which $i > /dev/null 2>&1 which $i > /dev/null 2>&1
[[ $? != 0 ]] && echo "WARNING: can't find $i" [[ $? != 0 ]] && echo "WARNING: can't find $i"
done < src/dependencies.optional done
if ! which ncat > /dev/null 2>&1; then which ncat > /dev/null 2>&1
if [[ $? != 0 ]]; then
if [[ ${cfg[socat_only]} != true ]]; then if [[ ${cfg[socat_only]} != true ]]; then
echo "ERR: can't find ncat, and cfg[socat_only] is not set to true" echo "ERROR: can't find ncat, and cfg[socat_only] is not set to true"
error=true error=true
fi fi
fi fi
if [[ $error == true ]]; then if [[ $error == true ]]; then
echo "Fix above dependencies, and I might just let you pass." echo "Fix above dependencies, and I might just let you pass."
exit 1 exit 0
fi fi
if [[ "$1" == 'shell' ]]; then if [[ $1 == "init" ]]; then # will get replaced with proper parameter parsing in 1.0
bash --rcfile <(echo ' #set -e
shopt -s extglob
x() { declare -p data;} # for notORM mkdir -p "${cfg[namespace]}/${cfg[root]}" "${cfg[namespace]}/workers/example" "${cfg[namespace]}/views" "${cfg[namespace]}/templates"
source config/master.sh touch "${cfg[namespace]}/config.sh" "${cfg[namespace]}/workers/example/control"
source src/account.sh cat <<EOF > "${cfg[namespace]}/config.sh"
source src/mail.sh ## app config
source src/mime.sh ## your application-specific config goes here!
source src/misc.sh
source src/notORM.sh # worker_add example 5
source src/template.sh cfg[enable_multipart]=false # by default, uploading files is disabled
source "${cfg[namespace]}/config.sh" EOF
PS1="[HTTP.sh] \[\033[01;34m\]\w\[\033[00m\]\$ "')
cat <<EOF > "${cfg[namespace]}/workers/example/worker.sh"
#!/usr/bin/env bash
date
EOF
cat <<EOF > "${cfg[namespace]}/${cfg[root]}/index.shs"
#!/usr/bin/env bash
source templates/head.sh
echo "<h1>Hello from HTTP.sh!</h1><br>To get started with your app, check out $(pwd)/${cfg[namespace]}/
<ul><li>$(pwd)/${cfg[namespace]}/${cfg[root]} - your (public) files go here</li>
<li>$(pwd)/${cfg[namespace]}/workers/ - worker directory, with an example one ready to go</li>
<li>$(pwd)/${cfg[namespace]}/views/ - individual views can be stored there, to be later referenced by routes.sh</li>
<li>$(pwd)/${cfg[namespace]}/templates/ - template files (.t) live over there</li>
<li>$(pwd)/${cfg[namespace]}/config.sh - config for everything specific to your app AND workers</li>
<li>$(pwd)/${cfg[namespace]}/routes.sh - config for the HTTP.sh router</li></ul>
Fun things outside of the app directory:
<ul><li>$(pwd)/config/master.sh - master server config</li>
<li>$(pwd)/config/<hostname> - config loaded if a request is made to a specific hostname</li>
<li>$(pwd)/storage/ - directory for storing all and any data your app may produce</li>
<li>$(pwd)/secret/ - user accounts and other secret tokens live here</li>
<li>$(pwd)/src/ - HTTP.sh src, feel free to poke around ;P</li></ul>"
EOF
cat <<EOF > "${cfg[namespace]}/routes.sh"
## routes - application-specific routes
##
## HTTP.sh supports both serving files using a directory structure (webroot),
## and using routes. The latter may come in handy if you want to create nicer
## paths, e.g.
##
## (webroot) https://example.com/profile.shs?name=asdf
## ... may become ...
## (routes) https://example.com/profile/asdf
##
## To set up routes, define rules in this file (see below for examples)
# router "/test" "app/views/test.shs"
# router "/profile/:user" "app/views/user.shs"
EOF
chmod +x "${cfg[namespace]}/workers/example/worker.sh"
echo -e "Success..?\nTry running \`./http.sh\` now"
exit 0 exit 0
fi fi
@ -105,37 +152,29 @@ cat <<EOF >&2
| |__| | | | | | | |_| | |___ | |__| | | |__| | | | | | | |_| | |___ | |__| |
| |__| | | | | | | ___/\___ \ | |__| | | |__| | | | | | | ___/\___ \ | |__| |
| | | | | | | | | | ___\ \| | | | | | | | | | | | | | ___\ \| | | |
|_| |_| |_| |_| |_| □ /_____/|_| |_| v$HTTPSH_VERSION |_| |_| |_| |_| |_| □ /_____/|_| |_|
EOF EOF
if [[ "$1" == "debug" ]]; then if [[ "$1" == "debug" ]]; then
cfg[dbg]=true cfg[dbg]=true
echo "[DEBUG] Activated debug mode - stderr will be shown" echo "[DEBUG] Activated debug mode - stderr will be shown"
elif [[ "$1" == "debuggier" ]]; then
cfg[dbg]=true
cfg[debuggier]=true
export PS4=' ${BASH_SOURCE}:${LINENO}: ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
echo "[DEBUG] Activated debuggier mode - stderr and call trace will be shown"
set -x
fi fi
source src/worker.sh source src/worker.sh
if [[ -f "${cfg[namespace]}/config.sh" ]]; then if [[ -f "${cfg[namespace]}/config.sh" ]]; then
run_once=true
source "${cfg[namespace]}/config.sh" source "${cfg[namespace]}/config.sh"
unset run_once
fi fi
if [[ ${cfg[socat_only]} == true ]]; then if [[ ${cfg[socat_only]} == true ]]; then
echo "[INFO] listening directly via socat, assuming no ncat available" echo "[INFO] listening directly via socat, assuming no ncat available"
echo "[HTTP] listening on ${cfg[ip]}:${cfg[port]}" echo "[HTTP] listening on ${cfg[ip]}:${cfg[port]}"
if [[ ${cfg[dbg]} == true ]]; then if [[ ${cfg[dbg]} == true ]]; then
socat tcp-listen:${cfg[port]},bind=${cfg[ip]},fork "exec:bash -c \'src/server.sh ${cfg[debuggier]}\'" socat tcp-listen:${cfg[port]},bind=${cfg[ip]},fork "exec:bash -c src/server.sh"
else else
socat tcp-listen:${cfg[port]},bind=${cfg[ip]},fork "exec:bash -c src/server.sh" 2>> /dev/null socat tcp-listen:${cfg[port]},bind=${cfg[ip]},fork "exec:bash -c src/server.sh" 2>> /dev/null
if [[ $? != 0 ]]; then if [[ $? != 0 ]]; then
echo "[WARN] socat quit with a non-zero status; Maybe the port is in use?" echo "[WARN] socat exitted with a non-zero status; Maybe the port is in use?"
fi fi
fi fi
else else
@ -147,11 +186,11 @@ else
# to quit after the first time-outed connection, ignoring the # to quit after the first time-outed connection, ignoring the
# "broker" (-k) mode. This is a workaround for this. # "broker" (-k) mode. This is a workaround for this.
while true; do while true; do
ncat -i 600s -l -U "$socket" -c "src/server.sh ${cfg[debuggier]}" -k ncat -i 600s -l -U "$socket" -c src/server.sh -k
done & done &
else else
while true; do while true; do
ncat -i 600s -l -U "$socket" -c src/server.sh -k 2>> /dev/null ncat -i 600s -l -U "$socket" -c src/server.sh -k 2>> /dev/null &
done & done &
fi fi
socat TCP-LISTEN:${cfg[port]},fork,bind=${cfg[ip]} UNIX-CLIENT:$socket & socat TCP-LISTEN:${cfg[port]},fork,bind=${cfg[ip]} UNIX-CLIENT:$socket &

View file

@ -1,241 +1,87 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# account.sh - account and session mgmt # account.sh - account and session mgmt
# TODO: add stricter argument checks for all the funcs
# registers a new user.
# first two params are strings; third is a reference to an array with # register(username, password)
# optional extra data (email, OTP...)
#
# [extra=()] register(username, password)
function register() { function register() {
if [[ ! "$1" || ! "$2" ]]; then local username=$(echo -ne $(sed -E "s/ /_/g;s/\:/\-/g;s/\%/\\x/g" <<< "$1"))
reason="User/password empty!"
return 1
fi
local username=$(url_decode "$1")
unset IFS
data_get secret/users.dat "$username" if [[ $(grep "$username:" secret/users.dat) != '' ]]; then
if [[ $? != 2 && $? != 4 ]]; then # entry not found / file not found
reason="This user already exists!" reason="This user already exists!"
return 1 return 1
fi fi
local salt=$(dd if=/dev/urandom bs=16 count=1 status=none | xxd -p) local salt=$(dd if=/dev/urandom bs=256 count=1 | sha1sum | cut -c 1-16)
local hash=$(echo -n $2$salt | sha256sum | cut -c 1-64)
_password_hash "$2" "$salt" local token=$(dd if=/dev/urandom bs=32 count=1 | sha1sum | cut -c 1-40)
set_cookie_permanent "sh_session" $token
local out=("$username" "$hash" "$salt" "" "${extra[@]}") set_cookie_permanent "username" $username
data_add secret/users.dat out
echo "$username:$hash:$salt:$token" >> secret/users.dat
[[ "${cfg[register_should_login]}" == true ]] && _new_session "$username"
set_cookie_permanent "sh_session" "${session[2]}"
set_cookie_permanent "username" "$username"
unset hash
} }
# login(username, password, [forever]) -> [res] # login(username, password)
function login() { function login() {
if [[ ! "$1" || ! "$2" ]]; then local username=$(echo -ne $(sed -E 's/%/\\x/g' <<< "$1"))
reason="User/password empty!" IFS=':'
return 1 local user=($(grep -P "$username:" secret/users.dat))
fi
local username=$(url_decode "$1")
[[ "$3" ]] && local forever=true
unset IFS unset IFS
if [[ $(echo -n $2${user[2]} | sha256sum | cut -c 1-64 ) == "${user[1]}" ]]; then
if ! data_get secret/users.dat "$username" 0 user; then set_cookie_permanent "sh_session" "${user[3]}"
reason="Bad credentials" set_cookie_permanent "username" "$username"
return 1
fi
_password_hash "$2" "${user[2]}"
if [[ "$hash" == "${user[1]}" ]]; then
_new_session "$username" "$forever"
if [[ "$forever" == true ]]; then
set_cookie_permanent "sh_session" "${session[2]}"
set_cookie_permanent "username" "$username"
else
set_cookie "sh_session" "${session[2]}"
set_cookie "username" "$username"
fi
declare -ga res=("${user[@]:4}")
unset hash
return 0 return 0
else else
remove_cookie "sh_session" remove_cookie "sh_session"
remove_cookie "username" remove_cookie "username"
reason="Bad credentials" reason="Invalid credentials!!11"
unset hash
return 1 return 1
fi fi
} }
# login_simple(base64) # login_simple(base64)
function login_simple() { function login_simple() {
local data=$(base64 -d <<< "$3") local data=$(base64 -d <<< "$3")
local password=$(sed -E 's/^(.*)\://' <<< "$data") local password=$(sed -E 's/^(.*)\://' <<< "$data")
local login=$(sed -E 's/\:(.*)$//' <<< "$data") local login=$(sed -E 's/\:(.*)$//' <<< "$data")
if [[ ! "$password" || ! "$login" ]]; then IFS=':'
return 1 local user=($(grep "$login:" secret/users.dat))
fi unset IFS
if [[ $(echo -n $password${user[2]} | sha256sum | cut -c 1-64 ) == ${user[1]} ]]; then
data_get secret/users.dat "$login" 0 user
_password_hash "$password" "${user[2]}"
if [[ "$hash" == "${user[1]}" ]]; then
r[authorized]=true r[authorized]=true
else else
r[authorized]=false r[authorized]=false
fi fi
unset hash
} }
# logout() # logout()
function logout() { function logout() {
if [[ "${cookies[sh_session]}" ]]; then
data_yeet secret/sessions.dat "${cookies[sh_session]}" 2
fi
remove_cookie "sh_session" remove_cookie "sh_session"
remove_cookie "username" remove_cookie "username"
} }
# session_verify(session) -> [res] # session_verify(session)
function session_verify() { function session_verify() {
[[ ! "$1" ]] && return 1 if [[ $(grep ":$1" secret/users.dat) != '' && $1 != '' ]]; then
unset IFS return 0
local session else
local user return 1
if data_get secret/sessions.dat "$1" 2 session; then
if data_get secret/users.dat "${session[0]}" 0 user; then # double-check if tables agree
declare -ga res=("${user[@]:4}")
return 0
fi
fi fi
return 1
} }
# session_get_username(session) # session_get_username(session)
function session_get_username() { function session_get_username() {
[[ ! "$1" ]] && return 1 [[ "$1" == "" ]] && return
unset IFS
local session
if data_get secret/sessions.dat "$1" 2 session; then IFS=':'
if data_get secret/users.dat "${session[0]}" 0 user; then # double-check if tables agree local data=($(grep ":$1$" secret/users.dat))
echo "${user[0]}" unset IFS
return 0 echo ${data[0]}
fi
fi
return 1
} }
# THIS FUNCTION IS DANGEROUS # THIS FUNCTION IS DANGEROUS
# delete_account(username) # delete_account(username)
function delete_account() { function delete_account() {
[[ ! "$1" ]] && return 1 [[ "$1" == "" ]] && return
data_yeet secret/users.dat "$1" sed -i "s/^$1:.*//;/^$/d" secret/users.dat
}
# user_reset_password(username, token, new_password) -> $?, ${user[@]}
user_reset_password() {
[[ ! "$1" ]] && return 1 # sensitive function, so we're checking all three
[[ ! "$2" ]] && return 1 # there's probably a better way,
[[ ! "$3" ]] && return 1 # but i don't care.
if data_get secret/users.dat "$1" 0 user; then
if [[ "$2" == "${user[3]}" ]]; then
_password_hash "$3" "${user[2]}"
user[1]="$hash"
user[3]=''
data_replace secret/users.dat "$1" user
session_purge "$1"
unset hash token
return 0
fi
fi
return 1
}
# user_change_password(username, old_password, new_password) -> $?, ${user[@]}
user_change_password() {
[[ ! "$1" ]] && return 1
[[ ! "$2" ]] && return 1
[[ ! "$3" ]] && return 1
if data_get secret/users.dat "$1" 0 user; then
_password_hash "$2" "${user[2]}"
if [[ "$hash" == "${user[1]}" ]]; then
_password_hash "$3" "${user[2]}"
[[ ! "$hash" ]] && return
user[1]="$hash"
user[3]=''
data_replace secret/users.dat "$1" user
session_purge "$1"
unset hash token
return 0
fi
fi
unset hash
return 1
}
# user_gen_reset_token(username) -> $?, $token, ${user[@]}
user_gen_reset_token() {
[[ ! "$1" ]] && return 1
if data_get secret/users.dat "$1" 0 user; then
user[3]="$(dd if=/dev/urandom bs=20 count=1 status=none | xxd -p)"
data_replace secret/users.dat "$1" user
token="${user[3]}"
else
return 1
fi
}
# logs out ALL sessions for user
#
# session_purge(username)
session_purge() {
data_yeet secret/sessions.dat "$1"
}
# _new_session(username, forever) -> $session
_new_session() {
[[ ! "$1" ]] && return 1
[[ "$2" == true ]] && local forever=true || local forever=false
session=("$1" "$(date '+%s')" "$(dd if=/dev/urandom bs=24 count=1 status=none | xxd -p)" "$forever")
data_add secret/sessions.dat session
}
_password_hash() {
[[ ! "$1" ]] && return 1
[[ ! "$2" ]] && return 1
if [[ "${cfg[hash]}" == "argon2id" ]]; then
hash="$(echo -n "$1" | argon2 "$2" -id -e)"
else
hash=$(echo -n $1$2 | sha256sum | cut -c 1-64)
fi
} }

View file

@ -2,4 +2,3 @@ sha1sum
sha256sum sha256sum
curl curl
iconv iconv
argon2

View file

@ -6,4 +6,3 @@ mktemp
date date
dd dd
file file
xxd

View file

@ -28,7 +28,5 @@ function mailsend() {
--upload-file "$tmp" \ --upload-file "$tmp" \
--user "${cfg[mail]}:${cfg[mail_password]}" --user "${cfg[mail]}:${cfg[mail_password]}"
res=$?
rm "$tmp" rm "$tmp"
return $res
} }

View file

@ -14,29 +14,16 @@
function get_mime() { function get_mime() {
local file="$@" local file="$@"
if [[ -f "$file" ]]; then local mime="$(file --mime-type -b "$file")"
local mime="$(file --mime-type -b "$file")" if [[ $file == *".htm" || $file == *".html" ]]; then
if [[ $file == *".htm" || $file == *".html" || $mime == "text/html" ]]; then mimetype="text/html"
mimetype="text/html" elif [[ $file == *".shs" || $file == *".py" || $file == *".php" ]]; then
elif [[ $file == *".shs" || $file == *".py" || $file == *".php" ]]; then
mimetype=""
elif [[ $file == *".css" ]]; then
mimetype="text/css"
elif [[ $mime == "text/"* && $mime != "text/xml" ]]; then
mimetype="text/plain"
# Technically image/x-icon isn't correct for all images (image/ico also exists) but
# it's what browser (firefox (sample size: 1)) seem to have the least problems with.
# image/vnd.microsoft.icon was standardized by the IANA but no microsoft software
# understands it, they use image/ico instead. What a mess.
elif [[ $file == *"favicon.ico" ]]; then
mimetype="image/x-icon"
elif [[ $file == *".ico" || $mime == "image/vnd.microsoft.icon" ]]; then
mimetype="image/ico"
else
mimetype="$mime"
fi
else
mimetype="" mimetype=""
elif [[ $file == *".css" ]]; then
mimetype="text/css"
elif [[ $mime == "text/"* && $mime != "text/xml" ]]; then
mimetype="text/plain"
else
mimetype="$mime"
fi fi
} }

View file

@ -3,20 +3,17 @@
# set_cookie(cookie_name, cookie_content) # set_cookie(cookie_name, cookie_content)
function set_cookie() { function set_cookie() {
r[headers]+="Set-Cookie: $1=$2; Path=${cfg[cookie_path]}\r\n" r[headers]+="Set-Cookie: $1=$2\r\n"
cookies["$1"]="$2"
} }
# set_cookie_permanent(cookie_name, cookie_content) # set_cookie_permanent(cookie_name, cookie_content)
function set_cookie_permanent() { function set_cookie_permanent() {
r[headers]+="Set-Cookie: $1=$2; Expires=Mon, 26 Jul 2100 22:45:00 GMT; Path=${cfg[cookie_path]}\r\n" r[headers]+="Set-Cookie: $1=$2; Expires=Mon, 26 Jul 2100 22:45:00 GMT\r\n"
cookies["$1"]="$2"
} }
# remove_cookie(cookie_name) # remove_cookie(cookie_name)
function remove_cookie() { function remove_cookie() {
r[headers]+="Set-Cookie: $1=; Expires=Sat, 02 Apr 2005 20:37:00 GMT\r\n" r[headers]+="Set-Cookie: $1=; Expires=Sat, 02 Apr 2005 20:37:00 GMT\r\n"
unset cookies["$1"]
} }
# header(header, header...) # header(header, header...)
@ -51,19 +48,12 @@ function html_encode() {
# url_encode(string) # url_encode(string)
function url_encode() { function url_encode() {
echo -n "$1" | xxd -p | tr -d '\n' | sed -E 's/.{2}/%&/g' xxd -ps -u <<< "$1" | tr -d '\n' | sed -E 's/.{2}/%&/g'
} }
# url_decode(string) # url_decode(string)
function url_decode() { function url_decode() {
# we should probably fail on invalid data here, echo -ne "$(sed -E 's/%[0-1][0-9a-f]//g;s/%/\\x/g' <<< "$1")"
# but this function is kinda sorta infallible right now
local t=$'\01'
local a="${1//$t}" # strip all of our control chrs for safety
a="${a//+/ }" # handle whitespace
a="${a//%[A-Fa-f0-9][A-Fa-f0-9]/$t&}" # match '%xx', prepend with token
echo -ne "${a//$t%/\\x}" # replace the above with '\\x' and evaluate
} }
# bogus function! # bogus function!

View file

@ -1,337 +0,0 @@
#!/bin/bash
## notORM.sh - clearly, not an ORM.
# basic interface for saving semi-arbitrary data organized in "tables".
## limitations:
# - only for strings (we trim some bytes; see `reserved values` below)
# - currently only supports saving to CSV-with-extra-steps
## function return values:
#
# 0 - success
# 1 - general failure
# 2 - entry not found
# 3 - locked, try again later
# 4 - file not found
## data reserved values:
#
# \x00 - bash yeets it out of existence
# \x01 - delimeter
# \x02 - newline
# \x03 - control chr for sed
delim=$'\01'
newline=$'\02'
ctrl=$'\03'
# TODO: proper locking
# TODO: matching more than one column
repeat() {
local IFS=$'\n'
[[ "$1" -gt 0 ]] && printf -- "$2%.0s" $(seq 1 $1)
}
shopt -s expand_aliases
# internal. parses the `{ }` syntax, starting with 2nd arg.
# alias, not a function, because we want to modify the argv of the parent
# _data_parse_pairs(_, { search, column }, [{ search2, column2 }], ...) -> ${search[@]}, ${column[@]}
alias _data_parse_pairs='
local search=()
local column=()
while shift; do # "shebang reference?" ~ mei
[[ "$1" != "{" ]] && break # yes, we need to match this twice
if [[ "$2" != "}" || "$3" == "}" || "$4" == "}" ]]; then # make sure we dont want to match the bracket
search+=("$2")
else # empty search - just match ANY record
search+=("")
column+=(0)
shift 2
break
fi
if [[ "$3" != "}" ]]; then
column+=("$3")
[[ "$4" != "}" ]] && return 1 # we accept only values in pairs
shift 3
else
column+=(0)
shift 2
if [[ "$2" != "{" ]]; then
shift
break
fi
fi
done
'
# internal function. take search and column, generate a sed matching expr from them
# data_gen_expr() -> $expr
_data_gen_expr() {
# we need the pairs sorted due to how the sed expr generation works
local IFS=$'\01\n'
local i
sorted=($(for (( i=0; i<${#search[@]}; i++ )); do
echo "${column[i]}"$'\01'"${search[i]}"
done | sort -n -t$'\01'))
local last=0
for (( i=0; i<${#sorted[@]}; i=i+2 )); do
if [[ $((sorted[i] - last)) -le 1 ]]; then
expr+="$(_sed_sanitize "${sorted[i+1]}")${delim}"
else
expr+="$(repeat $((sorted[i] - last)) ".*$delim")$(_sed_sanitize "${sorted[i+1]}")${delim}"
fi
last="${sorted[i]}"
done
}
# adds a flat `array` to the `store`.
# a store can be any file, as long as we have r/w access to it and the
# adjacent directory.
#
# 3rd argument is optional, and will specify whether to insert an auto-increment
# ID column. False by default; Setting to true will cause an internal data_iter
# call. The inserted ID column is always the zeroeth one.
#
# this function will create some helper files if they don't exist. those
# shouldn't be removed, as other functions may use them for data mangling.
#
# data_add(store, array, [numbered])
data_add() {
[[ ! -v "$2" ]] && return 1
local -n ref="$2"
local res=
local IFS=$'\n'
if [[ ! -f "$1" ]]; then
if [[ "$3" == true ]]; then
res+="0$delim"
echo "$((${#ref[@]}+1))" > "${1}.cols"
else
echo "${#ref[@]}" > "${1}.cols"
fi
elif [[ "$3" == true ]]; then
local data
data_iter "$1" { } : # get last element
local id=$(( ${data[0]}+1 )) # returns 1 on non-int values
res+="$id$delim"
fi
local i
for i in "${ref[@]}"; do
_trim_control "$i"
res+="$tr$delim"
done
echo "$res" >> "$1" # TODO: some locking
}
# get one entry from store, filtering by search. exit after first result.
# by default uses the 0th column. override with optional `column`.
# returns the data to $res. override with optional `res`
#
# 2nd and 3rd arguments can be repeated, given you enclose each pair
# in curly braces. (e.g. `{ search } { search2 column2 }`)
#
# also can be used as `data_get store { } meow` to match all records
#
# data_get(store, { search, [column] }, ... [res]]) -> $res / ${!-1}
# data_get(store, search, [column], [res]) -> $res / ${!4}
data_get() {
[[ ! "$2" ]] && return 1
[[ ! -f "$1" ]] && return 4
local IFS=$'\n'
local store="$1"
if [[ "$2" == '{' ]]; then
_data_parse_pairs
local -n ref="${1:-res}"
else # compat
local search=("$2")
local column=("${3:-0}")
local -n ref=${4:-res}
fi
local line
while read -r line; do
IFS=$delim
# LOAD-BEARING!!
# without an intermediate variable, bash trims out empty
# objects. expansions be damned
local x="${line//$newline/$'\n'}"
ref=($x)
local i
for (( i=0; i<${#search[@]}; i++ )); do
if [[ "${ref[column[i]]}" != "${search[i]}" && "${search[i]}" ]]; then
continue 2
fi
done
return 0 # only reached if an entry matched all constraints
done < "$store"
unset ref
return 2
}
# run `callback` on all entries from `store` that match `search`.
# by default uses the 0th column. override with optional `column`
#
# immediately exits with 255 if the callback function returned 255
# if there were no matches, returns 2
# if the store wasn't found, returns 4
#
# data_iter(store, { search, [column] }, ... callback) -> $data
# data_iter(store, search, callback, [column]) -> $data
data_iter() {
[[ ! "$3" ]] && return 1
[[ ! -f "$1" ]] && return 4
local store="$1"
local IFS=$'\n'
local r=2
if [[ "$2" == '{' ]]; then
_data_parse_pairs
local callback="$1"
else # compat
local callback="$3"
local search=("$2")
local column=("${4:-0}")
fi
while read -r line; do
IFS=$delim
# LOAD BEARING; see data_get
local x="${line//$newline/$'\n'}"
data=($x)
IFS=
local i
for (( i=0; i<${#search[@]}; i++ )); do
if [[ "${data[column[i]]}" != "${search[i]}" && "${search[i]}" ]]; then
continue 2
fi
done
"$callback" # only reached if an entry matched all constraints
[[ $? == 255 ]] && return 255
r=0
done < "$store"
return $r
}
# replace a value in `store` with `array`, filtering by `search`.
# by default uses the 0th column. override with optional `column`
#
# `value` is any string, which will directly replace `search`
#
# data_replace_value(store, search, value, [column])
data_replace_value() {
[[ ! "$3" ]] && return 1
[[ ! -f "$1" ]] && return 4
local column=${4:-0}
local IFS=' '
# NOTE: sed in normal (not extended -E mode) requires `\(asdf\)` to make a match!
if [[ $column == 0 ]]; then
local expr="s$ctrl^$(_sed_sanitize "$2")\(${delim}.*\)$ctrl$(_sed_sanitize "$3")\1$ctrl"
else
local expr="s$ctrl^\($(repeat $column ".*$delim")\)$(_sed_sanitize "$2")\($delim$(repeat $(( $(cat "${1}.cols") - column - 1 )) ".*$delim")\)"'$'"$ctrl\1$(_sed_sanitize "$3")\2$ctrl"
fi
sed -i "$expr" "$1"
}
# replace an entire entry in `store` with `array`, filtering by `search`.
# by default uses the 0th column. override with optional `column`
#
# pass `array` without expanding (`arr`, not `$arr`).
#
# data_replace(store, search, array, [column])
data_replace() {
[[ ! "$3" ]] && return 1
[[ ! -f "$1" ]] && return 4
local store="$1"
local output=
local tr
## currently broken
# if [[ "$2" == '{' ]]; then
# _data_parse_pairs
#
# local -n ref="$1"
#
# local expr
# _data_gen_expr
# expr="s$ctrl^${expr}.*$ctrl"
# else
local column=${4:-0}
local -n ref="$3"
local IFS=' '
if [[ $column == 0 ]]; then
local expr="s$ctrl^$(_sed_sanitize "$2")${delim}.*$ctrl"
else
local expr="s$ctrl^$(repeat $column ".*$delim")$(_sed_sanitize "$2")$delim$(repeat $(( $(cat "${store}.cols") - column - 1 )) ".*$delim")"'$'"$ctrl"
fi
# fi
local i
for i in "${ref[@]}"; do
_trim_control "$i"
output+="$tr$delim"
done
expr+="$(_sed_sanitize_array "$output")$ctrl"
sed -i "$expr" "$store"
}
# deletes entries from the `store` using `search`.
# by default uses the 0th column. override with optional `column`
#
# data_yeet(store, search, [column])
# data_yeet(store, { search, [column] }, ...)
data_yeet() {
[[ ! "$2" ]] && return 1
[[ ! -f "$1" ]] && return 4
local store="$1"
if [[ "$2" == '{' ]]; then
_data_parse_pairs
local expr
_data_gen_expr
expr="/^${expr}.*/d"
else # compat
local search="$2"
local column="${3:-0}"
local IFS=' '
if [[ $column == 0 ]]; then
local expr="/^$(_sed_sanitize "$2")${delim}.*/d"
else
local expr="/^$(repeat $column ".*$delim")$(_sed_sanitize "$2")$delim$(repeat $(( $(cat "${store}.cols") - column - 1 )) ".*$delim")"'$'"/d"
fi
fi
sed -i "$expr" "$store"
}
_sed_sanitize() {
_trim_control "$1"
echo -n "$tr" | xxd -p | tr -d '\n' | sed 's/../\\x&/g'
}
_sed_sanitize_array() {
echo -n "$1" | xxd -p | tr -d '\n' | sed 's/../\\x&/g'
}
# _trim_control(string) -> $tr
_trim_control() {
tr="${1//$delim}" # remove 0x01
tr="${tr//$newline}" # remove 0x02
tr="${tr//$ctrl}" # remove 0x03
tr="${tr//$'\n'/$newline}" # \n -> 0x02
}
shopt -u expand_aliases # back to the default

View file

@ -3,10 +3,10 @@ Connection: Upgrade
Upgrade: WebSocket Upgrade: WebSocket
${cfg[extra_headers]}" ${cfg[extra_headers]}"
if [[ ${r[websocket_key]} != '' ]]; then if [[ ${r[websocket_key]} != '' ]]; then
accept=$(echo -ne $(echo "${r[websocket_key]}""258EAFA5-E914-47DA-95CA-C5AB0DC85B11" | sha1sum | sed 's/ //g;s/-//g;s/.\{2\}/\\x&/g') | base64) accept=$(echo -ne $(printf "${r[websocket_key]}""258EAFA5-E914-47DA-95CA-C5AB0DC85B11" | sha1sum | sed 's/ //g;s/-//g;s/.\{2\}/\\x&/g') | base64)
echo "Sec-WebSocket-Accept: "$accept echo "Sec-WebSocket-Accept: "$accept
fi fi
echo -e "\r\n\r\n" printf "\r\n\r\n"
#echo "Laura is cute <3" #echo "Laura is cute <3"
#WebSocket-Location: ws://localhost:1337/ #WebSocket-Location: ws://localhost:1337/

View file

@ -1,29 +1,21 @@
# TODO: move parts of this into server.sh, or rename the file appropriately
# __headers(end)
# Sets the header and terminates the header block if end is NOT set to false
function __headers() { function __headers() {
if [[ "${cfg[unbuffered]}" != true ]]; then if [[ "${cfg[unbuffered]}" != true ]]; then
if [[ "${r[headers]}" == *'Location'* ]]; then # override for redirects if [[ "${r[headers]}" == *'Location'* ]]; then
echo -ne "HTTP/1.0 302 aaaaa\r\n" printf "HTTP/1.0 302 aaaaa\r\n"
elif [[ "${r[status]}" == '200' || "${r[status]}" == '212' ]]; then # normal or router, should just return 200 else
echo -ne "HTTP/1.0 200 OK\r\n" printf "HTTP/1.0 200 OK\r\n"
else # changed by the user in the meantime :)
[[ ! "${r[status]}" ]] && r[status]=500 # ... if they left it blank
echo -ne "HTTP/1.0 ${r[status]} meow\r\n"
fi fi
[[ "${r[headers]}" != '' ]] && echo -ne "${r[headers]}" [[ "${r[headers]}" != '' ]] && printf "${r[headers]}"
echo -ne "${cfg[extra_headers]}\r\n" printf "${cfg[extra_headers]}\r\n"
else else
echo "uh oh - we're running unbuffered" > /dev/stderr echo "uh oh - we're running unbuffered" > /dev/stderr
fi fi
if [[ ${r[status]} == 200 ]]; then if [[ ${r[status]} == 200 ]]; then
get_mime "${r[uri]}" get_mime "${r[uri]}"
[[ "$mimetype" != '' ]] && echo -ne "content-type: $mimetype\r\n" [[ "$mimetype" != '' ]] && printf "content-type: $mimetype\r\n"
fi fi
printf "\r\n"
[[ "$1" != false ]] && echo -ne "\r\n"
} }
if [[ ${r[status]} == 212 ]]; then if [[ ${r[status]} == 212 ]]; then
@ -32,20 +24,30 @@ if [[ ${r[status]} == 212 ]]; then
else else
temp=$(mktemp) temp=$(mktemp)
source "${r[view]}" > $temp source "${r[view]}" > $temp
__headers false __headers
get_mime "$temp"
# Defaults to text/plain for things it doesn't know, eg. CSS
[[ "$mimetype" != 'text/plain' ]] && echo -ne "content-type: $mimetype\r\n"
echo -ne "\r\n"
cat $temp cat $temp
rm $temp rm $temp
fi fi
elif [[ "${cfg[php_enabled]}" == true && "${r[uri]}" =~ ".php" ]]; then
temp=$(mktemp)
php "${r[uri]}" "$(get_dump)" "$(post_dump)" > $temp
__headers
cat $temp
rm $temp
elif [[ "${cfg[python_enabled]}" == true && "${r[uri]}" =~ ".py" ]]; then
temp=$(mktemp)
python "${r[uri]}" "$(get_dump)" "$(post_dump)" > $temp
__headers
cat $temp
rm $temp
elif [[ "${r[uri]}" =~ \.${cfg[extension]}$ ]]; then elif [[ "${r[uri]}" =~ \.${cfg[extension]}$ ]]; then
temp=$(mktemp) temp=$(mktemp)
source "${r[uri]}" > $temp source "${r[uri]}" > $temp
__headers __headers
if [[ "${cfg[encoding]}" ]]; then if [[ "${cfg[encoding]}" != '' ]]; then
iconv $temp -f UTF-8 -t "${cfg[encoding]}" iconv $temp -f UTF-8 -t "${cfg[encoding]}"
else else
cat $temp cat $temp

View file

@ -1,3 +1,3 @@
echo -ne "HTTP/1.0 401 Unauthorized printf "HTTP/1.0 401 Unauthorized
WWW-Authenticate: Basic realm=\"${cfg[auth_realm]}\" WWW-Authenticate: Basic realm=\"${cfg[auth_realm]}\"
${cfg[extra_headers]}\r\n" ${cfg[extra_headers]}\r\n"

View file

@ -1,5 +1,4 @@
echo -ne "HTTP/1.0 403 Forbidden printf "HTTP/1.0 403 Forbidden
content-type: text/html
${cfg[extra_headers]}\r\n\r\n" ${cfg[extra_headers]}\r\n\r\n"
source templates/head.sh source templates/head.sh
echo "<h1>403: You've been naughty</h1>" echo "<h1>403: You've been naughty</h1>"

View file

@ -1,5 +1,4 @@
echo -ne "HTTP/1.0 404 Not Found printf "HTTP/1.0 404 Not Found
content-type: text/html
${cfg[extra_headers]}\r\n\r\n" ${cfg[extra_headers]}\r\n\r\n"
source templates/head.sh source templates/head.sh
echo "<h1>404 Not Found</h1>" echo "<h1>404 Not Found</h1>"

View file

@ -1,16 +1,15 @@
echo -ne "HTTP/1.0 200 OK printf "HTTP/1.0 200 OK
content-type: text/html
${cfg[extra_headers]}\r\n\r\n" ${cfg[extra_headers]}\r\n\r\n"
source templates/head.sh source templates/head.sh
echo "<h1>Index of $([[ ${r[url]} == '' ]] && echo '/' || echo $(html_encode ${r[url]}))</h1>" printf "<h1>Index of $([[ ${r[url]} == '' ]] && echo '/' || echo $(html_encode ${r[url]}))</h1>"
if [[ ${cookies[username]} != '' ]]; then if [[ ${cookies[username]} != '' ]]; then
echo "Logged in as $(html_encode ${cookies[username]})" echo "Logged in as $(html_encode ${cookies[username]})"
fi fi
echo "<table> printf "<table>
<tr> <tr>
<th>File</th> <th>File</th>
<th>Size</th> <th>Size</th>
@ -25,10 +24,10 @@ for i in $(ls ${r[uri]}); do
unset IFS unset IFS
stats=($(ls -hld "${r[uri]}/$i")) # -hld stands for Half-Life Dedicated stats=($(ls -hld "${r[uri]}/$i")) # -hld stands for Half-Life Dedicated
if [[ -d "${r[uri]}"'/'"$i" ]]; then if [[ -d "${r[uri]}"'/'"$i" ]]; then
echo "<tr><td><a href='$(html_encode "${r[url]}/$i/")'>$(html_encode "$i")</a></td><td>&lt;DIR&gt;</td><td>${stats[5]} ${stats[6]} ${stats[7]}</td></tr>" printf "<tr><td><a href='$(html_encode "${r[url]}/$i/")'>$(html_encode "$i")</a></td><td>&lt;DIR&gt;</td><td>${stats[5]} ${stats[6]} ${stats[7]}</td></tr>"
else else
echo "<tr><td><a href='$(html_encode "${r[url]}/$i")'>$(html_encode "$i")</a></td><td>${stats[4]}B</td><td>${stats[5]} ${stats[6]} ${stats[7]}</td></tr>" printf "<tr><td><a href='$(html_encode "${r[url]}/$i")'>$(html_encode "$i")</a></td><td>${stats[4]}B</td><td>${stats[5]} ${stats[6]} ${stats[7]}</td></tr>"
fi fi
done done
echo "</table><p><i>HTTP.sh server on $(html_encode ${r[host]})</i></p><p>meow!</p>" printf "</table><p><i>HTTP.sh server on $(html_encode ${r[host]})</i></p><p>laura is cute</p>"

22
src/response/proxy.sh Executable file
View file

@ -0,0 +1,22 @@
#!/usr/bin/env bash
url="$(url_decode "$(url_decode "$(sed -E 's/\?/<2F><>Lun4_iS_CuTe<54>/;s/^(.*)<29><>Lun4_iS_CuTe<54>//;s/'"${cfg[proxy_param]}"'=//g' <<< "${r[url]}")")")"
if [[ $(grep -Poh "${cfg[proxy_url]}" <<< "$url") == '' ]]; then
exit 1
fi
host="$(sed -E 's@http(s|)://@@;s@/.*@@' <<< "$url")"
proxy_url="$(sed -E 's/\?.*//g' <<< "${r[url]}")"
headers="$(tr '\r' '\n' <<< "${r[req_headers]}")"
headers+=$'\n'
#params=()
while read line; do
if [[ "$line" != "GET"* && "$line" != "Host:"* && "$line" != '' ]]; then
args+=('-H')
args+=("$line")
fi
done <<< "$headers"
curl --http1.1 "$url" "${args[@]}" -D /dev/stdout | grep -aiv "Transfer-Encoding: chunked" | sed -E '/Location/s/\?/%3f/g;/Location/s/\&/%26/g;/Location/s/\:/%3a/g;/Location/s@/@%2f@g;s@Location%3a @Location: '"$proxy_url"'?'"${cfg[proxy_param]}"'=@'

View file

@ -1,12 +1,4 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# If $1 is set to true, enable the call trace
if [[ "$1" == true ]]; then
set -x
fi
shopt -s extglob
source src/version.sh
source config/master.sh source config/master.sh
source src/mime.sh source src/mime.sh
source src/misc.sh source src/misc.sh
@ -14,7 +6,6 @@ source src/account.sh
source src/mail.sh source src/mail.sh
source src/route.sh source src/route.sh
source src/template.sh source src/template.sh
source src/notORM.sh # to be split off HTTP.sh at some point :^)
[[ -f "${cfg[namespace]}/config.sh" ]] && source "${cfg[namespace]}/config.sh" [[ -f "${cfg[namespace]}/config.sh" ]] && source "${cfg[namespace]}/config.sh"
declare -A r # current request / response declare -A r # current request / response
@ -26,136 +17,100 @@ declare -A params # parsed router data
r[status]=210 # Mommy always said that I was special r[status]=210 # Mommy always said that I was special
r[req_headers]='' r[req_headers]=''
r[payload_type]=none # placeholder
post_length=0 post_length=0
# start reading the stream here instead of the loop below;
# this way, we can detect if the connection is even valid HTTP.
# we're reading up to 8 characters and waiting for a space.
read -d' ' -r -n8 param
shopt -s nocasematch # only for initial parse; saves us *many* sed calls
if [[ "${param,,}" =~ ^(get|post|patch|put|delete|meow) ]]; then # TODO: OPTIONS, HEAD
r[method]="${param%% *}"
read -r param
[[ "${r[method],,}" != "get" ]] && r[post]=true
r[url]="$(sed -E 's/^ *//;s/HTTP\/[0-9]+\.[0-9]+//;s/ //g;s/\/*\r//g;s/\/\/*/\//g' <<< "$param")"
unset IFS
if [[ "${r[url]}" == *'?'* ]]; then
while read -d'&' i; do
name="${i%%=*}"
if [[ "$name" ]]; then
value="${i#*=}"
get_data[$name]="$(url_decode "$value")"
fi
done <<< "${r[url]#*\?}&"
fi
else
exit 1 # TODO: throw 400 here
fi
declare -A headers
IFS=$'\n'
# continue with reading the headers
while read -r param; do while read -r param; do
[[ "$param" == $'\r' ]] && break r[req_headers]+="$param"
[[ "$param" != *":"* ]] && exit 1 # TODO: throw 400 param_l="${param,,}" # lowercase
name=''
IFS=':' value=''
read -ra header_pair <<< "$param" data=''
header_key="${header_pair[0],,}" # To lowercase...
header_key="${header_key##*( )}" # ...trim leading whitespace...
header_key="${header_key%%*( )}" # ...and trailing whitespaces
header_value="${header_pair[@]:1}"
header_value="${header_value##*( )}" # Trim leading whitespace...
headers["${header_key}"]="${header_value%%*( )*($'\r')}" # ...and trailing whitespace and \r
done
unset IFS
# TODO: remove deprecated fields below
r[content_length]="${headers["content-length"]}"
r[user_agent]="${headers["user-agent"]}"
r[websocket_key]="${headers["sec-websocket-key"]}"
r[req_headers]="$headers"
r[url]="$(url_decode "${r[url]}")" # doing this here for.. reasons
r[uri]="$(realpath "${cfg[namespace]}/${cfg[root]}/$(sed -E 's/\?(.*)$//' <<< "${r[url]}")")"
r[url_clean]="${r[url]%\?*}"
[[ -d "${r[uri]}/" ]] && pwd="${r[uri]}" || pwd=$(dirname "${r[uri]}") # dead code
if [[ -n "${headers["content-type"]}" ]]; then
IFS=';'
read -ra content_type <<< "${headers["content-type"]}"
r[content_type]="${content_type[0]}"
if [[ "${r[content_type]}" == "application/x-www-form-urlencoded" ]]; then
r[payload_type]="urlencoded" # TODO: do we want to have a better indicator for this?
elif [[ "${r[content_type]}" == "multipart/form-data" ]]; then
r[payload_type]="multipart"
tmpdir=$(mktemp -d)
if [[ "${r[content_type]}" == "boundary="* ]]; then
boundary="${content_type[@]:1}"
r[content_boundary]="${boundary##*boundary=}"
fi
fi
unset IFS unset IFS
fi
if [[ "$param_l" == $'\015' ]]; then
break
elif [[ "$param_l" == *"content-length:"* ]]; then
r[content_length]="$(sed 's/Content-Length: //i;s/\r//' <<< "$param")"
if [[ -n "${headers["host"]}" ]]; then elif [[ "$param_l" == *"content-type:"* ]]; then
r[host]="${headers["host"]}" r[content_type]="$(sed 's/Content-Type: //i;s/\r//' <<< "$param")"
r[host_portless]="${headers["host"]%%:*}" if [[ "${r[content_type]}" == *"multipart/form-data"* ]]; then
tmpdir=$(mktemp -d)
fi
if [[ "${r[content_type]}" == *"boundary="* ]]; then
r[content_boundary]="$(sed -E 's/(.*)boundary=//i;s/\r//;s/ //' <<< "${r[content_type]}")"
fi
elif [[ "$param_l" == *"host:"* ]]; then
r[host]="$(sed 's/Host: //i;s/\r//;s/\\//g' <<< "$param")"
r[host_portless]="$(sed -E 's/:(.*)$//' <<< "${r[host]}")"
if [[ -f "config/$(basename -- ${r[host]})" ]]; then
source "config/$(basename -- ${r[host]})"
elif [[ -f "config/$(basename -- ${r[host_portless]})" ]]; then
source "config/$(basename -- ${r[host_portless]})"
fi
if [[ -f "config/$(basename -- ${r[host]})" ]]; then elif [[ "$param_l" == *"user-agent:"* ]]; then
source "config/$(basename -- ${r[host]})" r[user_agent]="$(sed 's/User-Agent: //i;s/\r//;s/\\//g' <<< "$param")"
elif [[ -f "config/$(basename -- ${r[host_portless]})" ]]; then
source "config/$(basename -- ${r[host_portless]})" elif [[ "$param_l" == *"upgrade:"* && $(sed 's/Upgrade: //i;s/\r//' <<< "$param") == "websocket" ]]; then
r[status]=101
elif [[ "$param_l" == *"sec-websocket-key:"* ]]; then
r[websocket_key]="$(sed 's/Sec-WebSocket-Key: //i;s/\r//' <<< "$param")"
elif [[ "$param_l" == *"authorization: basic"* ]]; then
login_simple "$param"
elif [[ "$param_l" == *"authorization: bearer"* ]]; then
r[authorization]="$(sed 's/Authorization: Bearer //i;s/\r//' <<< "$param")"
elif [[ "$param_l" == *"cookie: "* ]]; then
IFS=';'
for i in $(IFS=' '; echo "$param" | sed -E 's/Cookie: //i;;s/%/\\x/g'); do
name="$((grep -Poh "[^ ].*?(?==)" | head -1) <<< $i)"
value="$(sed "s/$name=//;s/^ //;s/ $//" <<< $i)"
cookies[$name]="$(echo -e $value)"
done
elif [[ "$param_l" == *"range: bytes="* ]]; then
r[range]="$(sed 's/Range: bytes=//;s/\r//' <<< "$param")"
elif [[ "$param" == *"GET "* ]]; then
r[url]="$(echo -ne "$(url_decode "$(sed -E 's/GET //;s/HTTP\/[0-9]+\.[0-9]+//;s/ //g;s/\/*\r//g;s/\/\/*/\//g' <<< "$param")")")"
data="$(sed -E 's/\?/<2F><>Lun4_iS_CuTe<54>/;s/^(.*)<29><>Lun4_iS_CuTe<54>//;s/\&/ /g' <<< "${r[url]}")"
if [[ "$data" != "${r[url]}" ]]; then
data="$(sed -E 's/\?/<2F><>Lun4_iS_CuTe<54>/;s/^(.*)<29><>Lun4_iS_CuTe<54>//' <<< "${r[url]}")"
IFS='&'
for i in $data; do
name="$(sed -E 's/\=(.*)$//' <<< "$i")"
value="$(sed "s/$name\=//" <<< "$i")"
get_data[$name]="$value"
done
fi
elif [[ "$param" == *"POST "* ]]; then
r[url]="$(echo -ne "$(url_decode "$(sed -E 's/POST //;s/HTTP\/[0-9]+\.[0-9]+//;s/ //g;s/\/*\r//g;s/\/\/*/\//g' <<< "$param")")")"
r[post]=true
# below shamelessly copied from GET, should be moved to a function
data="$(sed -E 's/\?/<2F><>Lun4_iS_CuTe<54>/;s/^(.*)<29><>Lun4_iS_CuTe<54>//;s/\&/ /g' <<< "${r[url]}")"
if [[ "$data" != "${r[url]}" ]]; then
data="$(sed -E 's/\?/<2F><>Lun4_iS_CuTe<54>/;s/^(.*)<29><>Lun4_iS_CuTe<54>//' <<< "${r[url]}")"
IFS='&'
for i in $data; do
name="$(sed -E 's/\=(.*)$//' <<< "$i")"
value="$(sed "s/$name\=//" <<< "$i")"
get_data[$name]="$value"
done
fi
fi fi
fi done
if [[ "${headers["connection"]}" == "upgrade" && "${headers["upgrade"]}" == "websocket" ]]; then r[uri]="$(realpath "${cfg[namespace]}/${cfg[root]}$(sed -E 's/\?(.*)$//' <<< "${r[url]}")")"
r[status]=101 [[ -d "${r[uri]}/" ]] && pwd="${r[uri]}" || pwd=$(dirname "${r[uri]}")
fi
shopt -u nocasematch if [[ $NCAT_LOCAL_PORT == '' ]]; then
if [[ -n "${headers["authorization"]}" ]]; then
if [[ "${headers["authorization"],,}" == "basic"* ]]; then
base64="${headers["authorization"]#[Bb]asic*( )}"
login_simple "${base64##*( )}"
elif [[ "${headers["authorization"],,}" == "bearer"* ]]; then
bearer="${headers["authorization"]#[Bb]earer*( )}"
r[authorization]="${bearer##*( )}"
fi
fi
if [[ -n "${headers["cookie"]}" ]]; then
while read -r -d';' cookie_pair; do
cookie_pair="$(url_decode "$cookie_pair")"
name="${cookie_pair%%=*}"
if [[ -n "$name" ]]; then
# get value, strip potential whitespace
value="${cookie_pair##*=}"
value="${value##*( )}"
value="${value%%*( )}"
cookies["$name"]="$value"
fi
done <<< "${headers["cookie"]};" # This hack is beyond me, just trust the process
fi
if [[ "${headers["range"]}" == "bytes"* ]]; then
r[range]="${headers["range"]#*=}"
fi
if [[ ${headers["x-forwarded-for"]} ]]; then
r[proto]='http'
r[ip]="${headers["x-forwarded-for"]%%[, ]*}"
elif [[ -z "$NCAT_LOCAL_PORT" ]]; then
r[proto]='http' r[proto]='http'
r[ip]="NCAT_IS_BORK" r[ip]="NCAT_IS_BORK"
else else
@ -168,18 +123,18 @@ echo "$(date) - IP: ${r[ip]}, PROTO: ${r[proto]}, URL: ${r[url]}, GET_data: ${ge
[[ -f "${cfg[namespace]}/routes.sh" ]] && source "${cfg[namespace]}/routes.sh" [[ -f "${cfg[namespace]}/routes.sh" ]] && source "${cfg[namespace]}/routes.sh"
if [[ ${r[status]} != 101 ]]; then if [[ ${r[status]} != 101 ]]; then
clean_url="$(sed -E 's/\?.*//' <<< "${r[url]}")"
for (( i=0; i<${#route[@]}; i=i+3 )); do for (( i=0; i<${#route[@]}; i=i+3 )); do
if [[ "$(grep -Poh "^${route[$((i+1))]}$" <<< "${r[url_clean]}")" != "" ]] || [[ "$(grep -Poh "^${route[$((i+1))]}$" <<< "${r[url_clean]}/")" != "" ]]; then if [[ "$(grep -Poh "^${route[$((i+1))]}$" <<< "$clean_url")" != "" ]] || [[ "$(grep -Poh "^${route[$((i+1))]}$" <<< "$clean_url/")" != "" ]]; then
r[status]=212 r[status]=212
r[view]="${route[$((i+2))]}" r[view]="${route[$((i+2))]}"
IFS='/' IFS='/'
url=(${route[$i]}) url=(${route[$i]})
url_=(${r[url_clean]}) url_=(${r[url]})
unset IFS unset IFS
for (( j=0; j<${#url[@]}; j++ )); do for (( j=0; j<${#url[@]}; j++ )); do
# TODO: think about the significance of this if really hard when i'm less tired if [[ ${url_[$j]} != '' ]]; then
if [[ ${url_[$j]} != '' && ${url[$j]} == ":"* ]]; then params[$(sed 's/://' <<< "${url[$j]}")]="${url_[$j]}"
params[${url[$j]/:/}]="${url_[$j]}"
fi fi
done done
break break
@ -189,7 +144,7 @@ if [[ ${r[status]} != 101 ]]; then
if [[ ${r[status]} != 212 ]]; then if [[ ${r[status]} != 212 ]]; then
if [[ -a "${r[uri]}" && ! -r "${r[uri]}" ]]; then if [[ -a "${r[uri]}" && ! -r "${r[uri]}" ]]; then
r[status]=403 r[status]=403
elif [[ "${r[uri]}" != "$(realpath "${cfg[namespace]}/${cfg[root]}")"* ]]; then elif [[ "$(echo -n "${r[uri]}")" != "$(realpath "${cfg[namespace]}/${cfg[root]}")"* ]]; then
r[status]=403 r[status]=403
elif [[ -f "${r[uri]}" ]]; then elif [[ -f "${r[uri]}" ]]; then
r[status]=200 r[status]=200
@ -216,6 +171,10 @@ if [[ "${cfg[auth_required]}" == true && "${r[authorized]}" != true ]]; then
r[status]=401 r[status]=401
fi fi
if [[ "${cfg[proxy]}" == true ]]; then
r[status]=211
fi
if [[ "${r[post]}" == true ]] && [[ "${r[status]}" == 200 || "${r[status]}" == 212 ]]; then if [[ "${r[post]}" == true ]] && [[ "${r[status]}" == 200 || "${r[status]}" == 212 ]]; then
# This whole ordeal is here to prevent passing binary data as a variable. # This whole ordeal is here to prevent passing binary data as a variable.
# I could have done it as an array, but this solution works, and it's # I could have done it as an array, but this solution works, and it's
@ -225,14 +184,14 @@ if [[ "${r[post]}" == true ]] && [[ "${r[status]}" == 200 || "${r[status]}" ==
declare post_multipart declare post_multipart
tmpfile=$(mktemp -p $tmpdir) tmpfile=$(mktemp -p $tmpdir)
dd iflag=fullblock of=$tmpfile ibs=${r[content_length]} count=1 obs=1M dd iflag=fullblock of=$tmpfile ibs=${r[content_length]} count=1 obs=1M
delimeter_len=$(echo -n "${r[content_boundary]}"$'\015' | wc -c) delimeter_len=$(echo -n "${r[content_boundary]}"$'\015' | wc -c)
boundaries_list=$(echo -ne $(grep $tmpfile -ao -e ${r[content_boundary]} --byte-offset | sed -E 's/:(.*)//g') | sed -E 's/ [0-9]+$//') boundaries_list=$(echo -ne $(grep $tmpfile -ao -e ${r[content_boundary]} --byte-offset | sed -E 's/:(.*)//g') | sed -E 's/ [0-9]+$//')
for i in $boundaries_list; do for i in $boundaries_list; do
tmpout=$(mktemp -p $tmpdir) tmpout=$(mktemp -p $tmpdir)
dd iflag=fullblock if=$tmpfile ibs=$(($i+$delimeter_len)) obs=1M skip=1 | while true; do dd iflag=fullblock if=$tmpfile ibs=$(($i+$delimeter_len)) obs=1M skip=1 | while true; do
read -r line read line
if [[ $line == $'\015' ]]; then if [[ $line == $'\015' ]]; then
cat - > $tmpout cat - > $tmpout
break break
@ -246,25 +205,22 @@ if [[ "${r[post]}" == true ]] && [[ "${r[status]}" == 200 || "${r[status]}" ==
done done
rm $tmpfile rm $tmpfile
else else
read -r -N "${r[content_length]}" data read -N "${r[content_length]}" data
if [[ "${r[payload_type]}" == "urlencoded" ]]; then IFS='&'
unset IFS for i in $(tr -d '\n' <<< "$data"); do
while read -r -d'&' i; do name="$(sed -E 's/\=(.*)$//' <<< "$i")"
name="${i%%=*}" param="$(sed "s/$name\=//" <<< "$i")"
value="${i#*=}" post_data[$name]="$param"
post_data[$name]="$(url_decode "$value")" done
echo post_data[$name]="$value" >/dev/stderr unset IFS
done <<< "${data}&"
else
# this is fine?
post_data[0]="${data%\&}"
fi
fi fi
fi fi
if [[ ${r[status]} == 210 && ${cfg[autoindex]} == true ]]; then if [[ ${r[status]} == 210 && ${cfg[autoindex]} == true ]]; then
source "src/response/listing.sh" source "src/response/listing.sh"
elif [[ ${r[status]} == 211 ]]; then
source "src/response/proxy.sh"
elif [[ ${r[status]} == 200 || ${r[status]} == 212 ]]; then elif [[ ${r[status]} == 200 || ${r[status]} == 212 ]]; then
source "src/response/200.sh" source "src/response/200.sh"
elif [[ ${r[status]} == 401 ]]; then elif [[ ${r[status]} == 401 ]]; then

View file

@ -13,7 +13,6 @@ function render() {
local tmp=$(mktemp) local tmp=$(mktemp)
local key local key
IFS=$'\n'
for key in ${!ref[@]}; do for key in ${!ref[@]}; do
if [[ "$key" == "_"* ]]; then # iter mode if [[ "$key" == "_"* ]]; then # iter mode
local subtemplate=$(mktemp) local subtemplate=$(mktemp)
@ -21,7 +20,7 @@ function render() {
echo 's'$'\02''\{\{start '"$key"'\}\}.*\{\{end '"$key"'\}\}'$'\02''\{\{'"$key"'\}\}'$'\02'';' >> "$tmp" echo 's'$'\02''\{\{start '"$key"'\}\}.*\{\{end '"$key"'\}\}'$'\02''\{\{'"$key"'\}\}'$'\02'';' >> "$tmp"
local -n asdf=${ref["$key"]} local -n asdf=${ref[$key]}
local j local j
local value='' local value=''
for j in ${!asdf[@]}; do for j in ${!asdf[@]}; do
@ -33,60 +32,29 @@ function render() {
echo 's'$'\02''\{\{'"$key"'\}\}'$'\02'''"$value"''$'\02'';' >> "$tmp" echo 's'$'\02''\{\{'"$key"'\}\}'$'\02'''"$value"''$'\02'';' >> "$tmp"
rm "$subtemplate" rm "$subtemplate"
elif [[ "$key" == "@"* && "${ref["$key"]}" != '' ]]; then elif [[ "$key" == "@"* && "${ref[$key]}" != '' ]]; then
local value="$(sed -E 's/\&/<2F>UwU<77>/g' <<< "${ref["$key"]}")" local value="$(sed -E 's/\&/<2F>UwU<77>/g' <<< "${ref[$key]}")"
echo 's'$'\02''\{\{\'"$key"'\}\}'$'\02'''"$value"''$'\02''g;' >> "$tmp" #' echo 's'$'\02''\{\{\'"$key"'\}\}'$'\02'''"$value"''$'\02''g;' >> "$tmp"
elif [[ "$key" == '?'* ]]; then elif [[ "$key" == '?'* ]]; then
local _key="\\?${key/?/}" local _key="\\?${key/?/}"
local subtemplate=$(mktemp) local subtemplate=$(mktemp)
echo 's'$'\02''\{\{start '"$_key"'\}\}((.*)\{\{else '"$_key"'\}\}.*\{\{end '"$_key"'\}\}|(.*)\{\{end '"$_key"'\}\})'$'\02''\2\3'$'\02'';' >> "$subtemplate" echo 's'$'\02''\{\{start '"$_key"'\}\}((.*)\{\{else '"$_key"'\}\}.*\{\{end '"$_key"'\}\}|(.*)\{\{end '"$_key"'\}\})'$'\02''\2\3'$'\02'';' >> "$subtemplate"
# TODO: check if this is needed?
# the code below makes sure to resolve the conditional blocks
# *before* anything else. I can't think of *why* this is needed
# right now, but I definitely had a reason in this. Question is, what reason.
cat <<< $(cat "$subtemplate" "$tmp") > "$tmp" # call that cat abuse cat <<< $(cat "$subtemplate" "$tmp") > "$tmp" # call that cat abuse
rm "$subtemplate" rm "$subtemplate"
elif [[ "${ref["$key"]}" != "" ]]; then elif [[ "${ref[$key]}" != "" ]]; then
echo "VALUE: ${ref["$key"]}" > /dev/stderr echo "VALUE: ${ref[$key]}" > /dev/stderr
if [[ "$3" != true ]]; then if [[ "$3" != true ]]; then
local value="$(html_encode <<< "${ref["$key"]}" | sed -E 's/\&/<2F>UwU<77>/g')" local value="$(html_encode <<< "${ref[$key]}" | sed -E 's/\&/<2F>UwU<77>/g')"
else else
local value="$(echo -n "${ref["$key"]}" | tr -d $'\01'$'\02' | tr $'\n' $'\01' | sed -E 's/\\\\/<2F>OwO<77>/g;s/\\//g;s/<2F>OwO<77>/\\/g' | html_encode | sed -E 's/\&/<2F>UwU<77>/g')" local value="$(sed -E 's/\\\\/<2F>OwO<77>/g;s/\\//g;s/<2F>OwO<77>/\\/g' <<< "${ref[$key]}" | html_encode | sed -E 's/\&/<2F>UwU<77>/g')"
fi fi
echo 's'$'\02''\{\{\.'"$key"'\}\}'$'\02'''"$value"''$'\02''g;' >> "$tmp" echo 's'$'\02''\{\{\.'"$key"'\}\}'$'\02'''"$value"''$'\02''g;' >> "$tmp"
else else
echo 's'$'\02''\{\{\.'"$key"'\}\}'$'\02'$'\02''g;' >> "$tmp" echo 's'$'\02''\{\{\.'"$key"'\}\}'$'\02'$'\02''g;' >> "$tmp"
fi fi
done done
unset IFS
# process file includes;
# achtung: even though this is *after* the main loop, it actually executes sed reaplces *before* it;
# recursion is currently unsupported here, i feel like it may break things?
if [[ "$template" == *'{{#'* && "$3" != true ]]; then
local subtemplate=$(mktemp)
while read key; do
# below check prevents the loop loading itself as a template.
# this is possibly not enough to prevent all recursions, but
# i see it as a last-ditch measure. so it'll do here.
if [[ "$file" == "$2" ]]; then
echo 's'$'\02''\{\{\#'"$key"'\}\}'$'\02''I cowardly refuse to endlessly recurse\!'$'\02''g;' >> "$subtemplate"
elif [[ -f "$key" ]]; then
echo 's'$'\02''\{\{\#'"$key"'\}\}'$'\02'"$(tr -d $'\01'$'\02' < "$key" | tr $'\n' $'\01' | sed 's/\&/<2F>UwU<77>/g')"$'\02''g;' >> "$subtemplate"
_template_find_special_uri "$(cat "$key")"
fi
done <<< "$(grep -Poh '{{#.*?}}' <<< "$template" | sed 's/{{#//;s/}}$//')"
cat <<< $(cat "$subtemplate" "$tmp") > "$tmp"
rm "$subtemplate"
fi
_template_find_special_uri "$template"
_template_gen_special_uri >> "$tmp"
if [[ "$3" != true ]]; then # are we recursing? if [[ "$3" != true ]]; then # are we recursing?
cat "$tmp" | tr '\n' $'\01' | sed -E 's/'$'\02'';'$'\01''/'$'\02'';/g;s/'$'\02''g;'$'\01''/'$'\02''g;/g' > "${tmp}_" cat "$tmp" | tr '\n' $'\01' | sed -E 's/'$'\02'';'$'\01''/'$'\02'';/g;s/'$'\02''g;'$'\01''/'$'\02''g;/g' > "${tmp}_"
@ -101,38 +69,6 @@ function render() {
fi fi
} }
_template_uri_list=()
# internal function that finds all occurences of the special `{{-uri-N}}` tag.
# here to also make it run on subtemplates
#
# _template_find_special_uri(tpl_string)
_template_find_special_uri() {
local IFS=$'\n'
local line
if [[ "$1" == *'{{-uri'* ]]; then
while read line; do
_template_uri_list+=("${line//[^0-9]}")
done <<< "$(grep -Poh '{{-uri-[0-9]*}}' <<< "$1")"
fi
}
# internal function that takes the output from _template_find_special_uri and
# transforms it into sed exprs
#
# _template_gen_special_uri() -> stdout
_template_gen_special_uri() {
local IFS=$'\n'
local num
local uri
# {{-uri-<num>}}, where num is amount of slashed parts to include
sort <<< ${_template_uri_list[*]} | uniq | while read num; do
uri="$(grep -Poh '^(/.*?){'"$((num+1))"'}' <<< "${r[url_clean]}/")"
echo 's'$'\02''\{\{-uri-'"$num"'\}\}'$'\02'"$uri"$'\02''g;'
done
# for replacing plain {{-uri}} without a number
echo 's'$'\02''\{\{-uri\}\}'$'\02'"${r[url_clean]}"$'\02''g;'
}
# render_unsafe(array, template_file) # render_unsafe(array, template_file)
function render_unsafe() { function render_unsafe() {
local template="$(cat "$2")" local template="$(cat "$2")"
@ -141,14 +77,14 @@ function render_unsafe() {
for key in ${!ref[@]}; do for key in ${!ref[@]}; do
if [[ "$key" == "_"* ]]; then # iter mode if [[ "$key" == "_"* ]]; then # iter mode
# grep "start _test" -A99999 | grep "end _test" -B99999 # grep "start _test" -A99999 | grep "end _test" -B99999
local -n item_array=${ref["$key"]} local -n item_array=${ref[$key]}
local value local value
for ((_i = 0; _i < ${#item_array[@]}; _i++)); do for ((_i = 0; _i < ${#item_array[@]}; _i++)); do
value+="$(xxd -p <<< "${item_array[$_i]}" | tr -d '\n' | sed -E 's/../\\x&/g')" value+="$(xxd -p <<< "${item_array[$_i]}" | tr -d '\n' | sed -E 's/../\\x&/g')"
done done
echo 's/\{\{'"$key"'\}\}/'"$value"'/g' >> "$tmp" echo 's/\{\{'"$key"'\}\}/'"$value"'/g' >> "$tmp"
else else
local value="$(xxd -p <<< "${ref["$key"]}" | tr -d '\n' | sed -E 's/../\\x&/g')" local value="$(xxd -p <<< "${ref[$key]}" | tr -d '\n' | sed -E 's/../\\x&/g')"
echo 's/\{\{\.'"$key"'\}\}/'"$value"'/g' >> "$tmp" echo 's/\{\{\.'"$key"'\}\}/'"$value"'/g' >> "$tmp"
fi fi
done done
@ -172,12 +108,13 @@ function nested_declare() {
# nested_add(ref, array) # nested_add(ref, array)
function nested_add() { function nested_add() {
local nested_id=$(_nested_random) local nested_id=$(_nested_random)
declare -n nested_ref=$2
declare -g -A _$nested_id declare -g -A _$nested_id
local a # poor man's array copy
a="$(declare -p "$2")" for k in ${!nested_ref[@]}; do
# pain declare -g -A _$nested_id[$k]="${nested_ref[$k]}"
eval "${a/ $2=/ -g _$nested_id=}" done
local -n ref=$1 local -n ref=$1
ref+=("$nested_id") ref+=("$nested_id")
@ -186,5 +123,5 @@ function nested_add() {
# nested_get(ref, i) # nested_get(ref, i)
function nested_get() { function nested_get() {
local -n ref=$1 local -n ref=$1
declare -g -n res=_${ref["$2"]} declare -g -n res=_${ref[$2]}
} }

View file

@ -1,2 +0,0 @@
#!/usr/bin/env bash
HTTPSH_VERSION=0.97

View file

@ -1,35 +1,25 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# worker.sh - setup and control of workers # worker.sh - setup and control of workers
# worker_add(name, interval) # worker_add(name, interval)
function worker_add() { function worker_add() {
if [[ -x "${cfg[namespace]}/workers/$1/worker.sh" ]]; then if [[ -x "${cfg[namespace]}/workers/$1/worker.sh" ]]; then
echo "[WRKR] adding worker $1" echo "[WRKR] adding worker $1"
{ while true; do
shopt -s extglob source "${cfg[namespace]}/workers/$1/worker.sh"
x() { declare -p data;} # for notORM sleep $2
source config/master.sh if [[ $(cat "${cfg[namespace]}/workers/$1/control") == "die" ]]; then
source src/account.sh echo "" > ${cfg[namespace]}/workers/$1/control
source src/mail.sh while true; do
source src/mime.sh if [[ $(cat "${cfg[namespace]}/workers/$1/control") == "run" ]]; then
source src/misc.sh echo "" > "${cfg[namespace]}/workers/$1/control"
source src/notORM.sh break
source src/template.sh fi
while true; do sleep $2
source "${cfg[namespace]}/workers/$1/worker.sh" done
sleep $2 fi
if [[ $(cat "${cfg[namespace]}/workers/$1/control") == "die" ]]; then done &
echo "" > ${cfg[namespace]}/workers/$1/control
while true; do
if [[ $(cat "${cfg[namespace]}/workers/$1/control") == "run" ]]; then
echo "" > "${cfg[namespace]}/workers/$1/control"
break
fi
sleep $2
done
fi
done
} &
else else
echo "[WRKR] Broken config - workers/$1/worker.sh does not exist, or is not executable?" echo "[WRKR] Broken config - workers/$1/worker.sh does not exist, or is not executable?"
fi fi

View file

@ -1,17 +0,0 @@
#!/bin/bash
prepare() {
[[ ! -d app ]] && ./http.sh init
./http.sh >/dev/null &
}
tst() {
for i in {1..10}; do
if [[ "$(ss -tulnap | grep LISTEN | grep 1337)" ]]; then
return 0
fi
sleep 0.5
done
return 255
}

View file

@ -1,174 +0,0 @@
#!/bin/bash
server_output() {
prepare() {
cat <<"EOF" > app/webroot/meow.shs
#!/bin/bash
echo meow
EOF
}
tst() {
curl -s localhost:1337/meow.shs
}
match="meow"
}
server_get_param() {
prepare() {
cat <<"EOF" > app/webroot/meow.shs
#!/bin/bash
echo "${get_data[meow]}"
EOF
}
tst() {
curl -s "localhost:1337/meow.shs?meow=nyaa"
}
match="nyaa"
}
server_get_random() {
prepare() {
cat <<"EOF" > app/webroot/meow.shs
#!/bin/bash
echo "${get_data[meow]}"
EOF
}
tst() {
curl -s "localhost:1337/meow.shs?meow=nyaa"
}
match="nyaa"
}
server_post_param() {
prepare() {
cat <<"EOF" > app/webroot/meow.shs
#!/bin/bash
echo "${post_data[meow]}"
EOF
}
tst() {
curl -s "localhost:1337/meow.shs" -d 'meow=nyaa'
}
match="nyaa"
}
server_res_header() {
tst() {
curl -s -I localhost:1337
}
match_sub="HTTP.sh"
}
server_res_header_custom() {
prepare() {
cat <<"EOF" > app/webroot/meow.shs
#!/bin/bash
header "meow: a custom header!"
EOF
}
tst() {
curl -s -v localhost:1337/meow.shs 2>&1
}
match_sub="a custom header!"
}
server_req_header() {
prepare() {
cat <<"EOF" > app/webroot/meow.shs
#!/bin/bash
echo "${headers[meow]}"
EOF
}
tst() {
curl -s "localhost:1337/meow.shs" -H 'meow: nyaa'
}
match="nyaa"
}
server_req_header_case() {
tst() {
curl -s "localhost:1337/meow.shs" -H 'Meow: nyaa'
}
match="nyaa"
}
server_req_header_dup() {
tst() {
curl -s "localhost:1337/meow.shs" -H 'Meow: nyaa' -H 'mEow: asdf'
}
# TODO: maybe we should return 400 when we detect sth like this?
match="asdf"
}
server_req_header_invalid() {
tst() {
# we have to trick curl into sending an invalid header for us
curl -s "localhost:1337/meow.shs" -H $'a:\nasdf asdf asdf asdf' -H "meow: asdf"
}
match_not="asdf"
}
server_req_header_special_value() {
rand="$(cat /dev/urandom | cut -c 1-10 | head -n1 | sed -E 's/[\r\0]//')"
tst() {
# this needs some more polish, we sometimes confuse curl xD
curl -s "localhost:1337/meow.shs" -H "meow: $rand"
}
match="$rand"
}
server_req_header_special_name() {
rand="$(cat /dev/urandom | cut -c 1-10 | head -n1 | sed -E 's/[\r\0]//')"
prepare() {
cat <<EOF > app/webroot/meow.shs
#!/bin/bash
rand="\$(xxd -p -r <<< "$(echo "$rand" | xxd -p)")"
echo "\${headers["\${rand,,}"]}" # normalize to lowercase
EOF
}
tst() {
curl -s "localhost:1337/meow.shs" -H "$rand: nyaa"
}
cleanup() {
# *sigh* we need a better way to do this tbh
rm app/webroot/meow.shs
}
match="nyaa"
}
subtest_list=(
server_output
server_get_param
server_post_param
server_res_header
server_res_header_custom
server_req_header
server_req_header_case
server_req_header_dup
server_req_header_invalid
server_req_header_special_value
server_req_header_special_name
)

View file

@ -1,33 +0,0 @@
#!/bin/bash
tpl_basic() {
prepare() {
source src/misc.sh
source src/template.sh
}
tst() {
declare -A meow
meow[asdf]="$value"
render meow <(echo "value: {{.asdf}}")
}
value="A quick brown fox jumped over the lazy dog"
match="value: $value"
}
tpl_basic_specialchars() {
value="&#$%^&*() <-- look at me go"
match="value: $(html_encode "$value")"
}
tpl_basic_newline() {
value=$'\n'a$'\n'
match="value: $(html_encode "$value")"
}
subtest_list=(
tpl_basic
tpl_basic_specialchars
tpl_basic_newline
)

View file

@ -1,101 +0,0 @@
#!/bin/bash
misc_html_escape_basic() {
prepare() {
source src/misc.sh
}
tst() {
html_encode "$value"
}
value="meow"
match="meow"
}
misc_html_escape_special() {
value="<script>"
match_not="<"
}
misc_html_escape_apos() {
value="<img src='asdf'>"
match_not="'"
}
misc_html_escape_quot() {
value='<img src="meow">'
match_not='"'
}
# ---
misc_url_encode() {
tst() {
url_encode "$value"
}
value="nyaa"
match=""
}
misc_url_encode_special01() {
value="%%"
match="%25%25"
}
misc_url_encode_special02() {
value="&"
match_not="&"
}
misc_url_encode_special03() {
value="?asdf=meow&nyaa="
match_not="?"
}
misc_url_encode_url() {
value="https://example.org/?nyaa=meow"
# i promise we'll get a better impl of this at some point xD
match="%68%74%74%70%73%3a%2f%2f%65%78%61%6d%70%6c%65%2e%6f%72%67%2f%3f%6e%79%61%61%3d%6d%65%6f%77"
}
# ---
misc_url_decode_encode() {
tst() {
url_decode "$(url_encode "$value")"
}
value="https://example.org/?nyaa=meow&as=df"
match="$value"
}
# ---
misc_url_decode01() {
tst() {
url_decode "$value"
}
value='%25'
match='%'
}
misc_url_decode02() {
value='%2525'
match='%25'
}
subtest_list=(
misc_html_escape_basic
misc_html_escape_special
misc_html_escape_apos
misc_html_escape_quot
misc_url_encode
misc_url_encode_special01
misc_url_encode_special02
misc_url_encode_special03
misc_url_encode_url
misc_url_decode_encode
misc_url_decode01
misc_url_decode02
)

View file

@ -1,100 +0,0 @@
#!/bin/bash
store="storage/notORM-test.dat"
# Seed the store with 16 rows (field 1 = 1..16) and confirm the first
# field of the first returned row reads back intact.
notORM_add_get() {
	prepare() {
		source src/notORM.sh
		# -f: the store won't exist on a fresh checkout; a bare rm would
		# spray a spurious "No such file" error into the test output
		rm -f -- "$store"
		a=("$value" 1 "$value_")
		data_add "$store" a
		for i in {2..16}; do
			a[1]=$i
			data_add "$store" a
		done
	}
	tst() {
		data_get "$store" { } || return $?
		echo "${res[0]}"
	}
	value="A quick brown fox jumped over the lazy dog"
	# deliberately multi-line, to exercise record escaping
	value_=$'meow?\n:3c'
	match="$value"
}
# field 2 holds the newline-embedded value_ seeded by notORM_add_get;
# it must survive storage and retrieval intact
notORM_get_multiline() {
tst() {
data_get "$store" { }
echo "${res[2]}"
}
# value_ was set when notORM_add_get ran earlier in this file
match="$value_"
}
# Filter syntax: fetch the row whose field 1 equals "2". Pass/fail is
# judged purely on data_get's exit status.
notORM_get_filter() {
	tst() {
		data_get "$store" { "2" 1 }
	}
}
# Legacy positional syntax: data_get <store> <val> <field> <out-array>;
# the looked-up row's first field must equal the seeded sentence.
notORM_get_oldsyntax() {
	tst() {
		data_get "$store" 2 1 meow || return $?
		[[ "${meow[0]}" == "$value" ]]
	}
}
# Legacy delete: after data_yeet, looking the row up again must yield
# "not found" (data_get exit code 2).
notORM_yeet_oldsyntax() {
	tst() {
		data_yeet "$store" 1 1
		data_get "$store" 1 1
		[[ $? == 2 ]]
	}
}
# Brace-filter delete: the deleted row must be gone afterwards
# (data_get exit code 2 == not found).
notORM_yeet() {
	tst() {
		data_yeet "$store" { 2 1 }
		data_get "$store" { 2 1 }
		[[ $? == 2 ]]
	}
}
# Delete with two filters at once; the matching row must be gone
# afterwards (data_get exit code 2 == not found).
notORM_yeet_multiple_filters() {
	tst() {
		data_yeet "$store" { 3 1 } { "$value" }
		data_get "$store" { 3 1 }
		[[ $? == 2 ]]
	}
}
# Replace field 2 of row 4 and verify the whole row reads back equal.
notORM_replace_oldsyntax() {
	tst() {
		data_get "$store" { } out
		out[2]='meow!'
		data_replace "$store" 4 out 1 || return $?
		data_get "$store" 4 || return $?
		# compare as IFS-joined strings: "${arr[@]}" inside [[ ]] expands
		# to multiple words and raises a conditional-expression error for
		# multi-field rows, so the original [@] check could never pass
		[[ "${res[*]}" == "${out[*]}" ]] && return 0 || return 1
	}
}
# run order matters: every subtest after the first reuses the store
# seeded by notORM_add_get's prepare()
subtest_list=(
notORM_add_get
notORM_get_multiline
notORM_get_filter
notORM_get_oldsyntax
notORM_yeet_oldsyntax
notORM_yeet
notORM_yeet_multiple_filters
notORM_replace_oldsyntax
)

154
tst.sh
View file

@ -1,154 +0,0 @@
#!/usr/bin/env bash
# Reset all per-test state to a known baseline. Called once before any
# test file is sourced and again after each file so settings cannot
# leak between files.
_defaults() {
	match=""
	match_begin=""
	match_end=""
	match_sub=""
	# match_not is the fifth expectation mode checked by _a; reset it
	# here like its siblings instead of relying only on _a's unset
	match_not=""
	# placeholder body: a test file that forgets to define tst() fails loudly
	tst() {
		echo "dummy test! please set me up properly" > /dev/stderr
		exit 1
	}
	# overridable no-op hooks run before/after each tst()
	prepare() {
		:
	}
	cleanup () {
		:
	}
}
_defaults
# per-test overridable failure hook; delegates to the default reporter
on_error() {
on_error_default
}
# per-test overridable success hook; delegates to the default reporter
on_success() {
on_success_default
}
# Default success reporter: log the test name and bump the pass tally.
# Always returns 0 so callers chaining on its status keep working.
on_success_default() {
	printf 'OK: %s\n' "$test_name"
	ok_count=$(( ok_count + 1 ))
	return 0
}
# Default failure reporter: log the test name plus the captured output
# and bump the fail tally. Always returns 0.
on_error_default() {
	printf 'FAIL: %s\n' "$test_name"
	printf '(res: %s)\n' "$res"
	fail_count=$(( fail_count + 1 ))
	return 0
}
# Unrecoverable failure (tst exited 255): report, reap leftover
# processes, and abort the whole run.
on_fatal() {
echo "FATAL: $test_name"
_final_cleanup
exit 1
}
IFS=$'\n'
for i in "$@"; do
if [[ ! -f "$i" ]]; then
echo -e "$0 - basic test framework\n\nusage: $0 <test> [test] [...]"
exit 1
fi
done
unset IFS
ok_count=0
fail_count=0
# Judge the test that just ran. Inputs (globals): res, res_code, and at
# most one expectation variable; precedence is match, match_sub,
# match_begin, match_end, match_not, then a plain exit-code check.
# Dispatches to on_success/on_error, then scrubs per-test state.
_a() {
	# 255 from tst means "abort the whole run"
	[[ "$res_code" == 255 ]] && on_fatal

	# compute the verdict first, dispatch afterwards: on_success/on_error
	# are user-overridable, so `cond && on_success || on_error` would
	# wrongly fire on_error whenever an override returns non-zero
	local verdict
	if [[ "$match" ]]; then
		[[ "$res" == "$match" ]]; verdict=$?
	elif [[ "$match_sub" ]]; then
		[[ "$res" == *"$match_sub"* ]]; verdict=$?
	elif [[ "$match_begin" ]]; then
		[[ "$res" == "$match_begin"* ]]; verdict=$?
	elif [[ "$match_end" ]]; then
		[[ "$res" == *"$match_end" ]]; verdict=$?
	elif [[ "$match_not" ]]; then
		! [[ "$res" == *"$match_not"* ]]; verdict=$?
	else
		# no expectation set: the exit code alone decides
		[[ "$res_code" == 0 ]]; verdict=$?
	fi

	if (( verdict == 0 )); then
		on_success
	else
		on_error
	fi

	# scrub expectations and the prepare hook for the next (sub)test
	unset match match_sub match_begin match_end match_not
	prepare() { :; }
}
# Reap anything the tests left running: TERM the children of our
# background jobs, give them 2 seconds to exit, SIGKILL the stragglers,
# and finally kill anything still parented to this shell itself.
_final_cleanup() {
# handle spawned processes
for i in $(jobs -p); do
pkill -P $i
done
sleep 2
for i in $(jobs -p); do
pkill -9 -P $i
done
pkill -P $$
}
# Main driver: source each test file, run it (or each of its declared
# subtests), judge with _a, then reset state before the next file.
for j in "$@"; do
	source "$j"
	if [[ "${#subtest_list[@]}" == 0 ]]; then
		# plain test file: it set up tst/prepare/cleanup directly
		test_name="$j"
		prepare
		res="$(tst)"
		res_code=$?   # $? of a var="$(cmd)" assignment is cmd's status
		cleanup
		_a
	else
		# suite file: invoke each subtest function, then run the hooks
		echo "--- $j ---"
		for i in "${subtest_list[@]}"; do
			test_name="$i"
			"$i"
			prepare
			res="$(tst)"
			res_code=$?
			cleanup
			_a
		done
	fi
	_defaults
	# _defaults resets the hooks but not the suite list; without this
	# unset, a following file that defines no subtest_list of its own
	# would erroneously re-run the previous file's subtests
	unset subtest_list
done
_final_cleanup
echo -e "\n\nTesting done!
OK: $ok_count
FAIL: $fail_count"