reconnect moved files to git repo
venv/lib/python3.11/site-packages/gunicorn/__init__.py (Normal file, +8 lines)
@@ -0,0 +1,8 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

version_info = (23, 0, 0)
__version__ = ".".join([str(v) for v in version_info])
SERVER = "gunicorn"
SERVER_SOFTWARE = "%s/%s" % (SERVER, __version__)
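The package root above is where gunicorn's version metadata lives; SERVER_SOFTWARE is the string reported in logs and response headers. A quick sanity check of how the derived values relate (a sketch, assuming this vendored gunicorn 23.0.0 is importable):

import gunicorn

# __version__ is derived from version_info, SERVER_SOFTWARE from both
assert gunicorn.version_info == (23, 0, 0)
assert gunicorn.__version__ == "23.0.0"
print(gunicorn.SERVER_SOFTWARE)  # gunicorn/23.0.0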
venv/lib/python3.11/site-packages/gunicorn/__main__.py (Normal file, +10 lines)
@@ -0,0 +1,10 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

from gunicorn.app.wsgiapp import run

if __name__ == "__main__":
    # see config.py - argparse defaults to basename(argv[0]) == "__main__.py"
    # todo: let runpy.run_module take care of argv[0] rewriting
    run(prog="gunicorn")
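With run(prog="gunicorn") wired up here, `python -m gunicorn` behaves like the installed console script. A minimal sketch of the equivalence (myapp:app is a hypothetical WSGI module:callable):

# These two command lines start the same server:
#   gunicorn myapp:app --bind 127.0.0.1:8000
#   python -m gunicorn myapp:app --bind 127.0.0.1:8000
# The same entry point can also be called directly:
import sys
from gunicorn.app.wsgiapp import run

sys.argv = ["gunicorn", "myapp:app", "--bind", "127.0.0.1:8000"]  # hypothetical app
run(prog="gunicorn")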
Binary files not shown.
@@ -0,0 +1,3 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
Binary files not shown.
venv/lib/python3.11/site-packages/gunicorn/app/base.py (Normal file, +235 lines)
@@ -0,0 +1,235 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import importlib.util
import importlib.machinery
import os
import sys
import traceback

from gunicorn import util
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config, get_default_config_file
from gunicorn import debug


class BaseApplication:
    """
    An application interface for configuring and loading
    the various necessities for any given web framework.
    """
    def __init__(self, usage=None, prog=None):
        self.usage = usage
        self.cfg = None
        self.callable = None
        self.prog = prog
        self.logger = None
        self.do_load_config()

    def do_load_config(self):
        """
        Loads the configuration
        """
        try:
            self.load_default_config()
            self.load_config()
        except Exception as e:
            print("\nError: %s" % str(e), file=sys.stderr)
            sys.stderr.flush()
            sys.exit(1)

    def load_default_config(self):
        # init configuration
        self.cfg = Config(self.usage, prog=self.prog)

    def init(self, parser, opts, args):
        raise NotImplementedError

    def load(self):
        raise NotImplementedError

    def load_config(self):
        """
        Loads the configuration from one or several inputs,
        e.g. the command line or a configuration file.
        You have to override this method in your class.
        """
        raise NotImplementedError

    def reload(self):
        self.do_load_config()
        if self.cfg.spew:
            debug.spew()

    def wsgi(self):
        if self.callable is None:
            self.callable = self.load()
        return self.callable

    def run(self):
        try:
            Arbiter(self).run()
        except RuntimeError as e:
            print("\nError: %s\n" % e, file=sys.stderr)
            sys.stderr.flush()
            sys.exit(1)


class Application(BaseApplication):

    # 'init' and 'load' methods are implemented by WSGIApplication.
    # pylint: disable=abstract-method

    def chdir(self):
        # chdir to the configured path before loading,
        # default is the current dir
        os.chdir(self.cfg.chdir)

        # add the path to sys.path
        if self.cfg.chdir not in sys.path:
            sys.path.insert(0, self.cfg.chdir)

    def get_config_from_filename(self, filename):

        if not os.path.exists(filename):
            raise RuntimeError("%r doesn't exist" % filename)

        ext = os.path.splitext(filename)[1]

        try:
            module_name = '__config__'
            if ext in [".py", ".pyc"]:
                spec = importlib.util.spec_from_file_location(module_name, filename)
            else:
                msg = "configuration file should have a valid Python extension.\n"
                util.warn(msg)
                loader_ = importlib.machinery.SourceFileLoader(module_name, filename)
                spec = importlib.util.spec_from_file_location(module_name, filename, loader=loader_)
            mod = importlib.util.module_from_spec(spec)
            sys.modules[module_name] = mod
            spec.loader.exec_module(mod)
        except Exception:
            print("Failed to read config file: %s" % filename, file=sys.stderr)
            traceback.print_exc()
            sys.stderr.flush()
            sys.exit(1)

        return vars(mod)

    def get_config_from_module_name(self, module_name):
        return vars(importlib.import_module(module_name))

    def load_config_from_module_name_or_filename(self, location):
        """
        Loads the configuration from a module name or filename: the file
        must be a Python file, otherwise a RuntimeError is raised. The
        process stops if the configuration file contains a syntax error.
        """

        if location.startswith("python:"):
            module_name = location[len("python:"):]
            cfg = self.get_config_from_module_name(module_name)
        else:
            if location.startswith("file:"):
                filename = location[len("file:"):]
            else:
                filename = location
            cfg = self.get_config_from_filename(filename)

        for k, v in cfg.items():
            # Ignore unknown names
            if k not in self.cfg.settings:
                continue
            try:
                self.cfg.set(k.lower(), v)
            except Exception:
                print("Invalid value for %s: %s\n" % (k, v), file=sys.stderr)
                sys.stderr.flush()
                raise

        return cfg

    def load_config_from_file(self, filename):
        return self.load_config_from_module_name_or_filename(location=filename)

    def load_config(self):
        # parse console args
        parser = self.cfg.parser()
        args = parser.parse_args()

        # optional settings from apps
        cfg = self.init(parser, args, args.args)

        # set up import paths and follow symlinks
        self.chdir()

        # Load any app-specific configuration
        if cfg:
            for k, v in cfg.items():
                self.cfg.set(k.lower(), v)

        env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())

        if args.config:
            self.load_config_from_file(args.config)
        elif env_args.config:
            self.load_config_from_file(env_args.config)
        else:
            default_config = get_default_config_file()
            if default_config is not None:
                self.load_config_from_file(default_config)

        # Load up environment configuration
        for k, v in vars(env_args).items():
            if v is None:
                continue
            if k == "args":
                continue
            self.cfg.set(k.lower(), v)

        # Lastly, update the configuration with any command line settings.
        for k, v in vars(args).items():
            if v is None:
                continue
            if k == "args":
                continue
            self.cfg.set(k.lower(), v)

        # current directory might be changed by the config now
        # set up import paths and follow symlinks
        self.chdir()

    def run(self):
        if self.cfg.print_config:
            print(self.cfg)

        if self.cfg.print_config or self.cfg.check_config:
            try:
                self.load()
            except Exception:
                msg = "\nError while loading the application:\n"
                print(msg, file=sys.stderr)
                traceback.print_exc()
                sys.stderr.flush()
                sys.exit(1)
            sys.exit(0)

        if self.cfg.spew:
            debug.spew()

        if self.cfg.daemon:
            if os.environ.get('NOTIFY_SOCKET'):
                msg = "Warning: you shouldn't specify `daemon = True`" \
                      " when launching by systemd with `Type = notify`"
                print(msg, file=sys.stderr, flush=True)

            util.daemonize(self.cfg.enable_stdio_inheritance)

        # set python paths
        if self.cfg.pythonpath:
            paths = self.cfg.pythonpath.split(",")
            for path in paths:
                pythonpath = os.path.abspath(path)
                if pythonpath not in sys.path:
                    sys.path.insert(0, pythonpath)

        super().run()
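BaseApplication is the supported way to embed gunicorn in a program: load_config() and load() are the two abstract hooks, and run() hands the configured app to the Arbiter. A minimal sketch of a custom application following that contract (handler_app and the option names passed in are illustrative; "bind" and "workers" are real settings):

from gunicorn.app.base import BaseApplication


def handler_app(environ, start_response):
    # Minimal WSGI callable used only for this demo
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]


class StandaloneApplication(BaseApplication):
    """Embed gunicorn in a Python program instead of using the CLI."""

    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        # Only pass through names the Config object actually knows about
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        return self.application


if __name__ == "__main__":
    StandaloneApplication(handler_app, {"bind": "127.0.0.1:8000", "workers": 2}).run()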
venv/lib/python3.11/site-packages/gunicorn/app/pasterapp.py (Normal file, +74 lines)
@@ -0,0 +1,74 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import configparser
import os

from paste.deploy import loadapp

from gunicorn.app.wsgiapp import WSGIApplication
from gunicorn.config import get_default_config_file


def get_wsgi_app(config_uri, name=None, defaults=None):
    if ':' not in config_uri:
        config_uri = "config:%s" % config_uri

    return loadapp(
        config_uri,
        name=name,
        relative_to=os.getcwd(),
        global_conf=defaults,
    )


def has_logging_config(config_file):
    parser = configparser.ConfigParser()
    parser.read([config_file])
    return parser.has_section('loggers')


def serve(app, global_conf, **local_conf):
    """\
    A Paste Deployment server runner.

    Example configuration:

        [server:main]
        use = egg:gunicorn#main
        host = 127.0.0.1
        port = 5000
    """
    config_file = global_conf['__file__']
    gunicorn_config_file = local_conf.pop('config', None)

    host = local_conf.pop('host', '')
    port = local_conf.pop('port', '')
    if host and port:
        local_conf['bind'] = '%s:%s' % (host, port)
    elif host:
        local_conf['bind'] = host.split(',')

    class PasterServerApplication(WSGIApplication):
        def load_config(self):
            self.cfg.set("default_proc_name", config_file)

            if has_logging_config(config_file):
                self.cfg.set("logconfig", config_file)

            if gunicorn_config_file:
                self.load_config_from_file(gunicorn_config_file)
            else:
                default_gunicorn_config_file = get_default_config_file()
                if default_gunicorn_config_file is not None:
                    self.load_config_from_file(default_gunicorn_config_file)

            for k, v in local_conf.items():
                if v is not None:
                    self.cfg.set(k.lower(), v)

        def load(self):
            return app

    PasterServerApplication().run()
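get_wsgi_app() normalizes a bare filename into a "config:" URI before delegating to PasteDeploy's loadapp(). A short sketch (development.ini is a hypothetical PasteDeploy config in the current directory):

from gunicorn.app.pasterapp import get_wsgi_app

# "development.ini" has no scheme, so it becomes "config:development.ini"
app = get_wsgi_app("development.ini")
# An explicit URI and app name pass through unchanged
app = get_wsgi_app("config:development.ini", name="main")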
venv/lib/python3.11/site-packages/gunicorn/app/wsgiapp.py (Normal file, +70 lines)
@@ -0,0 +1,70 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import os

from gunicorn.errors import ConfigError
from gunicorn.app.base import Application
from gunicorn import util


class WSGIApplication(Application):
    def init(self, parser, opts, args):
        self.app_uri = None

        if opts.paste:
            from .pasterapp import has_logging_config

            config_uri = os.path.abspath(opts.paste)
            config_file = config_uri.split('#')[0]

            if not os.path.exists(config_file):
                raise ConfigError("%r not found" % config_file)

            self.cfg.set("default_proc_name", config_file)
            self.app_uri = config_uri

            if has_logging_config(config_file):
                self.cfg.set("logconfig", config_file)

            return

        if len(args) > 0:
            self.cfg.set("default_proc_name", args[0])
            self.app_uri = args[0]

    def load_config(self):
        super().load_config()

        if self.app_uri is None:
            if self.cfg.wsgi_app is not None:
                self.app_uri = self.cfg.wsgi_app
            else:
                raise ConfigError("No application module specified.")

    def load_wsgiapp(self):
        return util.import_app(self.app_uri)

    def load_pasteapp(self):
        from .pasterapp import get_wsgi_app
        return get_wsgi_app(self.app_uri, defaults=self.cfg.paste_global_conf)

    def load(self):
        if self.cfg.paste is not None:
            return self.load_pasteapp()
        else:
            return self.load_wsgiapp()


def run(prog=None):
    """\
    The ``gunicorn`` command line runner for launching Gunicorn with
    generic WSGI applications.
    """
    from gunicorn.app.wsgiapp import WSGIApplication
    WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]", prog=prog).run()


if __name__ == '__main__':
    run()
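init() and load_config() together define the lookup order for the application URI: a --paste config wins, then the positional argument, then the wsgi_app setting; with none of them, ConfigError is raised. A sketch of the config-file route (a hypothetical gunicorn.conf.py; wsgi_app, bind and workers are real settings, "myapp:app" is illustrative):

# gunicorn.conf.py
# With wsgi_app set here, plain `gunicorn` with no positional
# argument still resolves an application in load_config().
wsgi_app = "myapp:app"
bind = "127.0.0.1:8000"
workers = 2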
venv/lib/python3.11/site-packages/gunicorn/arbiter.py (Normal file, +671 lines)
@@ -0,0 +1,671 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import random
import select
import signal
import sys
import time
import traceback

from gunicorn.errors import HaltServer, AppImportError
from gunicorn.pidfile import Pidfile
from gunicorn import sock, systemd, util

from gunicorn import __version__, SERVER_SOFTWARE


class Arbiter:
    """
    The Arbiter keeps the worker processes alive, launching or
    killing them as needed. It also manages application reloading
    via SIGHUP/USR2.
    """

    # A flag indicating that a worker failed to boot. If a worker
    # process exits with this error code, the arbiter will terminate.
    WORKER_BOOT_ERROR = 3

    # A flag indicating if an application failed to be loaded
    APP_LOAD_ERROR = 4

    START_CTX = {}

    LISTENERS = []
    WORKERS = {}
    PIPE = []

    # I love dynamic languages
    SIG_QUEUE = []
    SIGNALS = [getattr(signal, "SIG%s" % x)
               for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
    SIG_NAMES = dict(
        (getattr(signal, name), name[3:].lower()) for name in dir(signal)
        if name[:3] == "SIG" and name[3] != "_"
    )

    def __init__(self, app):
        os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE

        self._num_workers = None
        self._last_logged_active_worker_count = None
        self.log = None

        self.setup(app)

        self.pidfile = None
        self.systemd = False
        self.worker_age = 0
        self.reexec_pid = 0
        self.master_pid = 0
        self.master_name = "Master"

        cwd = util.getcwd()

        args = sys.argv[:]
        args.insert(0, sys.executable)

        # init start context
        self.START_CTX = {
            "args": args,
            "cwd": cwd,
            0: sys.executable
        }

    def _get_num_workers(self):
        return self._num_workers

    def _set_num_workers(self, value):
        old_value = self._num_workers
        self._num_workers = value
        self.cfg.nworkers_changed(self, value, old_value)
    num_workers = property(_get_num_workers, _set_num_workers)

    def setup(self, app):
        self.app = app
        self.cfg = app.cfg

        if self.log is None:
            self.log = self.cfg.logger_class(app.cfg)

        # reopen files
        if 'GUNICORN_PID' in os.environ:
            self.log.reopen_files()

        self.worker_class = self.cfg.worker_class
        self.address = self.cfg.address
        self.num_workers = self.cfg.workers
        self.timeout = self.cfg.timeout
        self.proc_name = self.cfg.proc_name

        self.log.debug('Current configuration:\n{0}'.format(
            '\n'.join(
                '  {0}: {1}'.format(config, value.value)
                for config, value
                in sorted(self.cfg.settings.items(),
                          key=lambda setting: setting[1]))))

        # set environment variables
        if self.cfg.env:
            for k, v in self.cfg.env.items():
                os.environ[k] = v

        if self.cfg.preload_app:
            self.app.wsgi()

    def start(self):
        """\
        Initialize the arbiter. Start listening and set pidfile if needed.
        """
        self.log.info("Starting gunicorn %s", __version__)

        if 'GUNICORN_PID' in os.environ:
            self.master_pid = int(os.environ.get('GUNICORN_PID'))
            self.proc_name = self.proc_name + ".2"
            self.master_name = "Master.2"

        self.pid = os.getpid()
        if self.cfg.pidfile is not None:
            pidname = self.cfg.pidfile
            if self.master_pid != 0:
                pidname += ".2"
            self.pidfile = Pidfile(pidname)
            self.pidfile.create(self.pid)
        self.cfg.on_starting(self)

        self.init_signals()

        if not self.LISTENERS:
            fds = None
            listen_fds = systemd.listen_fds()
            if listen_fds:
                self.systemd = True
                fds = range(systemd.SD_LISTEN_FDS_START,
                            systemd.SD_LISTEN_FDS_START + listen_fds)

            elif self.master_pid:
                fds = []
                for fd in os.environ.pop('GUNICORN_FD').split(','):
                    fds.append(int(fd))

            self.LISTENERS = sock.create_sockets(self.cfg, self.log, fds)

        listeners_str = ",".join([str(lnr) for lnr in self.LISTENERS])
        self.log.debug("Arbiter booted")
        self.log.info("Listening at: %s (%s)", listeners_str, self.pid)
        self.log.info("Using worker: %s", self.cfg.worker_class_str)
        systemd.sd_notify("READY=1\nSTATUS=Gunicorn arbiter booted", self.log)

        # check worker class requirements
        if hasattr(self.worker_class, "check_config"):
            self.worker_class.check_config(self.cfg, self.log)

        self.cfg.when_ready(self)

    def init_signals(self):
        """\
        Initialize master signal handling. Most of the signals
        are queued. Child signals only wake up the master.
        """
        # close old PIPE
        for p in self.PIPE:
            os.close(p)

        # initialize the pipe
        self.PIPE = pair = os.pipe()
        for p in pair:
            util.set_non_blocking(p)
            util.close_on_exec(p)

        self.log.close_on_exec()

        # initialize all signals
        for s in self.SIGNALS:
            signal.signal(s, self.signal)
        signal.signal(signal.SIGCHLD, self.handle_chld)

    def signal(self, sig, frame):
        if len(self.SIG_QUEUE) < 5:
            self.SIG_QUEUE.append(sig)
            self.wakeup()

    def run(self):
        "Main master loop."
        self.start()
        util._setproctitle("master [%s]" % self.proc_name)

        try:
            self.manage_workers()

            while True:
                self.maybe_promote_master()

                sig = self.SIG_QUEUE.pop(0) if self.SIG_QUEUE else None
                if sig is None:
                    self.sleep()
                    self.murder_workers()
                    self.manage_workers()
                    continue

                if sig not in self.SIG_NAMES:
                    self.log.info("Ignoring unknown signal: %s", sig)
                    continue

                signame = self.SIG_NAMES.get(sig)
                handler = getattr(self, "handle_%s" % signame, None)
                if not handler:
                    self.log.error("Unhandled signal: %s", signame)
                    continue
                self.log.info("Handling signal: %s", signame)
                handler()
                self.wakeup()
        except (StopIteration, KeyboardInterrupt):
            self.halt()
        except HaltServer as inst:
            self.halt(reason=inst.reason, exit_status=inst.exit_status)
        except SystemExit:
            raise
        except Exception:
            self.log.error("Unhandled exception in main loop",
                           exc_info=True)
            self.stop(False)
            if self.pidfile is not None:
                self.pidfile.unlink()
            sys.exit(-1)

    def handle_chld(self, sig, frame):
        "SIGCHLD handling"
        self.reap_workers()
        self.wakeup()

    def handle_hup(self):
        """\
        HUP handling.
        - Reload configuration
        - Start the new worker processes with a new configuration
        - Gracefully shutdown the old worker processes
        """
        self.log.info("Hang up: %s", self.master_name)
        self.reload()

    def handle_term(self):
        "SIGTERM handling"
        raise StopIteration

    def handle_int(self):
        "SIGINT handling"
        self.stop(False)
        raise StopIteration

    def handle_quit(self):
        "SIGQUIT handling"
        self.stop(False)
        raise StopIteration

    def handle_ttin(self):
        """\
        SIGTTIN handling.
        Increases the number of workers by one.
        """
        self.num_workers += 1
        self.manage_workers()

    def handle_ttou(self):
        """\
        SIGTTOU handling.
        Decreases the number of workers by one.
        """
        if self.num_workers <= 1:
            return
        self.num_workers -= 1
        self.manage_workers()

    def handle_usr1(self):
        """\
        SIGUSR1 handling.
        Kill all workers by sending them a SIGUSR1
        """
        self.log.reopen_files()
        self.kill_workers(signal.SIGUSR1)

    def handle_usr2(self):
        """\
        SIGUSR2 handling.
        Creates a new arbiter/worker set as a fork of the current
        arbiter without affecting old workers. Use this to do live
        deployment with the ability to back out a change.
        """
        self.reexec()

    def handle_winch(self):
        """SIGWINCH handling"""
        if self.cfg.daemon:
            self.log.info("graceful stop of workers")
            self.num_workers = 0
            self.kill_workers(signal.SIGTERM)
        else:
            self.log.debug("SIGWINCH ignored. Not daemonized")

    def maybe_promote_master(self):
        if self.master_pid == 0:
            return

        if self.master_pid != os.getppid():
            self.log.info("Master has been promoted.")
            # reset master infos
            self.master_name = "Master"
            self.master_pid = 0
            self.proc_name = self.cfg.proc_name
            del os.environ['GUNICORN_PID']
            # rename the pidfile
            if self.pidfile is not None:
                self.pidfile.rename(self.cfg.pidfile)
            # reset proctitle
            util._setproctitle("master [%s]" % self.proc_name)

    def wakeup(self):
        """\
        Wake up the arbiter by writing to the PIPE
        """
        try:
            os.write(self.PIPE[1], b'.')
        except OSError as e:
            if e.errno not in [errno.EAGAIN, errno.EINTR]:
                raise

    def halt(self, reason=None, exit_status=0):
        """ halt arbiter """
        self.stop()

        log_func = self.log.info if exit_status == 0 else self.log.error
        log_func("Shutting down: %s", self.master_name)
        if reason is not None:
            log_func("Reason: %s", reason)

        if self.pidfile is not None:
            self.pidfile.unlink()
        self.cfg.on_exit(self)
        sys.exit(exit_status)

    def sleep(self):
        """\
        Sleep until PIPE is readable or we timeout.
        A readable PIPE means a signal occurred.
        """
        try:
            ready = select.select([self.PIPE[0]], [], [], 1.0)
            if not ready[0]:
                return
            while os.read(self.PIPE[0], 1):
                pass
        except OSError as e:
            # TODO: select.error is a subclass of OSError since Python 3.3.
            error_number = getattr(e, 'errno', e.args[0])
            if error_number not in [errno.EAGAIN, errno.EINTR]:
                raise
        except KeyboardInterrupt:
            sys.exit()

    def stop(self, graceful=True):
        """\
        Stop workers

        :attr graceful: boolean, If True (the default) workers will be
        killed gracefully (ie. trying to wait for the current connection)
        """
        unlink = (
            self.reexec_pid == self.master_pid == 0
            and not self.systemd
            and not self.cfg.reuse_port
        )
        sock.close_sockets(self.LISTENERS, unlink)

        self.LISTENERS = []
        sig = signal.SIGTERM
        if not graceful:
            sig = signal.SIGQUIT
        limit = time.time() + self.cfg.graceful_timeout
        # instruct the workers to exit
        self.kill_workers(sig)
        # wait until the graceful timeout
        while self.WORKERS and time.time() < limit:
            time.sleep(0.1)

        self.kill_workers(signal.SIGKILL)

    def reexec(self):
        """\
        Relaunch the master and workers.
        """
        if self.reexec_pid != 0:
            self.log.warning("USR2 signal ignored. Child exists.")
            return

        if self.master_pid != 0:
            self.log.warning("USR2 signal ignored. Parent exists.")
            return

        master_pid = os.getpid()
        self.reexec_pid = os.fork()
        if self.reexec_pid != 0:
            return

        self.cfg.pre_exec(self)

        environ = self.cfg.env_orig.copy()
        environ['GUNICORN_PID'] = str(master_pid)

        if self.systemd:
            environ['LISTEN_PID'] = str(os.getpid())
            environ['LISTEN_FDS'] = str(len(self.LISTENERS))
        else:
            environ['GUNICORN_FD'] = ','.join(
                str(lnr.fileno()) for lnr in self.LISTENERS)

        os.chdir(self.START_CTX['cwd'])

        # exec the process using the original environment
        os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ)

    def reload(self):
        old_address = self.cfg.address

        # reset old environment
        for k in self.cfg.env:
            if k in self.cfg.env_orig:
                # reset the key to the value it had before
                # we launched gunicorn
                os.environ[k] = self.cfg.env_orig[k]
            else:
                # delete the value set by gunicorn
                try:
                    del os.environ[k]
                except KeyError:
                    pass

        # reload conf
        self.app.reload()
        self.setup(self.app)

        # reopen log files
        self.log.reopen_files()

        # do we need to change the listeners?
        if old_address != self.cfg.address:
            # close all listeners
            for lnr in self.LISTENERS:
                lnr.close()
            # init new listeners
            self.LISTENERS = sock.create_sockets(self.cfg, self.log)
            listeners_str = ",".join([str(lnr) for lnr in self.LISTENERS])
            self.log.info("Listening at: %s", listeners_str)

        # do some actions on reload
        self.cfg.on_reload(self)

        # unlink pidfile
        if self.pidfile is not None:
            self.pidfile.unlink()

        # create new pidfile
        if self.cfg.pidfile is not None:
            self.pidfile = Pidfile(self.cfg.pidfile)
            self.pidfile.create(self.pid)

        # set new proc_name
        util._setproctitle("master [%s]" % self.proc_name)

        # spawn new workers
        for _ in range(self.cfg.workers):
            self.spawn_worker()

        # manage workers
        self.manage_workers()

    def murder_workers(self):
        """\
        Kill unused/idle workers
        """
        if not self.timeout:
            return
        workers = list(self.WORKERS.items())
        for (pid, worker) in workers:
            try:
                if time.monotonic() - worker.tmp.last_update() <= self.timeout:
                    continue
            except (OSError, ValueError):
                continue

            if not worker.aborted:
                self.log.critical("WORKER TIMEOUT (pid:%s)", pid)
                worker.aborted = True
                self.kill_worker(pid, signal.SIGABRT)
            else:
                self.kill_worker(pid, signal.SIGKILL)

    def reap_workers(self):
        """\
        Reap workers to avoid zombie processes
        """
        try:
            while True:
                wpid, status = os.waitpid(-1, os.WNOHANG)
                if not wpid:
                    break
                if self.reexec_pid == wpid:
                    self.reexec_pid = 0
                else:
                    # A worker was terminated. If the termination reason was
                    # that it could not boot, we'll shut it down to avoid
                    # infinite start/stop cycles.
                    exitcode = status >> 8
                    if exitcode != 0:
                        self.log.error('Worker (pid:%s) exited with code %s', wpid, exitcode)
                    if exitcode == self.WORKER_BOOT_ERROR:
                        reason = "Worker failed to boot."
                        raise HaltServer(reason, self.WORKER_BOOT_ERROR)
                    if exitcode == self.APP_LOAD_ERROR:
                        reason = "App failed to load."
                        raise HaltServer(reason, self.APP_LOAD_ERROR)

                    if exitcode > 0:
                        # If the exit code of the worker is greater than 0,
                        # let the user know.
                        self.log.error("Worker (pid:%s) exited with code %s.",
                                       wpid, exitcode)
                    elif status > 0:
                        # If the exit code of the worker is 0 and the status
                        # is greater than 0, then it was most likely killed
                        # via a signal.
                        try:
                            sig_name = signal.Signals(status).name
                        except ValueError:
                            sig_name = "code {}".format(status)
                        msg = "Worker (pid:{}) was sent {}!".format(
                            wpid, sig_name)

                        # Additional hint for SIGKILL
                        if status == signal.SIGKILL:
                            msg += " Perhaps out of memory?"
                        self.log.error(msg)

                    worker = self.WORKERS.pop(wpid, None)
                    if not worker:
                        continue
                    worker.tmp.close()
                    self.cfg.child_exit(self, worker)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise

    def manage_workers(self):
        """\
        Maintain the number of workers by spawning or killing
        as required.
        """
        if len(self.WORKERS) < self.num_workers:
            self.spawn_workers()

        workers = self.WORKERS.items()
        workers = sorted(workers, key=lambda w: w[1].age)
        while len(workers) > self.num_workers:
            (pid, _) = workers.pop(0)
            self.kill_worker(pid, signal.SIGTERM)

        active_worker_count = len(workers)
        if self._last_logged_active_worker_count != active_worker_count:
            self._last_logged_active_worker_count = active_worker_count
            self.log.debug("{0} workers".format(active_worker_count),
                           extra={"metric": "gunicorn.workers",
                                  "value": active_worker_count,
                                  "mtype": "gauge"})

    def spawn_worker(self):
        self.worker_age += 1
        worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
                                   self.app, self.timeout / 2.0,
                                   self.cfg, self.log)
        self.cfg.pre_fork(self, worker)
        pid = os.fork()
        if pid != 0:
            worker.pid = pid
            self.WORKERS[pid] = worker
            return pid

        # Do not inherit the temporary files of other workers
        for sibling in self.WORKERS.values():
            sibling.tmp.close()

        # Process Child
        worker.pid = os.getpid()
        try:
            util._setproctitle("worker [%s]" % self.proc_name)
            self.log.info("Booting worker with pid: %s", worker.pid)
            self.cfg.post_fork(self, worker)
            worker.init_process()
            sys.exit(0)
        except SystemExit:
            raise
        except AppImportError as e:
            self.log.debug("Exception while loading the application",
                           exc_info=True)
            print("%s" % e, file=sys.stderr)
            sys.stderr.flush()
            sys.exit(self.APP_LOAD_ERROR)
        except Exception:
            self.log.exception("Exception in worker process")
            if not worker.booted:
                sys.exit(self.WORKER_BOOT_ERROR)
            sys.exit(-1)
        finally:
            self.log.info("Worker exiting (pid: %s)", worker.pid)
            try:
                worker.tmp.close()
                self.cfg.worker_exit(self, worker)
            except Exception:
                self.log.warning("Exception during worker exit:\n%s",
                                 traceback.format_exc())

    def spawn_workers(self):
        """\
        Spawn new workers as needed.

        This is where a worker process leaves the main loop
        of the master process.
        """

        for _ in range(self.num_workers - len(self.WORKERS)):
            self.spawn_worker()
            time.sleep(0.1 * random.random())

    def kill_workers(self, sig):
        """\
        Kill all workers with the signal `sig`
        :attr sig: `signal.SIG*` value
        """
        worker_pids = list(self.WORKERS.keys())
        for pid in worker_pids:
            self.kill_worker(pid, sig)

    def kill_worker(self, pid, sig):
        """\
        Kill a worker

        :attr pid: int, worker pid
        :attr sig: `signal.SIG*` value
        """
        try:
            os.kill(pid, sig)
        except OSError as e:
            if e.errno == errno.ESRCH:
                try:
                    worker = self.WORKERS.pop(pid)
                    worker.tmp.close()
                    self.cfg.worker_exit(self, worker)
                    return
                except (KeyError, OSError):
                    return
            raise
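The TTIN/TTOU/HUP handlers above make the master resizable and reloadable at runtime. A sketch of driving them from another process (assumes the master was started with pidfile = "/tmp/gunicorn.pid"):

import os
import signal

# Read the master pid written by Pidfile.create()
with open("/tmp/gunicorn.pid") as f:
    master_pid = int(f.read().strip())

os.kill(master_pid, signal.SIGTTIN)  # handle_ttin: add one worker
os.kill(master_pid, signal.SIGTTOU)  # handle_ttou: remove one worker (never below 1)
os.kill(master_pid, signal.SIGHUP)   # handle_hup: reload config, replace workers gracefully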
venv/lib/python3.11/site-packages/gunicorn/config.py (Normal file, +2442 lines)
File diff suppressed because it is too large.
venv/lib/python3.11/site-packages/gunicorn/debug.py (Normal file, +68 lines)
@@ -0,0 +1,68 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

"""The debug module contains utilities and functions for better
debugging Gunicorn."""

import sys
import linecache
import re
import inspect

__all__ = ['spew', 'unspew']

_token_spliter = re.compile(r'\W+')


class Spew:

    def __init__(self, trace_names=None, show_values=True):
        self.trace_names = trace_names
        self.show_values = show_values

    def __call__(self, frame, event, arg):
        if event == 'line':
            lineno = frame.f_lineno
            if '__file__' in frame.f_globals:
                filename = frame.f_globals['__file__']
                if (filename.endswith('.pyc') or
                        filename.endswith('.pyo')):
                    filename = filename[:-1]
                name = frame.f_globals['__name__']
                line = linecache.getline(filename, lineno)
            else:
                name = '[unknown]'
                try:
                    src = inspect.getsourcelines(frame)
                    line = src[lineno]
                except OSError:
                    line = 'Unknown code named [%s]. VM instruction #%d' % (
                        frame.f_code.co_name, frame.f_lasti)
            if self.trace_names is None or name in self.trace_names:
                print('%s:%s: %s' % (name, lineno, line.rstrip()))
                if not self.show_values:
                    return self
                details = []
                tokens = _token_spliter.split(line)
                for tok in tokens:
                    if tok in frame.f_globals:
                        details.append('%s=%r' % (tok, frame.f_globals[tok]))
                    if tok in frame.f_locals:
                        details.append('%s=%r' % (tok, frame.f_locals[tok]))
                if details:
                    print("\t%s" % ' '.join(details))
        return self


def spew(trace_names=None, show_values=False):
    """Install a trace hook which writes incredibly detailed logs
    about what code is being executed to stdout.
    """
    sys.settrace(Spew(trace_names, show_values))


def unspew():
    """Remove the trace hook installed by spew.
    """
    sys.settrace(None)
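spew() installs the Spew tracer through sys.settrace, so every executed line is printed until unspew() is called. A sketch of bracketing a suspect code path with it (trace_names filters on module __name__; "myapp" is hypothetical):

from gunicorn import debug

debug.spew(trace_names=["myapp"])  # trace only lines executed in myapp
try:
    import myapp                   # hypothetical module under investigation
    myapp.main()
finally:
    debug.unspew()                 # always remove the trace hook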
venv/lib/python3.11/site-packages/gunicorn/errors.py (Normal file, +28 lines)
@@ -0,0 +1,28 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

# We don't need to call super() in __init__ methods of our
# BaseException and Exception classes because we also define
# our own __str__ methods so there is no need to pass 'message'
# to the base class to get a meaningful output from 'str(exc)'.
# pylint: disable=super-init-not-called


# we inherit from BaseException here to make sure to not be caught
# at application level
class HaltServer(BaseException):
    def __init__(self, reason, exit_status=1):
        self.reason = reason
        self.exit_status = exit_status

    def __str__(self):
        return "<HaltServer %r %d>" % (self.reason, self.exit_status)


class ConfigError(Exception):
    """ Exception raised on config error """


class AppImportError(Exception):
    """ Exception raised when loading an application """
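HaltServer deliberately subclasses BaseException so that a blanket `except Exception` in application code cannot swallow a shutdown request; only the arbiter's explicit handler catches it. A small demonstration of that property (a sketch using the class directly):

from gunicorn.errors import HaltServer

try:
    raise HaltServer("Worker failed to boot.", exit_status=3)
except Exception:
    print("never reached: HaltServer is not an Exception subclass")
except BaseException as exc:
    print(exc)  # <HaltServer 'Worker failed to boot.' 3>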
venv/lib/python3.11/site-packages/gunicorn/glogging.py (Normal file, +473 lines)
@@ -0,0 +1,473 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import base64
import binascii
import json
import time
import logging
logging.Logger.manager.emittedNoHandlerWarning = 1  # noqa
from logging.config import dictConfig
from logging.config import fileConfig
import os
import socket
import sys
import threading
import traceback

from gunicorn import util


# syslog facility codes
SYSLOG_FACILITIES = {
    "auth": 4,
    "authpriv": 10,
    "cron": 9,
    "daemon": 3,
    "ftp": 11,
    "kern": 0,
    "lpr": 6,
    "mail": 2,
    "news": 7,
    "security": 4,  # DEPRECATED
    "syslog": 5,
    "user": 1,
    "uucp": 8,
    "local0": 16,
    "local1": 17,
    "local2": 18,
    "local3": 19,
    "local4": 20,
    "local5": 21,
    "local6": 22,
    "local7": 23
}

CONFIG_DEFAULTS = {
    "version": 1,
    "disable_existing_loggers": False,
    "root": {"level": "INFO", "handlers": ["console"]},
    "loggers": {
        "gunicorn.error": {
            "level": "INFO",
            "handlers": ["error_console"],
            "propagate": True,
            "qualname": "gunicorn.error"
        },

        "gunicorn.access": {
            "level": "INFO",
            "handlers": ["console"],
            "propagate": True,
            "qualname": "gunicorn.access"
        }
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "generic",
            "stream": "ext://sys.stdout"
        },
        "error_console": {
            "class": "logging.StreamHandler",
            "formatter": "generic",
            "stream": "ext://sys.stderr"
        },
    },
    "formatters": {
        "generic": {
            "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
            "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
            "class": "logging.Formatter"
        }
    }
}


def loggers():
    """ get list of all loggers """
    root = logging.root
    existing = list(root.manager.loggerDict.keys())
    return [logging.getLogger(name) for name in existing]


class SafeAtoms(dict):

    def __init__(self, atoms):
        dict.__init__(self)
        for key, value in atoms.items():
            if isinstance(value, str):
                self[key] = value.replace('"', '\\"')
            else:
                self[key] = value

    def __getitem__(self, k):
        if k.startswith("{"):
            kl = k.lower()
            if kl in self:
                return super().__getitem__(kl)
            else:
                return "-"
        if k in self:
            return super().__getitem__(k)
        else:
            return '-'


def parse_syslog_address(addr):

    # unix domain socket type depends on backend
    # SysLogHandler will try both when given None
    if addr.startswith("unix://"):
        sock_type = None

        # set socket type only if explicitly requested
        parts = addr.split("#", 1)
        if len(parts) == 2:
            addr = parts[0]
            if parts[1] == "dgram":
                sock_type = socket.SOCK_DGRAM

        return (sock_type, addr.split("unix://")[1])

    if addr.startswith("udp://"):
        addr = addr.split("udp://")[1]
        socktype = socket.SOCK_DGRAM
    elif addr.startswith("tcp://"):
        addr = addr.split("tcp://")[1]
        socktype = socket.SOCK_STREAM
    else:
        raise RuntimeError("invalid syslog address")

    if '[' in addr and ']' in addr:
        host = addr.split(']')[0][1:].lower()
    elif ':' in addr:
        host = addr.split(':')[0].lower()
    elif addr == "":
        host = "localhost"
    else:
        host = addr.lower()

    addr = addr.split(']')[-1]
    if ":" in addr:
        port = addr.split(':', 1)[1]
        if not port.isdigit():
            raise RuntimeError("%r is not a valid port number." % port)
        port = int(port)
    else:
        port = 514

    return (socktype, (host, port))


class Logger:

    LOG_LEVELS = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG
    }
    loglevel = logging.INFO

    error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s"
    datefmt = r"[%Y-%m-%d %H:%M:%S %z]"

    access_fmt = "%(message)s"
    syslog_fmt = "[%(process)d] %(message)s"

    atoms_wrapper_class = SafeAtoms

    def __init__(self, cfg):
        self.error_log = logging.getLogger("gunicorn.error")
        self.error_log.propagate = False
        self.access_log = logging.getLogger("gunicorn.access")
        self.access_log.propagate = False
        self.error_handlers = []
        self.access_handlers = []
        self.logfile = None
        self.lock = threading.Lock()
        self.cfg = cfg
        self.setup(cfg)

    def setup(self, cfg):
        self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
        self.error_log.setLevel(self.loglevel)
        self.access_log.setLevel(logging.INFO)

        # set gunicorn.error handler
        if self.cfg.capture_output and cfg.errorlog != "-":
            for stream in sys.stdout, sys.stderr:
                stream.flush()

            self.logfile = open(cfg.errorlog, 'a+')
            os.dup2(self.logfile.fileno(), sys.stdout.fileno())
            os.dup2(self.logfile.fileno(), sys.stderr.fileno())

        self._set_handler(self.error_log, cfg.errorlog,
                          logging.Formatter(self.error_fmt, self.datefmt))

        # set gunicorn.access handler
        if cfg.accesslog is not None:
            self._set_handler(
                self.access_log, cfg.accesslog,
                fmt=logging.Formatter(self.access_fmt), stream=sys.stdout
            )

        # set syslog handler
        if cfg.syslog:
            self._set_syslog_handler(
                self.error_log, cfg, self.syslog_fmt, "error"
            )
            if not cfg.disable_redirect_access_to_syslog:
                self._set_syslog_handler(
                    self.access_log, cfg, self.syslog_fmt, "access"
                )

        if cfg.logconfig_dict:
            config = CONFIG_DEFAULTS.copy()
            config.update(cfg.logconfig_dict)
            try:
                dictConfig(config)
            except (
                    AttributeError,
                    ImportError,
                    ValueError,
                    TypeError
            ) as exc:
                raise RuntimeError(str(exc))
        elif cfg.logconfig_json:
            config = CONFIG_DEFAULTS.copy()
            if os.path.exists(cfg.logconfig_json):
                try:
                    config_json = json.load(open(cfg.logconfig_json))
                    config.update(config_json)
                    dictConfig(config)
                except (
                        json.JSONDecodeError,
                        AttributeError,
                        ImportError,
                        ValueError,
                        TypeError
                ) as exc:
                    raise RuntimeError(str(exc))
        elif cfg.logconfig:
            if os.path.exists(cfg.logconfig):
                defaults = CONFIG_DEFAULTS.copy()
                defaults['__file__'] = cfg.logconfig
                defaults['here'] = os.path.dirname(cfg.logconfig)
                fileConfig(cfg.logconfig, defaults=defaults,
                           disable_existing_loggers=False)
            else:
                msg = "Error: log config '%s' not found"
                raise RuntimeError(msg % cfg.logconfig)

    def critical(self, msg, *args, **kwargs):
        self.error_log.critical(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self.error_log.error(msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        self.error_log.warning(msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        self.error_log.info(msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        self.error_log.debug(msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        self.error_log.exception(msg, *args, **kwargs)

    def log(self, lvl, msg, *args, **kwargs):
        if isinstance(lvl, str):
            lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO)
        self.error_log.log(lvl, msg, *args, **kwargs)

    def atoms(self, resp, req, environ, request_time):
        """ Gets atoms for log formatting.
        """
        status = resp.status
        if isinstance(status, str):
            status = status.split(None, 1)[0]
        atoms = {
            'h': environ.get('REMOTE_ADDR', '-'),
            'l': '-',
            'u': self._get_user(environ) or '-',
            't': self.now(),
            'r': "%s %s %s" % (environ['REQUEST_METHOD'],
                               environ['RAW_URI'],
                               environ["SERVER_PROTOCOL"]),
            's': status,
            'm': environ.get('REQUEST_METHOD'),
            'U': environ.get('PATH_INFO'),
            'q': environ.get('QUERY_STRING'),
            'H': environ.get('SERVER_PROTOCOL'),
            'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
            'B': getattr(resp, 'sent', None),
            'f': environ.get('HTTP_REFERER', '-'),
            'a': environ.get('HTTP_USER_AGENT', '-'),
            'T': request_time.seconds,
            'D': (request_time.seconds * 1000000) + request_time.microseconds,
            'M': (request_time.seconds * 1000) + int(request_time.microseconds / 1000),
            'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
            'p': "<%s>" % os.getpid()
        }

        # add request headers
        if hasattr(req, 'headers'):
            req_headers = req.headers
        else:
            req_headers = req

        if hasattr(req_headers, "items"):
            req_headers = req_headers.items()

        atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})

        resp_headers = resp.headers
        if hasattr(resp_headers, "items"):
            resp_headers = resp_headers.items()

        # add response headers
        atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})

        # add environ variables
        environ_variables = environ.items()
        atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})

        return atoms

    def access(self, resp, req, environ, request_time):
        """ See http://httpd.apache.org/docs/2.0/logs.html#combined
        for format details
        """

        if not (self.cfg.accesslog or self.cfg.logconfig or
                self.cfg.logconfig_dict or self.cfg.logconfig_json or
                (self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)):
            return

        # wrap atoms:
        # - make sure atoms are tested case insensitively
        # - if an atom doesn't exist, replace it with '-'
        safe_atoms = self.atoms_wrapper_class(
            self.atoms(resp, req, environ, request_time)
        )

        try:
            self.access_log.info(self.cfg.access_log_format, safe_atoms)
        except Exception:
            self.error(traceback.format_exc())

    def now(self):
        """ return date in Apache Common Log Format """
        return time.strftime('[%d/%b/%Y:%H:%M:%S %z]')

    def reopen_files(self):
        if self.cfg.capture_output and self.cfg.errorlog != "-":
            for stream in sys.stdout, sys.stderr:
                stream.flush()

            with self.lock:
                if self.logfile is not None:
                    self.logfile.close()
                self.logfile = open(self.cfg.errorlog, 'a+')
                os.dup2(self.logfile.fileno(), sys.stdout.fileno())
                os.dup2(self.logfile.fileno(), sys.stderr.fileno())

        for log in loggers():
            for handler in log.handlers:
                if isinstance(handler, logging.FileHandler):
                    handler.acquire()
                    try:
                        if handler.stream:
                            handler.close()
                            handler.stream = handler._open()
                    finally:
                        handler.release()

    def close_on_exec(self):
        for log in loggers():
            for handler in log.handlers:
                if isinstance(handler, logging.FileHandler):
                    handler.acquire()
                    try:
                        if handler.stream:
                            util.close_on_exec(handler.stream.fileno())
                    finally:
                        handler.release()

    def _get_gunicorn_handler(self, log):
        for h in log.handlers:
            if getattr(h, "_gunicorn", False):
                return h

    def _set_handler(self, log, output, fmt, stream=None):
        # remove previous gunicorn log handler
        h = self._get_gunicorn_handler(log)
        if h:
            log.handlers.remove(h)

        if output is not None:
            if output == "-":
                h = logging.StreamHandler(stream)
            else:
                util.check_is_writable(output)
                h = logging.FileHandler(output)
                # make sure the user can reopen the file
                try:
                    os.chown(h.baseFilename, self.cfg.user, self.cfg.group)
                except OSError:
                    # it's probably OK there, we assume the user has given
                    # /dev/null as a parameter.
                    pass

            h.setFormatter(fmt)
            h._gunicorn = True
            log.addHandler(h)

    def _set_syslog_handler(self, log, cfg, fmt, name):
        # setup format
        prefix = cfg.syslog_prefix or cfg.proc_name.replace(":", ".")

        prefix = "gunicorn.%s.%s" % (prefix, name)

        # set format
        fmt = logging.Formatter(r"%s: %s" % (prefix, fmt))

        # syslog facility
        try:
            facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()]
        except KeyError:
            raise RuntimeError("unknown facility name")

        # parse syslog address
        socktype, addr = parse_syslog_address(cfg.syslog_addr)

        # finally setup the syslog handler
        h = logging.handlers.SysLogHandler(address=addr,
                                           facility=facility, socktype=socktype)

        h.setFormatter(fmt)
        h._gunicorn = True
        log.addHandler(h)

    def _get_user(self, environ):
        user = None
        http_auth = environ.get("HTTP_AUTHORIZATION")
        if http_auth and http_auth.lower().startswith('basic'):
            auth = http_auth.split(" ", 1)
            if len(auth) == 2:
                try:
                    # b64decode doesn't accept unicode in Python < 3.3
                    # so we need to convert it to a byte string
                    auth = base64.b64decode(auth[1].strip().encode('utf-8'))
                    # b64decode returns a byte string
                    user = auth.split(b":", 1)[0].decode("UTF-8")
                except (TypeError, binascii.Error, UnicodeDecodeError) as exc:
                    self.debug("Couldn't get username: %s", exc)
        return user
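Logger.atoms() builds the dictionary that access_log_format is rendered against, and SafeAtoms substitutes '-' for anything missing, so a bad format atom degrades instead of crashing. A sketch of the related settings in a hypothetical gunicorn.conf.py (accesslog and access_log_format are real settings; the format shown is the Apache combined-style default):

# gunicorn.conf.py
accesslog = "-"  # "-" makes _set_handler() attach a StreamHandler on stdout
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'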
@@ -0,0 +1,8 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

from gunicorn.http.message import Message, Request
from gunicorn.http.parser import RequestParser

__all__ = ['Message', 'Request', 'RequestParser']
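Because this package root re-exports the message and parser classes, both import paths below name the same objects (a small sketch):

from gunicorn.http import Message, Request, RequestParser
from gunicorn.http.parser import RequestParser as ParserDirect

assert RequestParser is ParserDirect  # same class, two import paths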
Binary files not shown.
268
venv/lib/python3.11/site-packages/gunicorn/http/body.py
Normal file
268
venv/lib/python3.11/site-packages/gunicorn/http/body.py
Normal file
@ -0,0 +1,268 @@
|
||||
#
|
||||
# This file is part of gunicorn released under the MIT license.
|
||||
# See the NOTICE for more information.
|
||||
|
||||
import io
|
||||
import sys
|
||||
|
||||
from gunicorn.http.errors import (NoMoreData, ChunkMissingTerminator,
|
||||
InvalidChunkSize)
|
||||
|
||||
|
||||
class ChunkedReader:
|
||||
def __init__(self, req, unreader):
|
||||
self.req = req
|
||||
self.parser = self.parse_chunked(unreader)
|
||||
self.buf = io.BytesIO()
|
||||
|
||||
def read(self, size):
|
||||
if not isinstance(size, int):
|
||||
raise TypeError("size must be an integer type")
|
||||
if size < 0:
|
||||
raise ValueError("Size must be positive.")
|
||||
if size == 0:
|
||||
return b""
|
||||
|
||||
        if self.parser:
            while self.buf.tell() < size:
                try:
                    self.buf.write(next(self.parser))
                except StopIteration:
                    self.parser = None
                    break

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret

    def parse_trailers(self, unreader, data):
        buf = io.BytesIO()
        buf.write(data)

        idx = buf.getvalue().find(b"\r\n\r\n")
        done = buf.getvalue()[:2] == b"\r\n"
        while idx < 0 and not done:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n\r\n")
            done = buf.getvalue()[:2] == b"\r\n"
        if done:
            unreader.unread(buf.getvalue()[2:])
            return b""
        self.req.trailers = self.req.parse_headers(buf.getvalue()[:idx], from_trailer=True)
        unreader.unread(buf.getvalue()[idx + 4:])

    def parse_chunked(self, unreader):
        (size, rest) = self.parse_chunk_size(unreader)
        while size > 0:
            while size > len(rest):
                size -= len(rest)
                yield rest
                rest = unreader.read()
                if not rest:
                    raise NoMoreData()
            yield rest[:size]
            # Remove \r\n after chunk
            rest = rest[size:]
            while len(rest) < 2:
                new_data = unreader.read()
                if not new_data:
                    break
                rest += new_data
            if rest[:2] != b'\r\n':
                raise ChunkMissingTerminator(rest[:2])
            (size, rest) = self.parse_chunk_size(unreader, data=rest[2:])

    def parse_chunk_size(self, unreader, data=None):
        buf = io.BytesIO()
        if data is not None:
            buf.write(data)

        idx = buf.getvalue().find(b"\r\n")
        while idx < 0:
            self.get_data(unreader, buf)
            idx = buf.getvalue().find(b"\r\n")

        data = buf.getvalue()
        line, rest_chunk = data[:idx], data[idx + 2:]

        # RFC9112 7.1.1: BWS before chunk-ext - but ONLY then
        chunk_size, *chunk_ext = line.split(b";", 1)
        if chunk_ext:
            chunk_size = chunk_size.rstrip(b" \t")
        if any(n not in b"0123456789abcdefABCDEF" for n in chunk_size):
            raise InvalidChunkSize(chunk_size)
        if len(chunk_size) == 0:
            raise InvalidChunkSize(chunk_size)
        chunk_size = int(chunk_size, 16)

        if chunk_size == 0:
            try:
                self.parse_trailers(unreader, rest_chunk)
            except NoMoreData:
                pass
            return (0, None)
        return (chunk_size, rest_chunk)

    def get_data(self, unreader, buf):
        data = unreader.read()
        if not data:
            raise NoMoreData()
        buf.write(data)


class LengthReader:
    def __init__(self, unreader, length):
        self.unreader = unreader
        self.length = length

    def read(self, size):
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")

        size = min(self.length, size)
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        buf = io.BytesIO()
        data = self.unreader.read()
        while data:
            buf.write(data)
            if buf.tell() >= size:
                break
            data = self.unreader.read()

        buf = buf.getvalue()
        ret, rest = buf[:size], buf[size:]
        self.unreader.unread(rest)
        self.length -= size
        return ret


class EOFReader:
    def __init__(self, unreader):
        self.unreader = unreader
        self.buf = io.BytesIO()
        self.finished = False

    def read(self, size):
        if not isinstance(size, int):
            raise TypeError("size must be an integral type")
        if size < 0:
            raise ValueError("Size must be positive.")
        if size == 0:
            return b""

        if self.finished:
            data = self.buf.getvalue()
            ret, rest = data[:size], data[size:]
            self.buf = io.BytesIO()
            self.buf.write(rest)
            return ret

        data = self.unreader.read()
        while data:
            self.buf.write(data)
            if self.buf.tell() > size:
                break
            data = self.unreader.read()

        if not data:
            self.finished = True

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret


class Body:
    def __init__(self, reader):
        self.reader = reader
        self.buf = io.BytesIO()

    def __iter__(self):
        return self

    def __next__(self):
        ret = self.readline()
        if not ret:
            raise StopIteration()
        return ret

    next = __next__

    def getsize(self, size):
        if size is None:
            return sys.maxsize
        elif not isinstance(size, int):
            raise TypeError("size must be an integral type")
        elif size < 0:
            return sys.maxsize
        return size

    def read(self, size=None):
        size = self.getsize(size)
        if size == 0:
            return b""

        if size < self.buf.tell():
            data = self.buf.getvalue()
            ret, rest = data[:size], data[size:]
            self.buf = io.BytesIO()
            self.buf.write(rest)
            return ret

        while size > self.buf.tell():
            data = self.reader.read(1024)
            if not data:
                break
            self.buf.write(data)

        data = self.buf.getvalue()
        ret, rest = data[:size], data[size:]
        self.buf = io.BytesIO()
        self.buf.write(rest)
        return ret

    def readline(self, size=None):
        size = self.getsize(size)
        if size == 0:
            return b""

        data = self.buf.getvalue()
        self.buf = io.BytesIO()

        ret = []
        while 1:
            idx = data.find(b"\n", 0, size)
            idx = idx + 1 if idx >= 0 else size if len(data) >= size else 0
            if idx:
                ret.append(data[:idx])
                self.buf.write(data[idx:])
                break

            ret.append(data)
            size -= len(data)
            data = self.reader.read(min(1024, size))
            if not data:
                break

        return b"".join(ret)

    def readlines(self, size=None):
        ret = []
        data = self.read()
        while data:
            pos = data.find(b"\n")
            if pos < 0:
                ret.append(data)
                data = b""
            else:
                line, data = data[:pos + 1], data[pos + 1:]
                ret.append(line)
        return ret
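To make the composition above concrete, here is a minimal sketch (not part of the diff, and assuming this venv's gunicorn is importable): IterUnreader supplies raw chunks, LengthReader enforces the declared Content-Length, and Body layers file-like reads on top.

from gunicorn.http.body import Body, LengthReader
from gunicorn.http.unreader import IterUnreader

unreader = IterUnreader([b"hello ", b"world", b"junk after the body"])
body = Body(LengthReader(unreader, 11))   # as if Content-Length: 11

print(body.read(5))   # b'hello'
print(body.read())    # b' world' -- reading stops at the declared length
print(body.read())    # b'' -- exhausted; the trailing junk is never consumed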
145
venv/lib/python3.11/site-packages/gunicorn/http/errors.py
Normal file
@ -0,0 +1,145 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

# We don't need to call super() in __init__ methods of our
# BaseException and Exception classes because we also define
# our own __str__ methods so there is no need to pass 'message'
# to the base class to get a meaningful output from 'str(exc)'.
# pylint: disable=super-init-not-called


class ParseException(Exception):
    pass


class NoMoreData(IOError):
    def __init__(self, buf=None):
        self.buf = buf

    def __str__(self):
        return "No more data after: %r" % self.buf


class ConfigurationProblem(ParseException):
    def __init__(self, info):
        self.info = info
        self.code = 500

    def __str__(self):
        return "Configuration problem: %s" % self.info


class InvalidRequestLine(ParseException):
    def __init__(self, req):
        self.req = req
        self.code = 400

    def __str__(self):
        return "Invalid HTTP request line: %r" % self.req


class InvalidRequestMethod(ParseException):
    def __init__(self, method):
        self.method = method

    def __str__(self):
        return "Invalid HTTP method: %r" % self.method


class InvalidHTTPVersion(ParseException):
    def __init__(self, version):
        self.version = version

    def __str__(self):
        return "Invalid HTTP Version: %r" % (self.version,)


class InvalidHeader(ParseException):
    def __init__(self, hdr, req=None):
        self.hdr = hdr
        self.req = req

    def __str__(self):
        return "Invalid HTTP Header: %r" % self.hdr


class ObsoleteFolding(ParseException):
    def __init__(self, hdr):
        self.hdr = hdr

    def __str__(self):
        return "Obsolete line folding is unacceptable: %r" % (self.hdr, )


class InvalidHeaderName(ParseException):
    def __init__(self, hdr):
        self.hdr = hdr

    def __str__(self):
        return "Invalid HTTP header name: %r" % self.hdr


class UnsupportedTransferCoding(ParseException):
    def __init__(self, hdr):
        self.hdr = hdr
        self.code = 501

    def __str__(self):
        return "Unsupported transfer coding: %r" % self.hdr


class InvalidChunkSize(IOError):
    def __init__(self, data):
        self.data = data

    def __str__(self):
        return "Invalid chunk size: %r" % self.data


class ChunkMissingTerminator(IOError):
    def __init__(self, term):
        self.term = term

    def __str__(self):
        return "Invalid chunk terminator is not '\\r\\n': %r" % self.term


class LimitRequestLine(ParseException):
    def __init__(self, size, max_size):
        self.size = size
        self.max_size = max_size

    def __str__(self):
        return "Request Line is too large (%s > %s)" % (self.size, self.max_size)


class LimitRequestHeaders(ParseException):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg


class InvalidProxyLine(ParseException):
    def __init__(self, line):
        self.line = line
        self.code = 400

    def __str__(self):
        return "Invalid PROXY line: %r" % self.line


class ForbiddenProxyRequest(ParseException):
    def __init__(self, host):
        self.host = host
        self.code = 403

    def __str__(self):
        return "Proxy request from %r not allowed" % self.host


class InvalidSchemeHeaders(ParseException):
    def __str__(self):
        return "Contradictory scheme headers"
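Several of these exceptions carry an optional `.code` attribute that callers can map to an HTTP status. A minimal sketch (not part of the diff; the 400 default is an assumption of the demo, not something errors.py enforces):

from gunicorn.http.errors import ForbiddenProxyRequest, ParseException

try:
    raise ForbiddenProxyRequest("203.0.113.7")
except ParseException as exc:
    status = getattr(exc, "code", 400)  # some subclasses set .code (403/500/501)
    print(status, str(exc))             # 403 Proxy request from '203.0.113.7' not allowed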
463
venv/lib/python3.11/site-packages/gunicorn/http/message.py
Normal file
@ -0,0 +1,463 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import io
import re
import socket

from gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body
from gunicorn.http.errors import (
    InvalidHeader, InvalidHeaderName, NoMoreData,
    InvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion,
    LimitRequestLine, LimitRequestHeaders,
    UnsupportedTransferCoding, ObsoleteFolding,
)
from gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest
from gunicorn.http.errors import InvalidSchemeHeaders
from gunicorn.util import bytes_to_str, split_request_uri

MAX_REQUEST_LINE = 8190
MAX_HEADERS = 32768
DEFAULT_MAX_HEADERFIELD_SIZE = 8190

# verbosely on purpose, avoid backslash ambiguity
RFC9110_5_6_2_TOKEN_SPECIALS = r"!#$%&'*+-.^_`|~"
TOKEN_RE = re.compile(r"[%s0-9a-zA-Z]+" % (re.escape(RFC9110_5_6_2_TOKEN_SPECIALS)))
METHOD_BADCHAR_RE = re.compile("[a-z#]")
# usually 1.0 or 1.1 - RFC9112 permits restricting to single-digit versions
VERSION_RE = re.compile(r"HTTP/(\d)\.(\d)")
RFC9110_5_5_INVALID_AND_DANGEROUS = re.compile(r"[\0\r\n]")


class Message:
    def __init__(self, cfg, unreader, peer_addr):
        self.cfg = cfg
        self.unreader = unreader
        self.peer_addr = peer_addr
        self.remote_addr = peer_addr
        self.version = None
        self.headers = []
        self.trailers = []
        self.body = None
        self.scheme = "https" if cfg.is_ssl else "http"
        self.must_close = False

        # set headers limits
        self.limit_request_fields = cfg.limit_request_fields
        if (self.limit_request_fields <= 0
                or self.limit_request_fields > MAX_HEADERS):
            self.limit_request_fields = MAX_HEADERS
        self.limit_request_field_size = cfg.limit_request_field_size
        if self.limit_request_field_size < 0:
            self.limit_request_field_size = DEFAULT_MAX_HEADERFIELD_SIZE

        # set max header buffer size
        max_header_field_size = self.limit_request_field_size or DEFAULT_MAX_HEADERFIELD_SIZE
        self.max_buffer_headers = self.limit_request_fields * \
            (max_header_field_size + 2) + 4

        unused = self.parse(self.unreader)
        self.unreader.unread(unused)
        self.set_body_reader()

    def force_close(self):
        self.must_close = True

    def parse(self, unreader):
        raise NotImplementedError()

    def parse_headers(self, data, from_trailer=False):
        cfg = self.cfg
        headers = []

        # Split lines on \r\n
        lines = [bytes_to_str(line) for line in data.split(b"\r\n")]

        # handle scheme headers
        scheme_header = False
        secure_scheme_headers = {}
        forwarder_headers = []
        if from_trailer:
            # nonsense. either a request is https from the beginning
            # .. or we are just behind a proxy who does not remove conflicting trailers
            pass
        elif ('*' in cfg.forwarded_allow_ips or
                not isinstance(self.peer_addr, tuple)
                or self.peer_addr[0] in cfg.forwarded_allow_ips):
            secure_scheme_headers = cfg.secure_scheme_headers
            forwarder_headers = cfg.forwarder_headers

        # Parse headers into key/value pairs paying attention
        # to continuation lines.
        while lines:
            if len(headers) >= self.limit_request_fields:
                raise LimitRequestHeaders("limit request headers fields")

            # Parse initial header name: value pair.
            curr = lines.pop(0)
            header_length = len(curr) + len("\r\n")
            if curr.find(":") <= 0:
                raise InvalidHeader(curr)
            name, value = curr.split(":", 1)
            if self.cfg.strip_header_spaces:
                name = name.rstrip(" \t")
            if not TOKEN_RE.fullmatch(name):
                raise InvalidHeaderName(name)

            # this is still a dangerous place to do this
            # but it is more correct than doing it before the pattern match:
            # after we entered Unicode wonderland, 8bits could case-shift into ASCII:
            # b"\xDF".decode("latin-1").upper().encode("ascii") == b"SS"
            name = name.upper()

            value = [value.strip(" \t")]

            # Consume value continuation lines..
            while lines and lines[0].startswith((" ", "\t")):
                # .. which is obsolete here, and no longer done by default
                if not self.cfg.permit_obsolete_folding:
                    raise ObsoleteFolding(name)
                curr = lines.pop(0)
                header_length += len(curr) + len("\r\n")
                if header_length > self.limit_request_field_size > 0:
                    raise LimitRequestHeaders("limit request headers "
                                              "fields size")
                value.append(curr.strip("\t "))
            value = " ".join(value)

            if RFC9110_5_5_INVALID_AND_DANGEROUS.search(value):
                raise InvalidHeader(name)

            if header_length > self.limit_request_field_size > 0:
                raise LimitRequestHeaders("limit request headers fields size")

            if name in secure_scheme_headers:
                secure = value == secure_scheme_headers[name]
                scheme = "https" if secure else "http"
                if scheme_header:
                    if scheme != self.scheme:
                        raise InvalidSchemeHeaders()
                else:
                    scheme_header = True
                    self.scheme = scheme

            # ambiguous mapping allows fooling downstream, e.g. merging non-identical headers:
            # X-Forwarded-For: 2001:db8::ha:cc:ed
            # X_Forwarded_For: 127.0.0.1,::1
            # HTTP_X_FORWARDED_FOR = 2001:db8::ha:cc:ed,127.0.0.1,::1
            # Only modify after fixing *ALL* header transformations; network to wsgi env
            if "_" in name:
                if name in forwarder_headers or "*" in forwarder_headers:
                    # This forwarder may override our environment
                    pass
                elif self.cfg.header_map == "dangerous":
                    # as if we did not know we cannot safely map this
                    pass
                elif self.cfg.header_map == "drop":
                    # almost as if it never had been there
                    # but still counts against resource limits
                    continue
                else:
                    # fail-safe fallthrough: refuse
                    raise InvalidHeaderName(name)

            headers.append((name, value))

        return headers

    def set_body_reader(self):
        chunked = False
        content_length = None

        for (name, value) in self.headers:
            if name == "CONTENT-LENGTH":
                if content_length is not None:
                    raise InvalidHeader("CONTENT-LENGTH", req=self)
                content_length = value
            elif name == "TRANSFER-ENCODING":
                # T-E can be a list
                # https://datatracker.ietf.org/doc/html/rfc9112#name-transfer-encoding
                vals = [v.strip() for v in value.split(',')]
                for val in vals:
                    if val.lower() == "chunked":
                        # DANGER: transfer codings stack, and stacked chunking is never intended
                        if chunked:
                            raise InvalidHeader("TRANSFER-ENCODING", req=self)
                        chunked = True
                    elif val.lower() == "identity":
                        # does not do much, could still plausibly desync from what the proxy does
                        # safe option: nuke it, it's never needed
                        if chunked:
                            raise InvalidHeader("TRANSFER-ENCODING", req=self)
                    elif val.lower() in ('compress', 'deflate', 'gzip'):
                        # chunked should be the last one
                        if chunked:
                            raise InvalidHeader("TRANSFER-ENCODING", req=self)
                        self.force_close()
                    else:
                        raise UnsupportedTransferCoding(value)

        if chunked:
            # two potentially dangerous cases:
            # a) CL + TE (TE overrides CL.. only safe if the recipient sees it that way too)
            # b) chunked HTTP/1.0 (always faulty)
            if self.version < (1, 1):
                # framing wonky, see RFC 9112 Section 6.1
                raise InvalidHeader("TRANSFER-ENCODING", req=self)
            if content_length is not None:
                # we cannot be certain the message framing we understood matches proxy intent
                # -> whatever happens next, remaining input must not be trusted
                raise InvalidHeader("CONTENT-LENGTH", req=self)
            self.body = Body(ChunkedReader(self, self.unreader))
        elif content_length is not None:
            try:
                if str(content_length).isnumeric():
                    content_length = int(content_length)
                else:
                    raise InvalidHeader("CONTENT-LENGTH", req=self)
            except ValueError:
                raise InvalidHeader("CONTENT-LENGTH", req=self)

            if content_length < 0:
                raise InvalidHeader("CONTENT-LENGTH", req=self)

            self.body = Body(LengthReader(self.unreader, content_length))
        else:
            self.body = Body(EOFReader(self.unreader))

    def should_close(self):
        if self.must_close:
            return True
        for (h, v) in self.headers:
            if h == "CONNECTION":
                v = v.lower().strip(" \t")
                if v == "close":
                    return True
                elif v == "keep-alive":
                    return False
                break
        return self.version <= (1, 0)


class Request(Message):
    def __init__(self, cfg, unreader, peer_addr, req_number=1):
        self.method = None
        self.uri = None
        self.path = None
        self.query = None
        self.fragment = None

        # get max request line size
        self.limit_request_line = cfg.limit_request_line
        if (self.limit_request_line < 0
                or self.limit_request_line >= MAX_REQUEST_LINE):
            self.limit_request_line = MAX_REQUEST_LINE

        self.req_number = req_number
        self.proxy_protocol_info = None
        super().__init__(cfg, unreader, peer_addr)

    def get_data(self, unreader, buf, stop=False):
        data = unreader.read()
        if not data:
            if stop:
                raise StopIteration()
            raise NoMoreData(buf.getvalue())
        buf.write(data)

    def parse(self, unreader):
        buf = io.BytesIO()
        self.get_data(unreader, buf, stop=True)

        # get request line
        line, rbuf = self.read_line(unreader, buf, self.limit_request_line)

        # proxy protocol
        if self.proxy_protocol(bytes_to_str(line)):
            # get next request line
            buf = io.BytesIO()
            buf.write(rbuf)
            line, rbuf = self.read_line(unreader, buf, self.limit_request_line)

        self.parse_request_line(line)
        buf = io.BytesIO()
        buf.write(rbuf)

        # Headers
        data = buf.getvalue()
        idx = data.find(b"\r\n\r\n")

        done = data[:2] == b"\r\n"
        while True:
            idx = data.find(b"\r\n\r\n")
            done = data[:2] == b"\r\n"

            if idx < 0 and not done:
                self.get_data(unreader, buf)
                data = buf.getvalue()
                if len(data) > self.max_buffer_headers:
                    raise LimitRequestHeaders("max buffer headers")
            else:
                break

        if done:
            self.unreader.unread(data[2:])
            return b""

        self.headers = self.parse_headers(data[:idx], from_trailer=False)

        ret = data[idx + 4:]
        buf = None
        return ret

    def read_line(self, unreader, buf, limit=0):
        data = buf.getvalue()

        while True:
            idx = data.find(b"\r\n")
            if idx >= 0:
                # check if the request line is too large
                if idx > limit > 0:
                    raise LimitRequestLine(idx, limit)
                break
            if len(data) - 2 > limit > 0:
                raise LimitRequestLine(len(data), limit)
            self.get_data(unreader, buf)
            data = buf.getvalue()

        return (data[:idx],  # request line,
                data[idx + 2:])  # residue in the buffer, skip \r\n

    def proxy_protocol(self, line):
        """\
        Detect, check and parse proxy protocol.

        :raises: ForbiddenProxyRequest, InvalidProxyLine.
        :return: True for proxy protocol line else False
        """
        if not self.cfg.proxy_protocol:
            return False

        if self.req_number != 1:
            return False

        if not line.startswith("PROXY"):
            return False

        self.proxy_protocol_access_check()
        self.parse_proxy_protocol(line)

        return True

    def proxy_protocol_access_check(self):
        # check in allow list
        if ("*" not in self.cfg.proxy_allow_ips and
                isinstance(self.peer_addr, tuple) and
                self.peer_addr[0] not in self.cfg.proxy_allow_ips):
            raise ForbiddenProxyRequest(self.peer_addr[0])

    def parse_proxy_protocol(self, line):
        bits = line.split(" ")

        if len(bits) != 6:
            raise InvalidProxyLine(line)

        # Extract data
        proto = bits[1]
        s_addr = bits[2]
        d_addr = bits[3]

        # Validation
        if proto not in ["TCP4", "TCP6"]:
            raise InvalidProxyLine("protocol '%s' not supported" % proto)
        if proto == "TCP4":
            try:
                socket.inet_pton(socket.AF_INET, s_addr)
                socket.inet_pton(socket.AF_INET, d_addr)
            except OSError:
                raise InvalidProxyLine(line)
        elif proto == "TCP6":
            try:
                socket.inet_pton(socket.AF_INET6, s_addr)
                socket.inet_pton(socket.AF_INET6, d_addr)
            except OSError:
                raise InvalidProxyLine(line)

        try:
            s_port = int(bits[4])
            d_port = int(bits[5])
        except ValueError:
            raise InvalidProxyLine("invalid port %s" % line)

        if not ((0 <= s_port <= 65535) and (0 <= d_port <= 65535)):
            raise InvalidProxyLine("invalid port %s" % line)

        # Set data
        self.proxy_protocol_info = {
            "proxy_protocol": proto,
            "client_addr": s_addr,
            "client_port": s_port,
            "proxy_addr": d_addr,
            "proxy_port": d_port
        }

    def parse_request_line(self, line_bytes):
        bits = [bytes_to_str(bit) for bit in line_bytes.split(b" ", 2)]
        if len(bits) != 3:
            raise InvalidRequestLine(bytes_to_str(line_bytes))

        # Method: RFC9110 Section 9
        self.method = bits[0]

        # nonstandard restriction, suitable for all IANA registered methods
        # partially enforced in previous gunicorn versions
        if not self.cfg.permit_unconventional_http_method:
            if METHOD_BADCHAR_RE.search(self.method):
                raise InvalidRequestMethod(self.method)
            if not 3 <= len(bits[0]) <= 20:
                raise InvalidRequestMethod(self.method)
        # standard restriction: RFC9110 token
        if not TOKEN_RE.fullmatch(self.method):
            raise InvalidRequestMethod(self.method)
        # nonstandard and dangerous
        # methods are merely uppercase by convention, no case-insensitive treatment is intended
        if self.cfg.casefold_http_method:
            self.method = self.method.upper()

        # URI
        self.uri = bits[1]

        # Python stdlib explicitly tells us it will not perform validation.
        # https://docs.python.org/3/library/urllib.parse.html#url-parsing-security
        # There are *four* `request-target` forms in rfc9112, none of them can be empty:
        # 1. origin-form, which starts with a slash
        # 2. absolute-form, which starts with a non-empty scheme
        # 3. authority-form, (for CONNECT) which contains a colon after the host
        # 4. asterisk-form, which is an asterisk (`\x2A`)
        # => manually reject one always invalid URI: empty
        if len(self.uri) == 0:
            raise InvalidRequestLine(bytes_to_str(line_bytes))

        try:
            parts = split_request_uri(self.uri)
        except ValueError:
            raise InvalidRequestLine(bytes_to_str(line_bytes))
        self.path = parts.path or ""
        self.query = parts.query or ""
        self.fragment = parts.fragment or ""

        # Version
        match = VERSION_RE.fullmatch(bits[2])
        if match is None:
            raise InvalidHTTPVersion(bits[2])
        self.version = (int(match.group(1)), int(match.group(2)))
        if not (1, 0) <= self.version < (2, 0):
            # if ever relaxing this, carefully review Content-Encoding processing
            if not self.cfg.permit_unconventional_http_version:
                raise InvalidHTTPVersion(self.version)

    def set_body_reader(self):
        super().set_body_reader()
        if isinstance(self.body.reader, EOFReader):
            self.body = Body(LengthReader(self.unreader, 0))
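The module-level patterns above do most of the request-line and header-name policing. A minimal sketch (not part of the diff) exercising them directly:

from gunicorn.http.message import TOKEN_RE, VERSION_RE, METHOD_BADCHAR_RE

print(bool(TOKEN_RE.fullmatch("X-Forwarded-For")))   # True: a valid RFC9110 token
print(bool(TOKEN_RE.fullmatch("Bad Header")))        # False: spaces are not token chars
print(bool(METHOD_BADCHAR_RE.search("get")))         # True: lowercase methods are flagged
print(VERSION_RE.fullmatch("HTTP/1.1").groups())     # ('1', '1')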
51
venv/lib/python3.11/site-packages/gunicorn/http/parser.py
Normal file
@ -0,0 +1,51 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

from gunicorn.http.message import Request
from gunicorn.http.unreader import SocketUnreader, IterUnreader


class Parser:

    mesg_class = None

    def __init__(self, cfg, source, source_addr):
        self.cfg = cfg
        if hasattr(source, "recv"):
            self.unreader = SocketUnreader(source)
        else:
            self.unreader = IterUnreader(source)
        self.mesg = None
        self.source_addr = source_addr

        # request counter (for keepalive connections)
        self.req_count = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Stop if HTTP dictates a stop.
        if self.mesg and self.mesg.should_close():
            raise StopIteration()

        # Discard any unread body of the previous message
        if self.mesg:
            data = self.mesg.body.read(8192)
            while data:
                data = self.mesg.body.read(8192)

        # Parse the next request
        self.req_count += 1
        self.mesg = self.mesg_class(self.cfg, self.unreader, self.source_addr, self.req_count)
        if not self.mesg:
            raise StopIteration()
        return self.mesg

    next = __next__


class RequestParser(Parser):

    mesg_class = Request
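Because the parser falls back to IterUnreader for any source without recv(), it can be driven from an in-memory byte iterable instead of a socket. A minimal sketch (not part of the diff; treating the default Config as sufficient here is an assumption):

from gunicorn.config import Config
from gunicorn.http.parser import RequestParser

raw = [b"GET /ping?x=1 HTTP/1.1\r\nHost: example.test\r\n\r\n"]
parser = RequestParser(Config(), iter(raw), ("127.0.0.1", 40000))

req = next(parser)  # parses one request from the iterable source
print(req.method, req.path, req.query)  # GET /ping x=1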
78
venv/lib/python3.11/site-packages/gunicorn/http/unreader.py
Normal file
@ -0,0 +1,78 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import io
import os

# Classes that can undo reading data from
# a given type of data source.


class Unreader:
    def __init__(self):
        self.buf = io.BytesIO()

    def chunk(self):
        raise NotImplementedError()

    def read(self, size=None):
        if size is not None and not isinstance(size, int):
            raise TypeError("size parameter must be an int or long.")

        if size is not None:
            if size == 0:
                return b""
            if size < 0:
                size = None

        self.buf.seek(0, os.SEEK_END)

        if size is None and self.buf.tell():
            ret = self.buf.getvalue()
            self.buf = io.BytesIO()
            return ret
        if size is None:
            d = self.chunk()
            return d

        while self.buf.tell() < size:
            chunk = self.chunk()
            if not chunk:
                ret = self.buf.getvalue()
                self.buf = io.BytesIO()
                return ret
            self.buf.write(chunk)
        data = self.buf.getvalue()
        self.buf = io.BytesIO()
        self.buf.write(data[size:])
        return data[:size]

    def unread(self, data):
        self.buf.seek(0, os.SEEK_END)
        self.buf.write(data)


class SocketUnreader(Unreader):
    def __init__(self, sock, max_chunk=8192):
        super().__init__()
        self.sock = sock
        self.mxchunk = max_chunk

    def chunk(self):
        return self.sock.recv(self.mxchunk)


class IterUnreader(Unreader):
    def __init__(self, iterable):
        super().__init__()
        self.iter = iter(iterable)

    def chunk(self):
        if not self.iter:
            return b""
        try:
            return next(self.iter)
        except StopIteration:
            self.iter = None
            return b""
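A minimal sketch (not part of the diff) of the unread/re-read cycle the parser relies on when it pulls too many bytes off the source; note that unread() appends to the internal buffer, so it acts as a pure push-back only when that buffer is already drained, which is how the parser uses it:

from gunicorn.http.unreader import IterUnreader

u = IterUnreader([b"abcd", b"ef"])
print(u.read(6))   # b'abcdef' -- pulls two chunks, buffer now empty
u.unread(b"ef")    # push bytes back in front of future chunks
print(u.read())    # b'ef' -- buffered bytes are returned before new chunks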
401
venv/lib/python3.11/site-packages/gunicorn/http/wsgi.py
Normal file
@ -0,0 +1,401 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import io
import logging
import os
import re
import sys

from gunicorn.http.message import TOKEN_RE
from gunicorn.http.errors import ConfigurationProblem, InvalidHeader, InvalidHeaderName
from gunicorn import SERVER_SOFTWARE, SERVER
from gunicorn import util

# Send files in at most 1GB blocks as some operating systems can have problems
# with sending files in blocks over 2GB.
BLKSIZE = 0x3FFFFFFF

# RFC9110 5.5: field-vchar = VCHAR / obs-text
# RFC4234 B.1: VCHAR = 0x21-x07E = printable ASCII
HEADER_VALUE_RE = re.compile(r'[ \t\x21-\x7e\x80-\xff]*')

log = logging.getLogger(__name__)


class FileWrapper:

    def __init__(self, filelike, blksize=8192):
        self.filelike = filelike
        self.blksize = blksize
        if hasattr(filelike, 'close'):
            self.close = filelike.close

    def __getitem__(self, key):
        data = self.filelike.read(self.blksize)
        if data:
            return data
        raise IndexError


class WSGIErrorsWrapper(io.RawIOBase):

    def __init__(self, cfg):
        # There is no public __init__ method for RawIOBase so
        # we don't need to call super() in the __init__ method.
        # pylint: disable=super-init-not-called
        errorlog = logging.getLogger("gunicorn.error")
        handlers = errorlog.handlers
        self.streams = []

        if cfg.errorlog == "-":
            self.streams.append(sys.stderr)
            handlers = handlers[1:]

        for h in handlers:
            if hasattr(h, "stream"):
                self.streams.append(h.stream)

    def write(self, data):
        for stream in self.streams:
            try:
                stream.write(data)
            except UnicodeError:
                stream.write(data.encode("UTF-8"))
            stream.flush()


def base_environ(cfg):
    return {
        "wsgi.errors": WSGIErrorsWrapper(cfg),
        "wsgi.version": (1, 0),
        "wsgi.multithread": False,
        "wsgi.multiprocess": (cfg.workers > 1),
        "wsgi.run_once": False,
        "wsgi.file_wrapper": FileWrapper,
        "wsgi.input_terminated": True,
        "SERVER_SOFTWARE": SERVER_SOFTWARE,
    }


def default_environ(req, sock, cfg):
    env = base_environ(cfg)
    env.update({
        "wsgi.input": req.body,
        "gunicorn.socket": sock,
        "REQUEST_METHOD": req.method,
        "QUERY_STRING": req.query,
        "RAW_URI": req.uri,
        "SERVER_PROTOCOL": "HTTP/%s" % ".".join([str(v) for v in req.version])
    })
    return env


def proxy_environ(req):
    info = req.proxy_protocol_info

    if not info:
        return {}

    return {
        "PROXY_PROTOCOL": info["proxy_protocol"],
        "REMOTE_ADDR": info["client_addr"],
        "REMOTE_PORT": str(info["client_port"]),
        "PROXY_ADDR": info["proxy_addr"],
        "PROXY_PORT": str(info["proxy_port"]),
    }


def create(req, sock, client, server, cfg):
    resp = Response(req, sock, cfg)

    # set initial environ
    environ = default_environ(req, sock, cfg)

    # default variables
    host = None
    script_name = os.environ.get("SCRIPT_NAME", "")

    # add the headers to the environ
    for hdr_name, hdr_value in req.headers:
        if hdr_name == "EXPECT":
            # handle expect
            if hdr_value.lower() == "100-continue":
                sock.send(b"HTTP/1.1 100 Continue\r\n\r\n")
        elif hdr_name == 'HOST':
            host = hdr_value
        elif hdr_name == "SCRIPT_NAME":
            script_name = hdr_value
        elif hdr_name == "CONTENT-TYPE":
            environ['CONTENT_TYPE'] = hdr_value
            continue
        elif hdr_name == "CONTENT-LENGTH":
            environ['CONTENT_LENGTH'] = hdr_value
            continue

        # do not change lightly, this is a common source of security problems
        # RFC9110 Section 17.10 discourages ambiguous or incomplete mappings
        key = 'HTTP_' + hdr_name.replace('-', '_')
        if key in environ:
            hdr_value = "%s,%s" % (environ[key], hdr_value)
        environ[key] = hdr_value

    # set the url scheme
    environ['wsgi.url_scheme'] = req.scheme

    # set the REMOTE_* keys in environ
    # authors should be aware that REMOTE_HOST and REMOTE_ADDR
    # may not qualify the remote addr:
    # http://www.ietf.org/rfc/rfc3875
    if isinstance(client, str):
        environ['REMOTE_ADDR'] = client
    elif isinstance(client, bytes):
        environ['REMOTE_ADDR'] = client.decode()
    else:
        environ['REMOTE_ADDR'] = client[0]
        environ['REMOTE_PORT'] = str(client[1])

    # handle the SERVER_*
    # Normally only the application should use the Host header but since the
    # WSGI spec doesn't support unix sockets, we are using it to create
    # viable SERVER_* if possible.
    if isinstance(server, str):
        server = server.split(":")
        if len(server) == 1:
            # unix socket
            if host:
                server = host.split(':')
                if len(server) == 1:
                    if req.scheme == "http":
                        server.append(80)
                    elif req.scheme == "https":
                        server.append(443)
                    else:
                        server.append('')
            else:
                # no host header given which means that we are not behind a
                # proxy, so append an empty port.
                server.append('')
    environ['SERVER_NAME'] = server[0]
    environ['SERVER_PORT'] = str(server[1])

    # set the path and script name
    path_info = req.path
    if script_name:
        if not path_info.startswith(script_name):
            raise ConfigurationProblem(
                "Request path %r does not start with SCRIPT_NAME %r" %
                (path_info, script_name))
        path_info = path_info[len(script_name):]
    environ['PATH_INFO'] = util.unquote_to_wsgi_str(path_info)
    environ['SCRIPT_NAME'] = script_name

    # override the environ with the correct remote and server address if
    # we are behind a proxy using the proxy protocol.
    environ.update(proxy_environ(req))
    return resp, environ


class Response:

    def __init__(self, req, sock, cfg):
        self.req = req
        self.sock = sock
        self.version = SERVER
        self.status = None
        self.chunked = False
        self.must_close = False
        self.headers = []
        self.headers_sent = False
        self.response_length = None
        self.sent = 0
        self.upgrade = False
        self.cfg = cfg

    def force_close(self):
        self.must_close = True

    def should_close(self):
        if self.must_close or self.req.should_close():
            return True
        if self.response_length is not None or self.chunked:
            return False
        if self.req.method == 'HEAD':
            return False
        if self.status_code < 200 or self.status_code in (204, 304):
            return False
        return True

    def start_response(self, status, headers, exc_info=None):
        if exc_info:
            try:
                if self.status and self.headers_sent:
                    util.reraise(exc_info[0], exc_info[1], exc_info[2])
            finally:
                exc_info = None
        elif self.status is not None:
            raise AssertionError("Response headers already set!")

        self.status = status

        # get the status code from the response here so we can use it to check
        # the need for the connection header later without parsing the string
        # each time.
        try:
            self.status_code = int(self.status.split()[0])
        except ValueError:
            self.status_code = None

        self.process_headers(headers)
        self.chunked = self.is_chunked()
        return self.write

    def process_headers(self, headers):
        for name, value in headers:
            if not isinstance(name, str):
                raise TypeError('%r is not a string' % name)

            if not TOKEN_RE.fullmatch(name):
                raise InvalidHeaderName('%r' % name)

            if not isinstance(value, str):
                raise TypeError('%r is not a string' % value)

            if not HEADER_VALUE_RE.fullmatch(value):
                raise InvalidHeader('%r' % value)

            # RFC9110 5.5
            value = value.strip(" \t")
            lname = name.lower()
            if lname == "content-length":
                self.response_length = int(value)
            elif util.is_hoppish(name):
                if lname == "connection":
                    # handle websocket
                    if value.lower() == "upgrade":
                        self.upgrade = True
                elif lname == "upgrade":
                    if value.lower() == "websocket":
                        self.headers.append((name, value))

                # ignore hopbyhop headers
                continue
            self.headers.append((name, value))

    def is_chunked(self):
        # Only use chunked responses when the client is
        # speaking HTTP/1.1 or newer and there was
        # no Content-Length header set.
        if self.response_length is not None:
            return False
        elif self.req.version <= (1, 0):
            return False
        elif self.req.method == 'HEAD':
            # Responses to a HEAD request MUST NOT contain a response body.
            return False
        elif self.status_code in (204, 304):
            # Do not use chunked responses when the response is guaranteed to
            # not have a response body.
            return False
        return True

    def default_headers(self):
        # set the connection header
        if self.upgrade:
            connection = "upgrade"
        elif self.should_close():
            connection = "close"
        else:
            connection = "keep-alive"

        headers = [
            "HTTP/%s.%s %s\r\n" % (self.req.version[0],
                                   self.req.version[1], self.status),
            "Server: %s\r\n" % self.version,
            "Date: %s\r\n" % util.http_date(),
            "Connection: %s\r\n" % connection
        ]
        if self.chunked:
            headers.append("Transfer-Encoding: chunked\r\n")
        return headers

    def send_headers(self):
        if self.headers_sent:
            return
        tosend = self.default_headers()
        tosend.extend(["%s: %s\r\n" % (k, v) for k, v in self.headers])

        header_str = "%s\r\n" % "".join(tosend)
        util.write(self.sock, util.to_bytestring(header_str, "latin-1"))
        self.headers_sent = True

    def write(self, arg):
        self.send_headers()
        if not isinstance(arg, bytes):
            raise TypeError('%r is not a byte' % arg)
        arglen = len(arg)
        tosend = arglen
        if self.response_length is not None:
            if self.sent >= self.response_length:
                # Never write more than self.response_length bytes
                return

            tosend = min(self.response_length - self.sent, tosend)
            if tosend < arglen:
                arg = arg[:tosend]

        # Sending an empty chunk signals the end of the
        # response and prematurely closes the response
        if self.chunked and tosend == 0:
            return

        self.sent += tosend
        util.write(self.sock, arg, self.chunked)

    def can_sendfile(self):
        return self.cfg.sendfile is not False

    def sendfile(self, respiter):
        if self.cfg.is_ssl or not self.can_sendfile():
            return False

        if not util.has_fileno(respiter.filelike):
            return False

        fileno = respiter.filelike.fileno()
        try:
            offset = os.lseek(fileno, 0, os.SEEK_CUR)
            if self.response_length is None:
                filesize = os.fstat(fileno).st_size
                nbytes = filesize - offset
            else:
                nbytes = self.response_length
        except (OSError, io.UnsupportedOperation):
            return False

        self.send_headers()

        if self.is_chunked():
            chunk_size = "%X\r\n" % nbytes
            self.sock.sendall(chunk_size.encode('utf-8'))
        if nbytes > 0:
            self.sock.sendfile(respiter.filelike, offset=offset, count=nbytes)

        if self.is_chunked():
            self.sock.sendall(b"\r\n")

        os.lseek(fileno, offset, os.SEEK_SET)

        return True

    def write_file(self, respiter):
        if not self.sendfile(respiter):
            for item in respiter:
                self.write(item)

    def close(self):
        if not self.headers_sent:
            self.send_headers()
        if self.chunked:
            util.write_chunk(self.sock, b"")
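A minimal sketch (not part of the diff) of the static half of the environ assembly: base_environ() needs only a Config, so it can be inspected without a live socket or request.

from gunicorn.config import Config
from gunicorn.http.wsgi import base_environ

env = base_environ(Config())
print(env["wsgi.version"], env["SERVER_SOFTWARE"])  # (1, 0) plus the gunicorn/<version> banner
print(env["wsgi.multiprocess"])  # False with the default single-worker Config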
Binary file not shown.
Binary file not shown.
134
venv/lib/python3.11/site-packages/gunicorn/instrument/statsd.py
Normal file
@ -0,0 +1,134 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

"Bare-bones implementation of statsD's protocol, client-side"

import logging
import socket
from re import sub

from gunicorn.glogging import Logger

# Instrumentation constants
METRIC_VAR = "metric"
VALUE_VAR = "value"
MTYPE_VAR = "mtype"
GAUGE_TYPE = "gauge"
COUNTER_TYPE = "counter"
HISTOGRAM_TYPE = "histogram"


class Statsd(Logger):
    """statsD-based instrumentation, that passes as a logger
    """
    def __init__(self, cfg):
        Logger.__init__(self, cfg)
        self.prefix = sub(r"^(.+[^.]+)\.*$", "\\g<1>.", cfg.statsd_prefix)

        if isinstance(cfg.statsd_host, str):
            address_family = socket.AF_UNIX
        else:
            address_family = socket.AF_INET

        try:
            self.sock = socket.socket(address_family, socket.SOCK_DGRAM)
            self.sock.connect(cfg.statsd_host)
        except Exception:
            self.sock = None

        self.dogstatsd_tags = cfg.dogstatsd_tags

    # Log errors and warnings
    def critical(self, msg, *args, **kwargs):
        Logger.critical(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.critical", 1)

    def error(self, msg, *args, **kwargs):
        Logger.error(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.error", 1)

    def warning(self, msg, *args, **kwargs):
        Logger.warning(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.warning", 1)

    def exception(self, msg, *args, **kwargs):
        Logger.exception(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.exception", 1)

    # Special treatment for info, the most common log level
    def info(self, msg, *args, **kwargs):
        self.log(logging.INFO, msg, *args, **kwargs)

    # skip the run-of-the-mill logs
    def debug(self, msg, *args, **kwargs):
        self.log(logging.DEBUG, msg, *args, **kwargs)

    def log(self, lvl, msg, *args, **kwargs):
        """Log a given statistic if metric, value and type are present
        """
        try:
            extra = kwargs.get("extra", None)
            if extra is not None:
                metric = extra.get(METRIC_VAR, None)
                value = extra.get(VALUE_VAR, None)
                typ = extra.get(MTYPE_VAR, None)
                if metric and value and typ:
                    if typ == GAUGE_TYPE:
                        self.gauge(metric, value)
                    elif typ == COUNTER_TYPE:
                        self.increment(metric, value)
                    elif typ == HISTOGRAM_TYPE:
                        self.histogram(metric, value)
                    else:
                        pass

            # Log to parent logger only if there is something to say
            if msg:
                Logger.log(self, lvl, msg, *args, **kwargs)
        except Exception:
            Logger.warning(self, "Failed to log to statsd", exc_info=True)

    # access logging
    def access(self, resp, req, environ, request_time):
        """Measure request duration
        request_time is a datetime.timedelta
        """
        Logger.access(self, resp, req, environ, request_time)
        duration_in_ms = request_time.seconds * 1000 + float(request_time.microseconds) / 10 ** 3
        status = resp.status
        if isinstance(status, bytes):
            status = status.decode('utf-8')
        if isinstance(status, str):
            status = int(status.split(None, 1)[0])
        self.histogram("gunicorn.request.duration", duration_in_ms)
        self.increment("gunicorn.requests", 1)
        self.increment("gunicorn.request.status.%d" % status, 1)

    # statsD methods
    # you can use those directly if you want
    def gauge(self, name, value):
        self._sock_send("{0}{1}:{2}|g".format(self.prefix, name, value))

    def increment(self, name, value, sampling_rate=1.0):
        self._sock_send("{0}{1}:{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))

    def decrement(self, name, value, sampling_rate=1.0):
        self._sock_send("{0}{1}:-{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))

    def histogram(self, name, value):
        self._sock_send("{0}{1}:{2}|ms".format(self.prefix, name, value))

    def _sock_send(self, msg):
        try:
            if isinstance(msg, str):
                msg = msg.encode("ascii")

            # http://docs.datadoghq.com/guides/dogstatsd/#datagram-format
            if self.dogstatsd_tags:
                msg = msg + b"|#" + self.dogstatsd_tags.encode('ascii')

            if self.sock:
                self.sock.send(msg)
        except Exception:
            Logger.warning(self, "Error sending message to statsd", exc_info=True)
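The log() override above doubles as the metric entry point: any log call whose extra dict carries metric/value/mtype becomes a statsd datagram. A minimal sketch (not part of the diff; the "app" prefix is a hypothetical value for the demo, and with no statsd_host configured the send is a no-op):

from gunicorn.config import Config
from gunicorn.instrument.statsd import Statsd

cfg = Config()
cfg.set("statsd_prefix", "app")   # hypothetical prefix for the demo
logger = Statsd(cfg)              # no statsd_host set, so sends become no-ops

# An empty msg skips the parent logger and only emits the metric.
logger.info("", extra={"metric": "queue.depth", "value": 42, "mtype": "gauge"})
# would send the datagram b"app.queue.depth:42|g" if a statsd_host were configured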
85
venv/lib/python3.11/site-packages/gunicorn/pidfile.py
Normal file
@ -0,0 +1,85 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import errno
import os
import tempfile


class Pidfile:
    """\
    Manage a PID file. If a specific name is provided
    it and '"%s.oldpid" % name' will be used. Otherwise
    we create a temp file using os.mkstemp.
    """

    def __init__(self, fname):
        self.fname = fname
        self.pid = None

    def create(self, pid):
        oldpid = self.validate()
        if oldpid:
            if oldpid == os.getpid():
                return
            msg = "Already running on PID %s (or pid file '%s' is stale)"
            raise RuntimeError(msg % (oldpid, self.fname))

        self.pid = pid

        # Write pidfile
        fdir = os.path.dirname(self.fname)
        if fdir and not os.path.isdir(fdir):
            raise RuntimeError("%s doesn't exist. Can't create pidfile." % fdir)
        fd, fname = tempfile.mkstemp(dir=fdir)
        os.write(fd, ("%s\n" % self.pid).encode('utf-8'))
        if self.fname:
            os.rename(fname, self.fname)
        else:
            self.fname = fname
        os.close(fd)

        # set permissions to -rw-r--r--
        os.chmod(self.fname, 420)

    def rename(self, path):
        self.unlink()
        self.fname = path
        self.create(self.pid)

    def unlink(self):
        """ delete pidfile"""
        try:
            with open(self.fname) as f:
                pid1 = int(f.read() or 0)

            if pid1 == self.pid:
                os.unlink(self.fname)
        except Exception:
            pass

    def validate(self):
        """ Validate pidfile and make it stale if needed"""
        if not self.fname:
            return
        try:
            with open(self.fname) as f:
                try:
                    wpid = int(f.read())
                except ValueError:
                    return

                try:
                    os.kill(wpid, 0)
                    return wpid
                except OSError as e:
                    if e.args[0] == errno.EPERM:
                        return wpid
                    if e.args[0] == errno.ESRCH:
                        return
                    raise
        except OSError as e:
            if e.args[0] == errno.ENOENT:
                return
            raise
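A minimal sketch (not part of the diff; the /tmp path is hypothetical) of the Pidfile lifecycle as the arbiter uses it: create on boot, validate against stale files, unlink on exit.

import os
from gunicorn.pidfile import Pidfile

pidfile = Pidfile("/tmp/demo-gunicorn.pid")
pidfile.create(os.getpid())     # raises RuntimeError if another live PID owns it
print(pidfile.validate())       # our own PID: os.kill(pid, 0) succeeds, file is valid
pidfile.unlink()                # removes the file only if it still holds our PID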
131
venv/lib/python3.11/site-packages/gunicorn/reloader.py
Normal file
@ -0,0 +1,131 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
# pylint: disable=no-else-continue

import os
import os.path
import re
import sys
import time
import threading

COMPILED_EXT_RE = re.compile(r'py[co]$')


class Reloader(threading.Thread):
    def __init__(self, extra_files=None, interval=1, callback=None):
        super().__init__()
        self.daemon = True
        self._extra_files = set(extra_files or ())
        self._interval = interval
        self._callback = callback

    def add_extra_file(self, filename):
        self._extra_files.add(filename)

    def get_files(self):
        fnames = [
            COMPILED_EXT_RE.sub('py', module.__file__)
            for module in tuple(sys.modules.values())
            if getattr(module, '__file__', None)
        ]

        fnames.extend(self._extra_files)

        return fnames

    def run(self):
        mtimes = {}
        while True:
            for filename in self.get_files():
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    continue
                old_time = mtimes.get(filename)
                if old_time is None:
                    mtimes[filename] = mtime
                    continue
                elif mtime > old_time:
                    if self._callback:
                        self._callback(filename)
            time.sleep(self._interval)


has_inotify = False
if sys.platform.startswith('linux'):
    try:
        from inotify.adapters import Inotify
        import inotify.constants
        has_inotify = True
    except ImportError:
        pass


if has_inotify:

    class InotifyReloader(threading.Thread):
        event_mask = (inotify.constants.IN_CREATE | inotify.constants.IN_DELETE
                      | inotify.constants.IN_DELETE_SELF | inotify.constants.IN_MODIFY
                      | inotify.constants.IN_MOVE_SELF | inotify.constants.IN_MOVED_FROM
                      | inotify.constants.IN_MOVED_TO)

        def __init__(self, extra_files=None, callback=None):
            super().__init__()
            self.daemon = True
            self._callback = callback
            self._dirs = set()
            self._watcher = Inotify()

            for extra_file in extra_files:
                self.add_extra_file(extra_file)

        def add_extra_file(self, filename):
            dirname = os.path.dirname(filename)

            if dirname in self._dirs:
                return

            self._watcher.add_watch(dirname, mask=self.event_mask)
            self._dirs.add(dirname)

        def get_dirs(self):
            fnames = [
                os.path.dirname(os.path.abspath(COMPILED_EXT_RE.sub('py', module.__file__)))
                for module in tuple(sys.modules.values())
                if getattr(module, '__file__', None)
            ]

            return set(fnames)

        def run(self):
            self._dirs = self.get_dirs()

            for dirname in self._dirs:
                if os.path.isdir(dirname):
                    self._watcher.add_watch(dirname, mask=self.event_mask)

            for event in self._watcher.event_gen():
                if event is None:
                    continue

                filename = event[3]

                self._callback(filename)

else:

    class InotifyReloader:
        def __init__(self, extra_files=None, callback=None):
            raise ImportError('You must have the inotify module installed to '
                              'use the inotify reloader')


preferred_reloader = InotifyReloader if has_inotify else Reloader

reloader_engines = {
    'auto': preferred_reloader,
    'poll': Reloader,
    'inotify': InotifyReloader,
}
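A minimal sketch (not part of the diff; "app.cfg" is a hypothetical extra file) of the poll-based engine, which watches every loaded module plus the extra files and fires a callback on change. The interval keyword is why "poll" is named explicitly here: the inotify engine does not take one.

from gunicorn.reloader import reloader_engines

def restart(fname):
    print("changed:", fname)    # a real worker would re-exec itself here

reloader = reloader_engines["poll"](extra_files=["app.cfg"], interval=1,
                                    callback=restart)
reloader.start()                # daemon thread; re-stats file mtimes each interval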
231
venv/lib/python3.11/site-packages/gunicorn/sock.py
Normal file
@ -0,0 +1,231 @@
#
|
||||
# This file is part of gunicorn released under the MIT license.
|
||||
# See the NOTICE for more information.
|
||||
|
||||
import errno
|
||||
import os
|
||||
import socket
|
||||
import ssl
|
||||
import stat
|
||||
import sys
|
||||
import time
|
||||
|
||||
from gunicorn import util
|
||||
|
||||
|
||||
class BaseSocket:
|
||||
|
||||
def __init__(self, address, conf, log, fd=None):
|
||||
self.log = log
|
||||
self.conf = conf
|
||||
|
||||
self.cfg_addr = address
|
||||
if fd is None:
|
||||
sock = socket.socket(self.FAMILY, socket.SOCK_STREAM)
|
||||
bound = False
|
||||
else:
|
||||
sock = socket.fromfd(fd, self.FAMILY, socket.SOCK_STREAM)
|
||||
os.close(fd)
|
||||
bound = True
|
||||
|
||||
self.sock = self.set_options(sock, bound=bound)
|
||||
|
||||
def __str__(self):
|
||||
return "<socket %d>" % self.sock.fileno()
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self.sock, name)
|
||||
|
||||
def set_options(self, sock, bound=False):
|
||||
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
||||
if (self.conf.reuse_port
|
||||
and hasattr(socket, 'SO_REUSEPORT')): # pragma: no cover
|
||||
try:
|
||||
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
|
||||
except OSError as err:
|
||||
if err.errno not in (errno.ENOPROTOOPT, errno.EINVAL):
|
||||
raise
|
||||
if not bound:
|
||||
self.bind(sock)
|
||||
sock.setblocking(0)
|
||||
|
||||
# make sure that the socket can be inherited
|
||||
if hasattr(sock, "set_inheritable"):
|
||||
sock.set_inheritable(True)
|
||||
|
||||
sock.listen(self.conf.backlog)
|
||||
return sock
|
||||
|
||||
def bind(self, sock):
|
||||
sock.bind(self.cfg_addr)
|
||||
|
||||
    def close(self):
        if self.sock is None:
            return

        try:
            self.sock.close()
        except OSError as e:
            self.log.info("Error while closing socket %s", str(e))

        self.sock = None


class TCPSocket(BaseSocket):

    FAMILY = socket.AF_INET

    def __str__(self):
        if self.conf.is_ssl:
            scheme = "https"
        else:
            scheme = "http"

        addr = self.sock.getsockname()
        return "%s://%s:%d" % (scheme, addr[0], addr[1])

    def set_options(self, sock, bound=False):
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        return super().set_options(sock, bound=bound)


class TCP6Socket(TCPSocket):

    FAMILY = socket.AF_INET6

    def __str__(self):
        (host, port, _, _) = self.sock.getsockname()
        return "http://[%s]:%d" % (host, port)


class UnixSocket(BaseSocket):

    FAMILY = socket.AF_UNIX

    def __init__(self, addr, conf, log, fd=None):
        if fd is None:
            try:
                st = os.stat(addr)
            except OSError as e:
                if e.args[0] != errno.ENOENT:
                    raise
            else:
                if stat.S_ISSOCK(st.st_mode):
                    os.remove(addr)
                else:
                    raise ValueError("%r is not a socket" % addr)
        super().__init__(addr, conf, log, fd=fd)

    def __str__(self):
        return "unix:%s" % self.cfg_addr

    def bind(self, sock):
        old_umask = os.umask(self.conf.umask)
        sock.bind(self.cfg_addr)
        util.chown(self.cfg_addr, self.conf.uid, self.conf.gid)
        os.umask(old_umask)


def _sock_type(addr):
    if isinstance(addr, tuple):
        if util.is_ipv6(addr[0]):
            sock_type = TCP6Socket
        else:
            sock_type = TCPSocket
    elif isinstance(addr, (str, bytes)):
        sock_type = UnixSocket
    else:
        raise TypeError("Unable to create socket from: %r" % addr)
    return sock_type


def create_sockets(conf, log, fds=None):
    """
    Create a new socket for the configured addresses or file descriptors.

    If a configured address is a tuple then a TCP socket is created.
    If it is a string, a Unix socket is created. Otherwise, a TypeError is
    raised.
    """
    listeners = []

    # get it only once
    addr = conf.address
    fdaddr = [bind for bind in addr if isinstance(bind, int)]
    if fds:
        fdaddr += list(fds)
    laddr = [bind for bind in addr if not isinstance(bind, int)]

    # check ssl config early to raise the error on startup
    # only the certfile is needed since it can contain the keyfile
    if conf.certfile and not os.path.exists(conf.certfile):
        raise ValueError('certfile "%s" does not exist' % conf.certfile)

    if conf.keyfile and not os.path.exists(conf.keyfile):
        raise ValueError('keyfile "%s" does not exist' % conf.keyfile)

    # sockets are already bound
    if fdaddr:
        for fd in fdaddr:
            sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)
            sock_name = sock.getsockname()
            sock_type = _sock_type(sock_name)
            listener = sock_type(sock_name, conf, log, fd=fd)
            listeners.append(listener)

        return listeners

    # no socket is bound yet: this is the first initialization of gunicorn
    # in this environment.
    for addr in laddr:
        sock_type = _sock_type(addr)
        sock = None
        for i in range(5):
            try:
                sock = sock_type(addr, conf, log)
            except OSError as e:
                if e.args[0] == errno.EADDRINUSE:
                    log.error("Connection in use: %s", str(addr))
                if e.args[0] == errno.EADDRNOTAVAIL:
                    log.error("Invalid address: %s", str(addr))
                msg = "connection to {addr} failed: {error}"
                log.error(msg.format(addr=str(addr), error=str(e)))
                if i < 5:
                    log.debug("Retrying in 1 second.")
                    time.sleep(1)
            else:
                break

        if sock is None:
            log.error("Can't connect to %s", str(addr))
            sys.exit(1)

        listeners.append(sock)

    return listeners


def close_sockets(listeners, unlink=True):
    for sock in listeners:
        sock_name = sock.getsockname()
        sock.close()
        if unlink and _sock_type(sock_name) is UnixSocket:
            os.unlink(sock_name)


def ssl_context(conf):
    def default_ssl_context_factory():
        context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=conf.ca_certs)
        context.load_cert_chain(certfile=conf.certfile, keyfile=conf.keyfile)
        context.verify_mode = conf.cert_reqs
        if conf.ciphers:
            context.set_ciphers(conf.ciphers)
        return context

    return conf.ssl_context(conf, default_ssl_context_factory)


def ssl_wrap_socket(sock, conf):
    return ssl_context(conf).wrap_socket(sock,
                                         server_side=True,
                                         suppress_ragged_eofs=conf.suppress_ragged_eofs,
                                         do_handshake_on_connect=conf.do_handshake_on_connect)
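As a quick illustration of the dispatch in _sock_type above; illustrative only, not part of the vendored file (assumes gunicorn is importable from this venv, and the unix path is a placeholder):

# illustrative only
from gunicorn.sock import _sock_type, TCPSocket, TCP6Socket, UnixSocket

assert _sock_type(("127.0.0.1", 8000)) is TCPSocket     # IPv4 host/port tuple
assert _sock_type(("::1", 8000)) is TCP6Socket          # IPv6 host/port tuple
assert _sock_type("/run/gunicorn.sock") is UnixSocket   # string or bytes path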
75
venv/lib/python3.11/site-packages/gunicorn/systemd.py
Normal file
@ -0,0 +1,75 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import os
import socket

SD_LISTEN_FDS_START = 3


def listen_fds(unset_environment=True):
    """
    Get the number of sockets inherited from systemd socket activation.

    :param unset_environment: clear the systemd environment variables unless False
    :type unset_environment: bool
    :return: the number of sockets to inherit from systemd socket activation
    :rtype: int

    Returns zero immediately if $LISTEN_PID is not set to the current pid.
    Otherwise, returns the number of systemd activation sockets specified by
    $LISTEN_FDS.

    When $LISTEN_PID matches the current pid, unsets the environment variables
    unless the ``unset_environment`` flag is ``False``.

    .. note::
        Unlike the sd_listen_fds C function, this implementation does not set
        the FD_CLOEXEC flag because the gunicorn arbiter never needs to do this.

    .. seealso::
        `<https://www.freedesktop.org/software/systemd/man/sd_listen_fds.html>`_

    """
    fds = int(os.environ.get('LISTEN_FDS', 0))
    listen_pid = int(os.environ.get('LISTEN_PID', 0))

    if listen_pid != os.getpid():
        return 0

    if unset_environment:
        os.environ.pop('LISTEN_PID', None)
        os.environ.pop('LISTEN_FDS', None)

    return fds


def sd_notify(state, logger, unset_environment=False):
    """Send a notification to systemd. state is a string; see
    the man page of sd_notify (http://www.freedesktop.org/software/systemd/man/sd_notify.html)
    for a description of the allowable values.

    If the unset_environment parameter is True, sd_notify() will unset
    the $NOTIFY_SOCKET environment variable before returning (regardless of
    whether the function call itself succeeded or not). Further calls to
    sd_notify() will then fail, but the variable is no longer inherited by
    child processes.
    """

    addr = os.environ.get('NOTIFY_SOCKET')
    if addr is None:
        # not running under a systemd service: this is a no-op
        return
    try:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC)
        if addr[0] == '@':
            addr = '\0' + addr[1:]
        sock.connect(addr)
        sock.sendall(state.encode('utf-8'))
    except Exception:
        logger.debug("Exception while invoking sd_notify()", exc_info=True)
    finally:
        if unset_environment:
            os.environ.pop('NOTIFY_SOCKET')
        sock.close()
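A minimal sketch of how a caller might consume these helpers under systemd socket activation; illustrative only, not part of the vendored file (the logger name is arbitrary):

# illustrative only
import logging
from gunicorn import systemd

logger = logging.getLogger("demo")
count = systemd.listen_fds(unset_environment=True)
# fds inherited from systemd always start at SD_LISTEN_FDS_START (3)
fds = list(range(systemd.SD_LISTEN_FDS_START,
                 systemd.SD_LISTEN_FDS_START + count))
systemd.sd_notify("READY=1", logger)  # no-op unless $NOTIFY_SOCKET is set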
653
venv/lib/python3.11/site-packages/gunicorn/util.py
Normal file
@ -0,0 +1,653 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import ast
import email.utils
import errno
import fcntl
import html
import importlib
import inspect
import io
import logging
import os
import pwd
import random
import re
import socket
import sys
import textwrap
import time
import traceback
import warnings

try:
    import importlib.metadata as importlib_metadata
except (ModuleNotFoundError, ImportError):
    import importlib_metadata

from gunicorn.errors import AppImportError
from gunicorn.workers import SUPPORTED_WORKERS
import urllib.parse

REDIRECT_TO = getattr(os, 'devnull', '/dev/null')

# Server and Date aren't technically hop-by-hop
# headers, but they are in the purview of the
# origin server which the WSGI spec says we should
# act like. So we drop them and add our own.
#
# In the future, concatenating server header values
# might be better, but nothing else does it and
# dropping them is easier.
hop_headers = set("""
    connection keep-alive proxy-authenticate proxy-authorization
    te trailers transfer-encoding upgrade
    server date
    """.split())

try:
    from setproctitle import setproctitle

    def _setproctitle(title):
        setproctitle("gunicorn: %s" % title)
except ImportError:
    def _setproctitle(title):
        pass


def load_entry_point(distribution, group, name):
    dist_obj = importlib_metadata.distribution(distribution)
    eps = [ep for ep in dist_obj.entry_points
           if ep.group == group and ep.name == name]
    if not eps:
        raise ImportError("Entry point %r not found" % ((group, name),))
    return eps[0].load()


def load_class(uri, default="gunicorn.workers.sync.SyncWorker",
               section="gunicorn.workers"):
    if inspect.isclass(uri):
        return uri
    if uri.startswith("egg:"):
        # uses entry points
        entry_str = uri.split("egg:")[1]
        try:
            dist, name = entry_str.rsplit("#", 1)
        except ValueError:
            dist = entry_str
            name = default

        try:
            return load_entry_point(dist, section, name)
        except Exception:
            exc = traceback.format_exc()
            msg = "class uri %r invalid or not found: \n\n[%s]"
            raise RuntimeError(msg % (uri, exc))
    else:
        components = uri.split('.')
        if len(components) == 1:
            while True:
                if uri.startswith("#"):
                    uri = uri[1:]

                if uri in SUPPORTED_WORKERS:
                    components = SUPPORTED_WORKERS[uri].split(".")
                    break

                try:
                    return load_entry_point(
                        "gunicorn", section, uri
                    )
                except Exception:
                    exc = traceback.format_exc()
                    msg = "class uri %r invalid or not found: \n\n[%s]"
                    raise RuntimeError(msg % (uri, exc))

        klass = components.pop(-1)

        try:
            mod = importlib.import_module('.'.join(components))
        except Exception:
            exc = traceback.format_exc()
            msg = "class uri %r invalid or not found: \n\n[%s]"
            raise RuntimeError(msg % (uri, exc))
        return getattr(mod, klass)
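A hedged illustration of the URI shapes load_class accepts; illustrative only, not part of the vendored file:

# illustrative only -- assumes gunicorn is importable from this venv
from gunicorn.util import load_class

w1 = load_class("sync")                              # bare alias, resolved via SUPPORTED_WORKERS
w2 = load_class("gunicorn.workers.sync.SyncWorker")  # full dotted path
assert w1 is w2
# "egg:<dist>#<name>" is also accepted and resolved through entry points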
positionals = (
    inspect.Parameter.POSITIONAL_ONLY,
    inspect.Parameter.POSITIONAL_OR_KEYWORD,
)


def get_arity(f):
    sig = inspect.signature(f)
    arity = 0

    for param in sig.parameters.values():
        if param.kind in positionals:
            arity += 1

    return arity


def get_username(uid):
    """ get the username for a user id"""
    return pwd.getpwuid(uid).pw_name


def set_owner_process(uid, gid, initgroups=False):
    """ set user and group of worker processes """

    if gid:
        if uid:
            try:
                username = get_username(uid)
            except KeyError:
                initgroups = False

        # versions of python < 2.6.2 don't manage unsigned int for
        # groups like on osx or fedora
        gid = abs(gid) & 0x7FFFFFFF

        if initgroups:
            os.initgroups(username, gid)
        elif gid != os.getgid():
            os.setgid(gid)

    if uid and uid != os.getuid():
        os.setuid(uid)


def chown(path, uid, gid):
    os.chown(path, uid, gid)


if sys.platform.startswith("win"):
    def _waitfor(func, pathname, waitall=False):
        # Perform the operation
        func(pathname)
        # Now set up the wait loop
        if waitall:
            dirname = pathname
        else:
            dirname, name = os.path.split(pathname)
            dirname = dirname or '.'
        # Check for `pathname` to be removed from the filesystem.
        # The exponential backoff of the timeout amounts to a total
        # of ~1 second after which the deletion is probably an error
        # anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
        timeout = 0.001
        while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
            # the contents of the directory regardless of any security or
            # access rights. If we have made it this far, we have sufficient
            # permissions to do that much using Python's equivalent of the
            # Windows API FindFirstFile.
            # Other Windows APIs can fail or give incorrect results when
            # dealing with files that are pending deletion.
            L = os.listdir(dirname)
            if not (L if waitall else name in L):
                return
            # Increase the timeout and try again
            time.sleep(timeout)
            timeout *= 2
        warnings.warn('tests may fail, delete still pending for ' + pathname,
                      RuntimeWarning, stacklevel=4)

    def _unlink(filename):
        _waitfor(os.unlink, filename)
else:
    _unlink = os.unlink


def unlink(filename):
    try:
        _unlink(filename)
    except OSError as error:
        # The filename need not exist.
        if error.errno not in (errno.ENOENT, errno.ENOTDIR):
            raise


def is_ipv6(addr):
    try:
        socket.inet_pton(socket.AF_INET6, addr)
    except OSError:  # not a valid address
        return False
    except ValueError:  # ipv6 not supported on this platform
        return False
    return True


def parse_address(netloc, default_port='8000'):
    if re.match(r'unix:(//)?', netloc):
        return re.split(r'unix:(//)?', netloc)[-1]

    if netloc.startswith("fd://"):
        fd = netloc[5:]
        try:
            return int(fd)
        except ValueError:
            raise RuntimeError("%r is not a valid file descriptor." % fd) from None

    if netloc.startswith("tcp://"):
        netloc = netloc.split("tcp://")[1]
    host, port = netloc, default_port

    if '[' in netloc and ']' in netloc:
        host = netloc.split(']')[0][1:]
        port = (netloc.split(']:') + [default_port])[1]
    elif ':' in netloc:
        host, port = (netloc.split(':') + [default_port])[:2]
    elif netloc == "":
        host, port = "0.0.0.0", default_port

    try:
        port = int(port)
    except ValueError:
        raise RuntimeError("%r is not a valid port number." % port)

    return host.lower(), port
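Hedged examples of the address forms parse_address accepts, derived from the branches above; illustrative only:

# illustrative only
from gunicorn.util import parse_address

assert parse_address("127.0.0.1:9000") == ("127.0.0.1", 9000)
assert parse_address("[::1]:9000") == ("::1", 9000)            # bracketed IPv6
assert parse_address("unix:/tmp/gunicorn.sock") == "/tmp/gunicorn.sock"
assert parse_address("fd://3") == 3                            # inherited descriptor
assert parse_address("") == ("0.0.0.0", 8000)                  # defaults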
def close_on_exec(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    flags |= fcntl.FD_CLOEXEC
    fcntl.fcntl(fd, fcntl.F_SETFD, flags)


def set_non_blocking(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)


def close(sock):
    try:
        sock.close()
    except OSError:
        pass


try:
    from os import closerange
except ImportError:
    def closerange(fd_low, fd_high):
        # Iterate through and close all file descriptors.
        for fd in range(fd_low, fd_high):
            try:
                os.close(fd)
            except OSError:  # ERROR, fd wasn't open to begin with (ignored)
                pass


def write_chunk(sock, data):
    if isinstance(data, str):
        data = data.encode('utf-8')
    chunk_size = "%X\r\n" % len(data)
    chunk = b"".join([chunk_size.encode('utf-8'), data, b"\r\n"])
    sock.sendall(chunk)
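To make the chunked framing concrete, a small self-check with a stub socket; illustrative only, not part of the vendored file:

# illustrative only -- shows the wire format write_chunk produces
from gunicorn.util import write_chunk

class _StubSock:
    def __init__(self):
        self.data = b""
    def sendall(self, chunk):
        self.data += chunk

s = _StubSock()
write_chunk(s, b"hello")   # hex length, CRLF, payload, CRLF
write_chunk(s, b"")        # zero-length terminating chunk
assert s.data == b"5\r\nhello\r\n0\r\n\r\n"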
def write(sock, data, chunked=False):
    if chunked:
        return write_chunk(sock, data)
    sock.sendall(data)


def write_nonblock(sock, data, chunked=False):
    timeout = sock.gettimeout()
    if timeout != 0.0:
        try:
            sock.setblocking(0)
            return write(sock, data, chunked)
        finally:
            sock.setblocking(1)
    else:
        return write(sock, data, chunked)


def write_error(sock, status_int, reason, mesg):
    html_error = textwrap.dedent("""\
    <html>
      <head>
        <title>%(reason)s</title>
      </head>
      <body>
        <h1><p>%(reason)s</p></h1>
        %(mesg)s
      </body>
    </html>
    """) % {"reason": reason, "mesg": html.escape(mesg)}

    http = textwrap.dedent("""\
    HTTP/1.1 %s %s\r
    Connection: close\r
    Content-Type: text/html\r
    Content-Length: %d\r
    \r
    %s""") % (str(status_int), reason, len(html_error), html_error)
    write_nonblock(sock, http.encode('latin1'))


def _called_with_wrong_args(f):
    """Check whether calling a function raised a ``TypeError`` because
    the call failed or because something in the function raised the
    error.

    :param f: The function that was called.
    :return: ``True`` if the call failed.
    """
    tb = sys.exc_info()[2]

    try:
        while tb is not None:
            if tb.tb_frame.f_code is f.__code__:
                # In the function, it was called successfully.
                return False

            tb = tb.tb_next

        # Didn't reach the function.
        return True
    finally:
        # Delete tb to break a circular reference in Python 2.
        # https://docs.python.org/2/library/sys.html#sys.exc_info
        del tb


def import_app(module):
    parts = module.split(":", 1)
    if len(parts) == 1:
        obj = "application"
    else:
        module, obj = parts[0], parts[1]

    try:
        mod = importlib.import_module(module)
    except ImportError:
        if module.endswith(".py") and os.path.exists(module):
            msg = "Failed to find application, did you mean '%s:%s'?"
            raise ImportError(msg % (module.rsplit(".", 1)[0], obj))
        raise

    # Parse obj as a single expression to determine if it's a valid
    # attribute name or function call.
    try:
        expression = ast.parse(obj, mode="eval").body
    except SyntaxError:
        raise AppImportError(
            "Failed to parse %r as an attribute name or function call." % obj
        )

    if isinstance(expression, ast.Name):
        name = expression.id
        args = kwargs = None
    elif isinstance(expression, ast.Call):
        # Ensure the function name is an attribute name only.
        if not isinstance(expression.func, ast.Name):
            raise AppImportError("Function reference must be a simple name: %r" % obj)

        name = expression.func.id

        # Parse the positional and keyword arguments as literals.
        try:
            args = [ast.literal_eval(arg) for arg in expression.args]
            kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expression.keywords}
        except ValueError:
            # literal_eval gives cryptic error messages, show a generic
            # message with the full expression instead.
            raise AppImportError(
                "Failed to parse arguments as literal values: %r" % obj
            )
    else:
        raise AppImportError(
            "Failed to parse %r as an attribute name or function call." % obj
        )

    is_debug = logging.root.level == logging.DEBUG
    try:
        app = getattr(mod, name)
    except AttributeError:
        if is_debug:
            traceback.print_exception(*sys.exc_info())
        raise AppImportError("Failed to find attribute %r in %r." % (name, module))

    # If the expression was a function call, call the retrieved object
    # to get the real application.
    if args is not None:
        try:
            app = app(*args, **kwargs)
        except TypeError as e:
            # If the TypeError was due to bad arguments to the factory
            # function, show Python's nice error message without a
            # traceback.
            if _called_with_wrong_args(app):
                raise AppImportError(
                    "".join(traceback.format_exception_only(TypeError, e)).strip()
                )

            # Otherwise it was raised from within the function, show the
            # full traceback.
            raise

    if app is None:
        raise AppImportError("Failed to find application object: %r" % obj)

    if not callable(app):
        raise AppImportError("Application object must be callable.")
    return app
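The strings import_app resolves, sketched with hypothetical module and factory names; illustrative only:

# illustrative only -- "myproject.wsgi" and "create_app" are placeholders
from gunicorn.util import import_app

app = import_app("myproject.wsgi")                     # attribute defaults to "application"
app = import_app("myproject.wsgi:app")                 # explicit attribute
app = import_app("myproject.wsgi:create_app('prod')")  # factory call with literal args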
def getcwd():
    # get the current path, trying the PWD env var first
    try:
        a = os.stat(os.environ['PWD'])
        b = os.stat(os.getcwd())
        if a.st_ino == b.st_ino and a.st_dev == b.st_dev:
            cwd = os.environ['PWD']
        else:
            cwd = os.getcwd()
    except Exception:
        cwd = os.getcwd()
    return cwd


def http_date(timestamp=None):
    """Return the current date and time formatted for a message header."""
    if timestamp is None:
        timestamp = time.time()
    s = email.utils.formatdate(timestamp, localtime=False, usegmt=True)
    return s


def is_hoppish(header):
    return header.lower().strip() in hop_headers


def daemonize(enable_stdio_inheritance=False):
    """\
    Standard daemonization of a process.
    http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7
    """
    if 'GUNICORN_FD' not in os.environ:
        if os.fork():
            os._exit(0)
        os.setsid()

        if os.fork():
            os._exit(0)

        os.umask(0o22)

        # In both of the following cases, any file descriptors above
        # stdin, stdout and stderr are left untouched. The inheritance
        # option simply allows one to have output go to a file
        # specified by way of shell redirection when not wanting
        # to use the --error-log option.

        if not enable_stdio_inheritance:
            # Remap all of stdin, stdout and stderr on to
            # /dev/null. The expectation is that users have
            # specified the --error-log option.

            closerange(0, 3)

            fd_null = os.open(REDIRECT_TO, os.O_RDWR)
            # PEP 446, make fd for /dev/null inheritable
            os.set_inheritable(fd_null, True)

            # expect fd_null to always be 0 here, but in case it isn't ...
            if fd_null != 0:
                os.dup2(fd_null, 0)

            os.dup2(fd_null, 1)
            os.dup2(fd_null, 2)

        else:
            fd_null = os.open(REDIRECT_TO, os.O_RDWR)

            # Always redirect stdin to /dev/null as we would
            # never expect to need to read interactive input.

            if fd_null != 0:
                os.close(0)
                os.dup2(fd_null, 0)

            # If stdout and stderr are still connected to
            # their original file descriptors we check to see
            # if they are associated with terminal devices.
            # When they are, we map them to /dev/null so that
            # they are still properly detached from any
            # controlling terminal. If not, we preserve them
            # as they are.
            #
            # If stdin and stdout were not hooked up to the
            # original file descriptors, then all bets are
            # off and all we can really do is leave them as
            # they were.
            #
            # This will allow 'gunicorn ... > output.log 2>&1'
            # to work with stdout/stderr going to the file
            # as expected.
            #
            # Note that if using the --error-log option, the log
            # file specified through shell redirection will
            # only be used up until the log file specified
            # by the option takes over. As it replaces stdout
            # and stderr at the file descriptor level, anything
            # using stdout or stderr, including code that has
            # cached a reference to them, will still work.

            def redirect(stream, fd_expect):
                try:
                    fd = stream.fileno()
                    if fd == fd_expect and stream.isatty():
                        os.close(fd)
                        os.dup2(fd_null, fd)
                except AttributeError:
                    pass

            redirect(sys.stdout, 1)
            redirect(sys.stderr, 2)


def seed():
    try:
        random.seed(os.urandom(64))
    except NotImplementedError:
        random.seed('%s.%s' % (time.time(), os.getpid()))


def check_is_writable(path):
    try:
        with open(path, 'a') as f:
            f.close()
    except OSError as e:
        raise RuntimeError("Error: '%s' isn't writable [%r]" % (path, e))


def to_bytestring(value, encoding="utf8"):
    """Converts a string argument to a byte string"""
    if isinstance(value, bytes):
        return value
    if not isinstance(value, str):
        raise TypeError('%r is not a string' % value)

    return value.encode(encoding)


def has_fileno(obj):
    if not hasattr(obj, "fileno"):
        return False

    # check the BytesIO case and maybe others
    try:
        obj.fileno()
    except (AttributeError, OSError, io.UnsupportedOperation):
        return False

    return True


def warn(msg):
    print("!!!", file=sys.stderr)

    lines = msg.splitlines()
    for i, line in enumerate(lines):
        if i == 0:
            line = "WARNING: %s" % line
        print("!!! %s" % line, file=sys.stderr)

    print("!!!\n", file=sys.stderr)
    sys.stderr.flush()


def make_fail_app(msg):
    msg = to_bytestring(msg)

    def app(environ, start_response):
        start_response("500 Internal Server Error", [
            ("Content-Type", "text/plain"),
            ("Content-Length", str(len(msg)))
        ])
        return [msg]

    return app


def split_request_uri(uri):
    if uri.startswith("//"):
        # When the path starts with //, urlsplit considers it a
        # relative uri while the RFC says we should consider it an abs_path.
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        # We use a temporary dot prefix to work around this behaviour.
        parts = urllib.parse.urlsplit("." + uri)
        return parts._replace(path=parts.path[1:])

    return urllib.parse.urlsplit(uri)
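A short self-check of the double-slash workaround above; illustrative only:

# illustrative only
from gunicorn.util import split_request_uri

assert split_request_uri("//host/path").path == "//host/path"  # abs_path preserved
assert split_request_uri("/path?q=1").path == "/path"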
# From six.reraise
def reraise(tp, value, tb=None):
    try:
        if value is None:
            value = tp()
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    finally:
        value = None
        tb = None


def bytes_to_str(b):
    if isinstance(b, str):
        return b
    return str(b, 'latin1')


def unquote_to_wsgi_str(string):
    return urllib.parse.unquote_to_bytes(string).decode('latin-1')
14
venv/lib/python3.11/site-packages/gunicorn/workers/__init__.py
Normal file
@ -0,0 +1,14 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

# supported gunicorn workers.
SUPPORTED_WORKERS = {
    "sync": "gunicorn.workers.sync.SyncWorker",
    "eventlet": "gunicorn.workers.geventlet.EventletWorker",
    "gevent": "gunicorn.workers.ggevent.GeventWorker",
    "gevent_wsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker",
    "gevent_pywsgi": "gunicorn.workers.ggevent.GeventPyWSGIWorker",
    "tornado": "gunicorn.workers.gtornado.TornadoWorker",
    "gthread": "gunicorn.workers.gthread.ThreadWorker",
}
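These aliases are what the worker_class setting (-k on the command line) accepts; a hedged check that load_class maps an alias through this table; illustrative only:

# illustrative only -- e.g. `gunicorn -k gthread myapp:app` ("myapp:app" is a placeholder)
from gunicorn.util import load_class
from gunicorn.workers import SUPPORTED_WORKERS

assert "gthread" in SUPPORTED_WORKERS
assert load_class("gthread").__name__ == "ThreadWorker"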
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
287
venv/lib/python3.11/site-packages/gunicorn/workers/base.py
Normal file
@ -0,0 +1,287 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import io
import os
import signal
import sys
import time
import traceback
from datetime import datetime
from random import randint
from ssl import SSLError

from gunicorn import util
from gunicorn.http.errors import (
    ForbiddenProxyRequest, InvalidHeader,
    InvalidHeaderName, InvalidHTTPVersion,
    InvalidProxyLine, InvalidRequestLine,
    InvalidRequestMethod, InvalidSchemeHeaders,
    LimitRequestHeaders, LimitRequestLine,
    UnsupportedTransferCoding,
    ConfigurationProblem, ObsoleteFolding,
)
from gunicorn.http.wsgi import Response, default_environ
from gunicorn.reloader import reloader_engines
from gunicorn.workers.workertmp import WorkerTmp


class Worker:

    SIGNALS = [getattr(signal, "SIG%s" % x) for x in (
        "ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split()
    )]

    PIPE = []

    def __init__(self, age, ppid, sockets, app, timeout, cfg, log):
        """\
        This is called pre-fork so it shouldn't do anything to the
        current process. If there's a need to make process wide
        changes you'll want to do that in ``self.init_process()``.
        """
        self.age = age
        self.pid = "[booting]"
        self.ppid = ppid
        self.sockets = sockets
        self.app = app
        self.timeout = timeout
        self.cfg = cfg
        self.booted = False
        self.aborted = False
        self.reloader = None

        self.nr = 0

        if cfg.max_requests > 0:
            jitter = randint(0, cfg.max_requests_jitter)
            self.max_requests = cfg.max_requests + jitter
        else:
            self.max_requests = sys.maxsize

        self.alive = True
        self.log = log
        self.tmp = WorkerTmp(cfg)

    def __str__(self):
        return "<Worker %s>" % self.pid

    def notify(self):
        """\
        Your worker subclass must arrange to have this method called
        once every ``self.timeout`` seconds. If you fail in accomplishing
        this task, the master process will murder your workers.
        """
        self.tmp.notify()

    def run(self):
        """\
        This is the mainloop of a worker process. You should override
        this method in a subclass to provide the intended behaviour
        for your particular evil schemes.
        """
        raise NotImplementedError()

    def init_process(self):
        """\
        If you override this method in a subclass, the last statement
        in the function should be to call this method with
        super().init_process() so that the ``run()`` loop is initiated.
        """

        # set environment variables
        if self.cfg.env:
            for k, v in self.cfg.env.items():
                os.environ[k] = v

        util.set_owner_process(self.cfg.uid, self.cfg.gid,
                               initgroups=self.cfg.initgroups)

        # Reseed the random number generator
        util.seed()

        # For waking ourselves up
        self.PIPE = os.pipe()
        for p in self.PIPE:
            util.set_non_blocking(p)
            util.close_on_exec(p)

        # Prevent fd inheritance
        for s in self.sockets:
            util.close_on_exec(s)
        util.close_on_exec(self.tmp.fileno())

        self.wait_fds = self.sockets + [self.PIPE[0]]

        self.log.close_on_exec()

        self.init_signals()

        # start the reloader
        if self.cfg.reload:
            def changed(fname):
                self.log.info("Worker reloading: %s modified", fname)
                self.alive = False
                os.write(self.PIPE[1], b"1")
                self.cfg.worker_int(self)
                time.sleep(0.1)
                sys.exit(0)

            reloader_cls = reloader_engines[self.cfg.reload_engine]
            self.reloader = reloader_cls(extra_files=self.cfg.reload_extra_files,
                                         callback=changed)

        self.load_wsgi()
        if self.reloader:
            self.reloader.start()

        self.cfg.post_worker_init(self)

        # Enter main run loop
        self.booted = True
        self.run()

    def load_wsgi(self):
        try:
            self.wsgi = self.app.wsgi()
        except SyntaxError as e:
            if not self.cfg.reload:
                raise

            self.log.exception(e)

            # fix from PR #1228
            # storing the traceback into exc_tb will create a circular reference.
            # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,
            # delete the traceback after use.
            try:
                _, exc_val, exc_tb = sys.exc_info()
                self.reloader.add_extra_file(exc_val.filename)

                tb_string = io.StringIO()
                traceback.print_tb(exc_tb, file=tb_string)
                self.wsgi = util.make_fail_app(tb_string.getvalue())
            finally:
                del exc_tb

    def init_signals(self):
        # reset signaling
        for s in self.SIGNALS:
            signal.signal(s, signal.SIG_DFL)
        # init new signaling
        signal.signal(signal.SIGQUIT, self.handle_quit)
        signal.signal(signal.SIGTERM, self.handle_exit)
        signal.signal(signal.SIGINT, self.handle_quit)
        signal.signal(signal.SIGWINCH, self.handle_winch)
        signal.signal(signal.SIGUSR1, self.handle_usr1)
        signal.signal(signal.SIGABRT, self.handle_abort)

        # Don't let SIGTERM and SIGUSR1 disturb active requests
        # by interrupting system calls
        signal.siginterrupt(signal.SIGTERM, False)
        signal.siginterrupt(signal.SIGUSR1, False)

        if hasattr(signal, 'set_wakeup_fd'):
            signal.set_wakeup_fd(self.PIPE[1])

    def handle_usr1(self, sig, frame):
        self.log.reopen_files()

    def handle_exit(self, sig, frame):
        self.alive = False

    def handle_quit(self, sig, frame):
        self.alive = False
        # worker_int callback
        self.cfg.worker_int(self)
        time.sleep(0.1)
        sys.exit(0)

    def handle_abort(self, sig, frame):
        self.alive = False
        self.cfg.worker_abort(self)
        sys.exit(1)

    def handle_error(self, req, client, addr, exc):
        request_start = datetime.now()
        addr = addr or ('', -1)  # unix socket case
        if isinstance(exc, (
                InvalidRequestLine, InvalidRequestMethod,
                InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,
                LimitRequestLine, LimitRequestHeaders,
                InvalidProxyLine, ForbiddenProxyRequest,
                InvalidSchemeHeaders, UnsupportedTransferCoding,
                ConfigurationProblem, ObsoleteFolding,
                SSLError,
        )):

            status_int = 400
            reason = "Bad Request"

            if isinstance(exc, InvalidRequestLine):
                mesg = "Invalid Request Line '%s'" % str(exc)
            elif isinstance(exc, InvalidRequestMethod):
                mesg = "Invalid Method '%s'" % str(exc)
            elif isinstance(exc, InvalidHTTPVersion):
                mesg = "Invalid HTTP Version '%s'" % str(exc)
            elif isinstance(exc, UnsupportedTransferCoding):
                mesg = "%s" % str(exc)
                status_int = 501
            elif isinstance(exc, ConfigurationProblem):
                mesg = "%s" % str(exc)
                status_int = 500
            elif isinstance(exc, ObsoleteFolding):
                mesg = "%s" % str(exc)
            elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):
                mesg = "%s" % str(exc)
                if not req and hasattr(exc, "req"):
                    req = exc.req  # for access log
            elif isinstance(exc, LimitRequestLine):
                mesg = "%s" % str(exc)
            elif isinstance(exc, LimitRequestHeaders):
                reason = "Request Header Fields Too Large"
                mesg = "Error parsing headers: '%s'" % str(exc)
                status_int = 431
            elif isinstance(exc, InvalidProxyLine):
                mesg = "'%s'" % str(exc)
            elif isinstance(exc, ForbiddenProxyRequest):
                reason = "Forbidden"
                mesg = "Request forbidden"
                status_int = 403
            elif isinstance(exc, InvalidSchemeHeaders):
                mesg = "%s" % str(exc)
            elif isinstance(exc, SSLError):
                reason = "Forbidden"
                mesg = "'%s'" % str(exc)
                status_int = 403

            msg = "Invalid request from ip={ip}: {error}"
            self.log.warning(msg.format(ip=addr[0], error=str(exc)))
        else:
            if hasattr(req, "uri"):
                self.log.exception("Error handling request %s", req.uri)
            else:
                self.log.exception("Error handling request (no URI read)")
            status_int = 500
            reason = "Internal Server Error"
            mesg = ""

        if req is not None:
            request_time = datetime.now() - request_start
            environ = default_environ(req, client, self.cfg)
            environ['REMOTE_ADDR'] = addr[0]
            environ['REMOTE_PORT'] = str(addr[1])
            resp = Response(req, client, self.cfg)
            resp.status = "%s %s" % (status_int, reason)
            resp.response_length = len(mesg)
            self.log.access(resp, req, environ, request_time)

        try:
            util.write_error(client, status_int, reason, mesg)
        except Exception:
            self.log.debug("Failed to send error message.")

    def handle_winch(self, sig, fname):
        # Ignore SIGWINCH in the worker. Fixes a crash on OpenBSD.
        self.log.debug("worker: SIGWINCH ignored.")
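A minimal sketch of the subclass contract spelled out in the docstrings above: run() is the main loop and notify() must fire at least once per timeout interval. Illustrative only; the loop body is a stand-in:

# illustrative only -- not a production worker
import time
from gunicorn.workers.base import Worker

class NoopWorker(Worker):
    def run(self):
        while self.alive:
            self.notify()    # heartbeat so the arbiter does not murder us
            time.sleep(1.0)  # a real worker would accept and handle requests here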
147
venv/lib/python3.11/site-packages/gunicorn/workers/base_async.py
Normal file
@ -0,0 +1,147 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

from datetime import datetime
import errno
import socket
import ssl
import sys

from gunicorn import http
from gunicorn.http import wsgi
from gunicorn import util
from gunicorn.workers import base

ALREADY_HANDLED = object()


class AsyncWorker(base.Worker):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.worker_connections = self.cfg.worker_connections

    def timeout_ctx(self):
        raise NotImplementedError()

    def is_already_handled(self, respiter):
        # some workers will need to overload this function to raise a StopIteration
        return respiter == ALREADY_HANDLED

    def handle(self, listener, client, addr):
        req = None
        try:
            parser = http.RequestParser(self.cfg, client, addr)
            try:
                listener_name = listener.getsockname()
                if not self.cfg.keepalive:
                    req = next(parser)
                    self.handle_request(listener_name, req, client, addr)
                else:
                    # keepalive loop
                    proxy_protocol_info = {}
                    while True:
                        req = None
                        with self.timeout_ctx():
                            req = next(parser)
                        if not req:
                            break
                        if req.proxy_protocol_info:
                            proxy_protocol_info = req.proxy_protocol_info
                        else:
                            req.proxy_protocol_info = proxy_protocol_info
                        self.handle_request(listener_name, req, client, addr)
            except http.errors.NoMoreData as e:
                self.log.debug("Ignored premature client disconnection. %s", e)
            except StopIteration as e:
                self.log.debug("Closing connection. %s", e)
            except ssl.SSLError:
                # pass to the next try-except level
                util.reraise(*sys.exc_info())
            except OSError:
                # pass to the next try-except level
                util.reraise(*sys.exc_info())
            except Exception as e:
                self.handle_error(req, client, addr, e)
        except ssl.SSLError as e:
            if e.args[0] == ssl.SSL_ERROR_EOF:
                self.log.debug("ssl connection closed")
                client.close()
            else:
                self.log.debug("Error processing SSL request.")
                self.handle_error(req, client, addr, e)
        except OSError as e:
            if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
                self.log.exception("Socket error processing request.")
            else:
                if e.errno == errno.ECONNRESET:
                    self.log.debug("Ignoring connection reset")
                elif e.errno == errno.ENOTCONN:
                    self.log.debug("Ignoring socket not connected")
                else:
                    self.log.debug("Ignoring EPIPE")
        except BaseException as e:
            self.handle_error(req, client, addr, e)
        finally:
            util.close(client)

    def handle_request(self, listener_name, req, sock, addr):
        request_start = datetime.now()
        environ = {}
        resp = None
        try:
            self.cfg.pre_request(self, req)
            resp, environ = wsgi.create(req, sock, addr,
                                        listener_name, self.cfg)
            environ["wsgi.multithread"] = True
            self.nr += 1
            if self.nr >= self.max_requests:
                if self.alive:
                    self.log.info("Autorestarting worker after current request.")
                    self.alive = False

            if not self.alive or not self.cfg.keepalive:
                resp.force_close()

            respiter = self.wsgi(environ, resp.start_response)
            if self.is_already_handled(respiter):
                return False
            try:
                if isinstance(respiter, environ['wsgi.file_wrapper']):
                    resp.write_file(respiter)
                else:
                    for item in respiter:
                        resp.write(item)
                resp.close()
            finally:
                request_time = datetime.now() - request_start
                self.log.access(resp, req, environ, request_time)
                if hasattr(respiter, "close"):
                    respiter.close()
            if resp.should_close():
                raise StopIteration()
        except StopIteration:
            raise
        except OSError:
            # If the original exception was a socket.error we delegate
            # handling it to the caller (where handle() might ignore it)
            util.reraise(*sys.exc_info())
        except Exception:
            if resp and resp.headers_sent:
                # If the response headers have already been sent, we should
                # close the connection to indicate the error.
                self.log.exception("Error handling request")
                try:
                    sock.shutdown(socket.SHUT_RDWR)
                    sock.close()
                except OSError:
                    pass
                raise StopIteration()
            raise
        finally:
            try:
                self.cfg.post_request(self, req, environ, resp)
            except Exception:
                self.log.exception("Exception in post_request hook")
        return True
186
venv/lib/python3.11/site-packages/gunicorn/workers/geventlet.py
Normal file
@ -0,0 +1,186 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

from functools import partial
import sys

try:
    import eventlet
except ImportError:
    raise RuntimeError("eventlet worker requires eventlet 0.24.1 or higher")
else:
    from packaging.version import parse as parse_version
    if parse_version(eventlet.__version__) < parse_version('0.24.1'):
        raise RuntimeError("eventlet worker requires eventlet 0.24.1 or higher")

from eventlet import hubs, greenthread
from eventlet.greenio import GreenSocket
import eventlet.wsgi
import greenlet

from gunicorn.workers.base_async import AsyncWorker
from gunicorn.sock import ssl_wrap_socket

# ALREADY_HANDLED was removed in eventlet 0.30.3+; it is now
# `WSGI_LOCAL.already_handled: bool`
# https://github.com/eventlet/eventlet/pull/544
EVENTLET_WSGI_LOCAL = getattr(eventlet.wsgi, "WSGI_LOCAL", None)
EVENTLET_ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None)


def _eventlet_socket_sendfile(self, file, offset=0, count=None):
    # Based on the implementation in gevent which in turn is slightly
    # modified from the standard library implementation.
    if self.gettimeout() == 0:
        raise ValueError("non-blocking sockets are not supported")
    if offset:
        file.seek(offset)
    blocksize = min(count, 8192) if count else 8192
    total_sent = 0
    # localize variable access to minimize overhead
    file_read = file.read
    sock_send = self.send
    try:
        while True:
            if count:
                blocksize = min(count - total_sent, blocksize)
                if blocksize <= 0:
                    break
            data = memoryview(file_read(blocksize))
            if not data:
                break  # EOF
            while True:
                try:
                    sent = sock_send(data)
                except BlockingIOError:
                    continue
                else:
                    total_sent += sent
                    if sent < len(data):
                        data = data[sent:]
                    else:
                        break
        return total_sent
    finally:
        if total_sent > 0 and hasattr(file, 'seek'):
            file.seek(offset + total_sent)


def _eventlet_serve(sock, handle, concurrency):
    """
    Serve requests forever.

    This code is nearly identical to ``eventlet.convenience.serve`` except
    that it attempts to join the pool at the end, which allows for gunicorn
    graceful shutdowns.
    """
    pool = eventlet.greenpool.GreenPool(concurrency)
    server_gt = eventlet.greenthread.getcurrent()

    while True:
        try:
            conn, addr = sock.accept()
            gt = pool.spawn(handle, conn, addr)
            gt.link(_eventlet_stop, server_gt, conn)
            conn, addr, gt = None, None, None
        except eventlet.StopServe:
            sock.close()
            pool.waitall()
            return


def _eventlet_stop(client, server, conn):
    """
    Stop a greenlet handling a request and close its connection.

    This code is lifted from eventlet so as not to depend on undocumented
    functions in the library.
    """
    try:
        try:
            client.wait()
        finally:
            conn.close()
    except greenlet.GreenletExit:
        pass
    except Exception:
        greenthread.kill(server, *sys.exc_info())


def patch_sendfile():
    # As of eventlet 0.25.1, GreenSocket.sendfile doesn't exist,
    # meaning the native implementations of socket.sendfile will be used.
    # If os.sendfile exists, it will attempt to use that, failing explicitly
    # if the socket is in non-blocking mode, which the underlying
    # socket object /is/. Even the regular _sendfile_use_send will
    # fail in that way; plus, it would use the underlying socket.send which isn't
    # properly cooperative. So we have to monkey-patch a working socket.sendfile()
    # into GreenSocket; in this method, `self.send` will be the GreenSocket's
    # send method which is properly cooperative.
    if not hasattr(GreenSocket, 'sendfile'):
        GreenSocket.sendfile = _eventlet_socket_sendfile


class EventletWorker(AsyncWorker):

    def patch(self):
        hubs.use_hub()
        eventlet.monkey_patch()
        patch_sendfile()

    def is_already_handled(self, respiter):
        # eventlet >= 0.30.3
        if getattr(EVENTLET_WSGI_LOCAL, "already_handled", None):
            raise StopIteration()
        # eventlet < 0.30.3
        if respiter == EVENTLET_ALREADY_HANDLED:
            raise StopIteration()
        return super().is_already_handled(respiter)

    def init_process(self):
        self.patch()
        super().init_process()

    def handle_quit(self, sig, frame):
        eventlet.spawn(super().handle_quit, sig, frame)

    def handle_usr1(self, sig, frame):
        eventlet.spawn(super().handle_usr1, sig, frame)

    def timeout_ctx(self):
        return eventlet.Timeout(self.cfg.keepalive or None, False)

    def handle(self, listener, client, addr):
        if self.cfg.is_ssl:
            client = ssl_wrap_socket(client, self.cfg)
        super().handle(listener, client, addr)

    def run(self):
        acceptors = []
        for sock in self.sockets:
            gsock = GreenSocket(sock)
            gsock.setblocking(1)
            hfun = partial(self.handle, gsock)
            acceptor = eventlet.spawn(_eventlet_serve, gsock, hfun,
                                      self.worker_connections)

            acceptors.append(acceptor)
            eventlet.sleep(0.0)

        while self.alive:
            self.notify()
            eventlet.sleep(1.0)

        self.notify()
        t = None
        try:
            with eventlet.Timeout(self.cfg.graceful_timeout) as t:
                for a in acceptors:
                    a.kill(eventlet.StopServe())
                for a in acceptors:
                    a.wait()
        except eventlet.Timeout as te:
            if te != t:
                raise
            for a in acceptors:
                a.kill()
193
venv/lib/python3.11/site-packages/gunicorn/workers/ggevent.py
Normal file
@ -0,0 +1,193 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import os
import sys
from datetime import datetime
from functools import partial
import time

try:
    import gevent
except ImportError:
    raise RuntimeError("gevent worker requires gevent 1.4 or higher")
else:
    from packaging.version import parse as parse_version
    if parse_version(gevent.__version__) < parse_version('1.4'):
        raise RuntimeError("gevent worker requires gevent 1.4 or higher")

from gevent.pool import Pool
from gevent.server import StreamServer
from gevent import hub, monkey, socket, pywsgi

import gunicorn
from gunicorn.http.wsgi import base_environ
from gunicorn.sock import ssl_context
from gunicorn.workers.base_async import AsyncWorker

VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__)


class GeventWorker(AsyncWorker):

    server_class = None
    wsgi_handler = None

    def patch(self):
        monkey.patch_all()

        # patch sockets
        sockets = []
        for s in self.sockets:
            sockets.append(socket.socket(s.FAMILY, socket.SOCK_STREAM,
                                         fileno=s.sock.fileno()))
        self.sockets = sockets

    def notify(self):
        super().notify()
        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s", self)
            sys.exit(0)

    def timeout_ctx(self):
        return gevent.Timeout(self.cfg.keepalive, False)

    def run(self):
        servers = []
        ssl_args = {}

        if self.cfg.is_ssl:
            ssl_args = {"ssl_context": ssl_context(self.cfg)}

        for s in self.sockets:
            s.setblocking(1)
            pool = Pool(self.worker_connections)
            if self.server_class is not None:
                environ = base_environ(self.cfg)
                environ.update({
                    "wsgi.multithread": True,
                    "SERVER_SOFTWARE": VERSION,
                })
                server = self.server_class(
                    s, application=self.wsgi, spawn=pool, log=self.log,
                    handler_class=self.wsgi_handler, environ=environ,
                    **ssl_args)
            else:
                hfun = partial(self.handle, s)
                server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)
                if self.cfg.workers > 1:
                    server.max_accept = 1

            server.start()
            servers.append(server)

        while self.alive:
            self.notify()
            gevent.sleep(1.0)

        try:
            # Stop accepting requests
            for server in servers:
                if hasattr(server, 'close'):  # gevent 1.0
                    server.close()
                if hasattr(server, 'kill'):  # gevent < 1.0
                    server.kill()

            # Handle current requests until graceful_timeout
            ts = time.time()
            while time.time() - ts <= self.cfg.graceful_timeout:
                accepting = 0
                for server in servers:
                    if server.pool.free_count() != server.pool.size:
                        accepting += 1

                # if no server is accepting a connection, we can exit
                if not accepting:
                    return

                self.notify()
                gevent.sleep(1.0)

            # Force kill all active handlers
            self.log.warning("Worker graceful timeout (pid:%s)", self.pid)
            for server in servers:
                server.stop(timeout=1)
        except Exception:
            pass

    def handle(self, listener, client, addr):
        # The connected socket timeout defaults to socket.getdefaulttimeout().
        # This forces it into blocking mode.
        client.setblocking(1)
        super().handle(listener, client, addr)

    def handle_request(self, listener_name, req, sock, addr):
        try:
            super().handle_request(listener_name, req, sock, addr)
        except gevent.GreenletExit:
            pass
        except SystemExit:
            pass

    def handle_quit(self, sig, frame):
        # Move this out of the signal handler so we can use
        # blocking calls. See #1126
        gevent.spawn(super().handle_quit, sig, frame)

    def handle_usr1(self, sig, frame):
        # Make the gevent workers handle the usr1 signal
        # by deferring to a new greenlet. See #1645
        gevent.spawn(super().handle_usr1, sig, frame)

    def init_process(self):
        self.patch()
        hub.reinit()
        super().init_process()


class GeventResponse:

    status = None
    headers = None
    sent = None

    def __init__(self, status, headers, clength):
        self.status = status
        self.headers = headers
        self.sent = clength


class PyWSGIHandler(pywsgi.WSGIHandler):

    def log_request(self):
        start = datetime.fromtimestamp(self.time_start)
        finish = datetime.fromtimestamp(self.time_finish)
        response_time = finish - start
        resp_headers = getattr(self, 'response_headers', {})

        # Status is expected to be a string but is encoded to bytes in gevent for PY3,
        # except when it isn't, because gevent uses hardcoded strings for network errors.
        status = self.status.decode() if isinstance(self.status, bytes) else self.status
        resp = GeventResponse(status, resp_headers, self.response_length)
        if hasattr(self, 'headers'):
            req_headers = self.headers.items()
        else:
            req_headers = []
        self.server.log.access(resp, req_headers, self.environ, response_time)

    def get_environ(self):
        env = super().get_environ()
        env['gunicorn.sock'] = self.socket
        env['RAW_URI'] = self.path
        return env


class PyWSGIServer(pywsgi.WSGIServer):
    pass


class GeventPyWSGIWorker(GeventWorker):
    "The gevent.pywsgi server based worker."
    server_class = PyWSGIServer
    wsgi_handler = PyWSGIHandler
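Selecting between these variants happens through the worker_class setting; hedged command-line examples ("myapp:app" is a placeholder):

# gunicorn -k gevent myapp:app         -> GeventWorker (raw StreamServer)
# gunicorn -k gevent_pywsgi myapp:app  -> GeventPyWSGIWorker (gevent.pywsgi server)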
372
venv/lib/python3.11/site-packages/gunicorn/workers/gthread.py
Normal file
@ -0,0 +1,372 @@
#
|
||||
# This file is part of gunicorn released under the MIT license.
|
||||
# See the NOTICE for more information.
|
||||
|
||||
# design:
|
||||
# A threaded worker accepts connections in the main loop, accepted
|
||||
# connections are added to the thread pool as a connection job.
|
||||
# Keepalive connections are put back in the loop waiting for an event.
|
||||
# If no event happen after the keep alive timeout, the connection is
|
||||
# closed.
|
||||
# pylint: disable=no-else-break
|
||||
|
||||
from concurrent import futures
|
||||
import errno
|
||||
import os
|
||||
import selectors
|
||||
import socket
|
||||
import ssl
|
||||
import sys
|
||||
import time
|
||||
from collections import deque
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
from threading import RLock
|
||||
|
||||
from . import base
|
||||
from .. import http
|
||||
from .. import util
|
||||
from .. import sock
|
||||
from ..http import wsgi
|
||||
|
||||
|
||||
class TConn:
|
||||
|
||||
def __init__(self, cfg, sock, client, server):
|
||||
self.cfg = cfg
|
||||
self.sock = sock
|
||||
self.client = client
|
||||
self.server = server
|
||||
|
||||
self.timeout = None
|
||||
self.parser = None
|
||||
self.initialized = False
|
||||
|
||||
# set the socket to non blocking
|
||||
self.sock.setblocking(False)
|
||||
|
||||
def init(self):
|
||||
self.initialized = True
|
||||
self.sock.setblocking(True)
|
||||
|
||||
if self.parser is None:
|
||||
# wrap the socket if needed
|
||||
if self.cfg.is_ssl:
|
||||
self.sock = sock.ssl_wrap_socket(self.sock, self.cfg)
|
||||
|
||||
# initialize the parser
|
||||
self.parser = http.RequestParser(self.cfg, self.sock, self.client)
|
||||
|
||||
def set_timeout(self):
|
||||
# set the timeout
|
||||
self.timeout = time.time() + self.cfg.keepalive
|
||||
|
||||
def close(self):
|
||||
util.close(self.sock)
|
||||
|
||||
|
||||
class ThreadWorker(base.Worker):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.worker_connections = self.cfg.worker_connections
|
||||
self.max_keepalived = self.cfg.worker_connections - self.cfg.threads
|
||||
# initialise the pool
|
||||
self.tpool = None
|
||||
self.poller = None
|
||||
self._lock = None
|
||||
self.futures = deque()
|
||||
self._keep = deque()
|
||||
self.nr_conns = 0
|
||||
|
||||
@classmethod
|
||||
def check_config(cls, cfg, log):
|
||||
max_keepalived = cfg.worker_connections - cfg.threads
|
||||
|
||||
if max_keepalived <= 0 and cfg.keepalive:
|
||||
log.warning("No keepalived connections can be handled. " +
|
||||
"Check the number of worker connections and threads.")
|
||||
|
||||
def init_process(self):
|
||||
self.tpool = self.get_thread_pool()
|
||||
self.poller = selectors.DefaultSelector()
|
||||
self._lock = RLock()
|
||||
super().init_process()
|
||||
|
||||
def get_thread_pool(self):
|
||||
"""Override this method to customize how the thread pool is created"""
|
||||
return futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
|
||||
|
||||
def handle_quit(self, sig, frame):
|
||||
self.alive = False
|
||||
# worker_int callback
|
||||
self.cfg.worker_int(self)
|
||||
self.tpool.shutdown(False)
|
||||
time.sleep(0.1)
|
||||
sys.exit(0)
|
||||
|
    def _wrap_future(self, fs, conn):
        fs.conn = conn
        self.futures.append(fs)
        fs.add_done_callback(self.finish_request)

    def enqueue_req(self, conn):
        conn.init()
        # submit the connection to a worker
        fs = self.tpool.submit(self.handle, conn)
        self._wrap_future(fs, conn)

    def accept(self, server, listener):
        try:
            sock, client = listener.accept()
            # initialize the connection object
            conn = TConn(self.cfg, sock, client, server)

            self.nr_conns += 1
            # wait until socket is readable
            with self._lock:
                self.poller.register(conn.sock, selectors.EVENT_READ,
                                     partial(self.on_client_socket_readable, conn))
        except OSError as e:
            if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
                               errno.EWOULDBLOCK):
                raise

    def on_client_socket_readable(self, conn, client):
        with self._lock:
            # unregister the client from the poller
            self.poller.unregister(client)

            if conn.initialized:
                # remove the connection from keepalive
                try:
                    self._keep.remove(conn)
                except ValueError:
                    # race condition
                    return

        # submit the connection to a worker
        self.enqueue_req(conn)

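    # Connections enter self._keep in timeout order (each one is appended
    # when its keep-alive timer is set), so reaping can stop at the first
    # entry whose deadline still lies in the future.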
    def murder_keepalived(self):
        now = time.time()
        while True:
            with self._lock:
                try:
                    # remove the connection from the queue
                    conn = self._keep.popleft()
                except IndexError:
                    break

            delta = conn.timeout - now
            if delta > 0:
                # add the connection back to the queue
                with self._lock:
                    self._keep.appendleft(conn)
                break
            else:
                self.nr_conns -= 1
                # remove the socket from the poller
                with self._lock:
                    try:
                        self.poller.unregister(conn.sock)
                    except OSError as e:
                        if e.errno != errno.EBADF:
                            raise
                    except KeyError:
                        # already removed by the system, continue
                        pass
                    except ValueError:
                        # already removed by the system, continue
                        pass

                # close the socket
                conn.close()

    def is_parent_alive(self):
        # If our parent changed then we shut down.
        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s", self)
            return False
        return True

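    # Main loop: poll the listeners and parked keep-alive sockets while below
    # the connection limit; once worker_connections is reached, stop
    # accepting and instead block briefly on in-flight futures until one
    # finishes.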
    def run(self):
        # init listeners, add them to the event loop
        for sock in self.sockets:
            sock.setblocking(False)
            # a race condition during graceful shutdown may make the listener
            # name unavailable in the request handler so capture it once here
            server = sock.getsockname()
            acceptor = partial(self.accept, server)
            self.poller.register(sock, selectors.EVENT_READ, acceptor)

        while self.alive:
            # notify the arbiter we are alive
            self.notify()

            # can we accept more connections?
            if self.nr_conns < self.worker_connections:
                # wait for an event
                events = self.poller.select(1.0)
                for key, _ in events:
                    callback = key.data
                    callback(key.fileobj)

                # check (but do not wait) for finished requests
                result = futures.wait(self.futures, timeout=0,
                                      return_when=futures.FIRST_COMPLETED)
            else:
                # wait for a request to finish
                result = futures.wait(self.futures, timeout=1.0,
                                      return_when=futures.FIRST_COMPLETED)

            # clean up finished requests
            for fut in result.done:
                self.futures.remove(fut)

            if not self.is_parent_alive():
                break

            # handle keepalive timeouts
            self.murder_keepalived()

        self.tpool.shutdown(False)
        self.poller.close()

        for s in self.sockets:
            s.close()

        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)

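    # handle() returns a (keepalive, conn) tuple; finish_request runs as the
    # future's done-callback and either parks the connection for keep-alive
    # or closes it and releases its connection slot.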
    def finish_request(self, fs):
        if fs.cancelled():
            self.nr_conns -= 1
            fs.conn.close()
            return

        try:
            (keepalive, conn) = fs.result()
            # if the connection should be kept alive, add it
            # to the event loop and record it
            if keepalive and self.alive:
                # flag the socket as non-blocking
                conn.sock.setblocking(False)

                # register the connection
                conn.set_timeout()
                with self._lock:
                    self._keep.append(conn)

                    # add the socket to the event loop
                    self.poller.register(conn.sock, selectors.EVENT_READ,
                                         partial(self.on_client_socket_readable, conn))
            else:
                self.nr_conns -= 1
                conn.close()
        except Exception:
            # an exception happened, make sure to close the
            # socket.
            self.nr_conns -= 1
            fs.conn.close()

    def handle(self, conn):
        keepalive = False
        req = None
        try:
            req = next(conn.parser)
            if not req:
                return (False, conn)

            # handle the request
            keepalive = self.handle_request(req, conn)
            if keepalive:
                return (keepalive, conn)
        except http.errors.NoMoreData as e:
            self.log.debug("Ignored premature client disconnection. %s", e)
        except StopIteration as e:
            self.log.debug("Closing connection. %s", e)
        except ssl.SSLError as e:
            if e.args[0] == ssl.SSL_ERROR_EOF:
                self.log.debug("ssl connection closed")
                conn.sock.close()
            else:
                self.log.debug("Error processing SSL request.")
                self.handle_error(req, conn.sock, conn.client, e)
        except OSError as e:
            if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
                self.log.exception("Socket error processing request.")
            else:
                if e.errno == errno.ECONNRESET:
                    self.log.debug("Ignoring connection reset")
                elif e.errno == errno.ENOTCONN:
                    self.log.debug("Ignoring socket not connected")
                else:
                    self.log.debug("Ignoring connection epipe")
        except Exception as e:
            self.handle_error(req, conn.sock, conn.client, e)

        return (False, conn)

    def handle_request(self, req, conn):
        environ = {}
        resp = None
        try:
            self.cfg.pre_request(self, req)
            request_start = datetime.now()
            resp, environ = wsgi.create(req, conn.sock, conn.client,
                                        conn.server, self.cfg)
            environ["wsgi.multithread"] = True
            self.nr += 1
            if self.nr >= self.max_requests:
                if self.alive:
                    self.log.info("Autorestarting worker after current request.")
                    self.alive = False
                resp.force_close()

            if not self.alive or not self.cfg.keepalive:
                resp.force_close()
            elif len(self._keep) >= self.max_keepalived:
                resp.force_close()

            respiter = self.wsgi(environ, resp.start_response)
            try:
                if isinstance(respiter, environ['wsgi.file_wrapper']):
                    resp.write_file(respiter)
                else:
                    for item in respiter:
                        resp.write(item)

                resp.close()
            finally:
                request_time = datetime.now() - request_start
                self.log.access(resp, req, environ, request_time)
                if hasattr(respiter, "close"):
                    respiter.close()

            if resp.should_close():
                self.log.debug("Closing connection.")
                return False
        except OSError:
            # pass to the next try-except level
            util.reraise(*sys.exc_info())
        except Exception:
            if resp and resp.headers_sent:
                # If the response headers have already been sent, we have to
                # close the connection to indicate the error.
                self.log.exception("Error handling request")
                try:
                    conn.sock.shutdown(socket.SHUT_RDWR)
                    conn.sock.close()
                except OSError:
                    pass
                raise StopIteration()
            raise
        finally:
            try:
                self.cfg.post_request(self, req, environ, resp)
            except Exception:
                self.log.exception("Exception in post_request hook")

        return True
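
# Illustrative usage (not part of this file): this worker is selected with
# the "gthread" alias, e.g.
#   gunicorn --worker-class gthread --threads 4 --worker-connections 1000 myapp:app
# where "myapp:app" is a placeholder for your WSGI application path.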
166
venv/lib/python3.11/site-packages/gunicorn/workers/gtornado.py
Normal file
@ -0,0 +1,166 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import os
import sys

try:
    import tornado
except ImportError:
    raise RuntimeError("You need tornado installed to use this worker.")
import tornado.web
import tornado.httpserver
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.wsgi import WSGIContainer
from gunicorn.workers.base import Worker
from gunicorn import __version__ as gversion
from gunicorn.sock import ssl_context


# Tornado 5.0 updated its IOLoop, and the `io_loop` arguments to many
# Tornado functions have been removed in Tornado 5.0. Also, Tornado no
# longer stores PeriodicCallbacks in ioloop._callbacks. Instead we store
# them on our side, and use stop() on them when stopping the worker.
# See https://www.tornadoweb.org/en/stable/releases/v5.0.0.html#backwards-compatibility-notes
# for more details.
TORNADO5 = tornado.version_info >= (5, 0, 0)


class TornadoWorker(Worker):

    @classmethod
    def setup(cls):
        web = sys.modules.pop("tornado.web")
        old_clear = web.RequestHandler.clear

        def clear(self):
            old_clear(self)
            if "Gunicorn" not in self._headers["Server"]:
                self._headers["Server"] += " (Gunicorn/%s)" % gversion
        web.RequestHandler.clear = clear
        sys.modules["tornado.web"] = web

    def handle_exit(self, sig, frame):
        if self.alive:
            super().handle_exit(sig, frame)

    def handle_request(self):
        self.nr += 1
        if self.alive and self.nr >= self.max_requests:
            self.log.info("Autorestarting worker after current request.")
            self.alive = False

    def watchdog(self):
        if self.alive:
            self.notify()

        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s", self)
            self.alive = False

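    # Shutdown happens over two heartbeat ticks: first stop accepting by
    # stopping the HTTP server, then on a later tick stop the periodic
    # callbacks and the IOLoop itself once the server is down.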
    def heartbeat(self):
        if not self.alive:
            if self.server_alive:
                if hasattr(self, 'server'):
                    try:
                        self.server.stop()
                    except Exception:
                        pass
                self.server_alive = False
            else:
                if TORNADO5:
                    for callback in self.callbacks:
                        callback.stop()
                    self.ioloop.stop()
                else:
                    if not self.ioloop._callbacks:
                        self.ioloop.stop()

    def init_process(self):
        # IOLoop cannot survive a fork or be shared across processes
        # in any way. When multiple processes are being used, each process
        # should create its own IOLoop. We should clear the current IOLoop
        # if it exists before os.fork.
        IOLoop.clear_current()
        super().init_process()

    def run(self):
        self.ioloop = IOLoop.instance()
        self.alive = True
        self.server_alive = False

        if TORNADO5:
            self.callbacks = []
            self.callbacks.append(PeriodicCallback(self.watchdog, 1000))
            self.callbacks.append(PeriodicCallback(self.heartbeat, 1000))
            for callback in self.callbacks:
                callback.start()
        else:
            PeriodicCallback(self.watchdog, 1000, io_loop=self.ioloop).start()
            PeriodicCallback(self.heartbeat, 1000, io_loop=self.ioloop).start()

        # Assume the app is a WSGI callable if it's not an
        # instance of tornado.web.Application or is an
        # instance of tornado.wsgi.WSGIApplication
        app = self.wsgi

        if tornado.version_info[0] < 6:
            if not isinstance(app, tornado.web.Application) or \
               isinstance(app, tornado.wsgi.WSGIApplication):
                app = WSGIContainer(app)
        elif not isinstance(app, WSGIContainer) and \
                not isinstance(app, tornado.web.Application):
            app = WSGIContainer(app)

        # Monkey-patch HTTPConnection.finish to count the
        # number of requests being handled by Tornado. This
        # will help gunicorn shut down the worker if max_requests
        # is exceeded.
        httpserver = sys.modules["tornado.httpserver"]
        if hasattr(httpserver, 'HTTPConnection'):
            old_connection_finish = httpserver.HTTPConnection.finish

            def finish(other):
                self.handle_request()
                old_connection_finish(other)
            httpserver.HTTPConnection.finish = finish
            sys.modules["tornado.httpserver"] = httpserver

            server_class = tornado.httpserver.HTTPServer
        else:

            class _HTTPServer(tornado.httpserver.HTTPServer):

                def on_close(instance, server_conn):
                    self.handle_request()
                    super().on_close(server_conn)

            server_class = _HTTPServer

        if self.cfg.is_ssl:
            if TORNADO5:
                server = server_class(app, ssl_options=ssl_context(self.cfg))
            else:
                server = server_class(app, io_loop=self.ioloop,
                                      ssl_options=ssl_context(self.cfg))
        else:
            if TORNADO5:
                server = server_class(app)
            else:
                server = server_class(app, io_loop=self.ioloop)

        self.server = server
        self.server_alive = True

        for s in self.sockets:
            s.setblocking(0)
            if hasattr(server, "add_socket"):  # tornado > 2.0
                server.add_socket(s)
            elif hasattr(server, "_sockets"):  # tornado 2.0
                server._sockets[s.fileno()] = s

        server.no_keep_alive = self.cfg.keepalive <= 0
        server.start(num_processes=1)

        self.ioloop.start()
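
# Illustrative usage (not part of this file): run an app on this worker with
# the "tornado" alias, e.g.
#   gunicorn -k tornado myapp:app
# where "myapp:app" is a placeholder application path; tornado must be
# installed.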
209
venv/lib/python3.11/site-packages/gunicorn/workers/sync.py
Normal file
@ -0,0 +1,209 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
#

from datetime import datetime
import errno
import os
import select
import socket
import ssl
import sys

from gunicorn import http
from gunicorn.http import wsgi
from gunicorn import sock
from gunicorn import util
from gunicorn.workers import base


class StopWaiting(Exception):
    """ exception raised to stop waiting for a connection """


class SyncWorker(base.Worker):

    def accept(self, listener):
        client, addr = listener.accept()
        client.setblocking(1)
        util.close_on_exec(client)
        self.handle(listener, client, addr)

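    # self.PIPE[0] is the read end of the worker's self-pipe; it is included
    # in self.wait_fds so that a signal handler writing to the pipe can wake
    # this select() immediately. The single byte is drained and discarded.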
    def wait(self, timeout):
        try:
            self.notify()
            ret = select.select(self.wait_fds, [], [], timeout)
            if ret[0]:
                if self.PIPE[0] in ret[0]:
                    os.read(self.PIPE[0], 1)
                return ret[0]

        except OSError as e:
            if e.args[0] == errno.EINTR:
                return self.sockets
            if e.args[0] == errno.EBADF:
                if self.nr < 0:
                    return self.sockets
                else:
                    raise StopWaiting
            raise

    def is_parent_alive(self):
        # If our parent changed then we shut down.
        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s", self)
            return False
        return True

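    # With a single listener we can try accept() directly; with several
    # listeners we must select() first to learn which ones are readable,
    # hence the two loop variants below.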
    def run_for_one(self, timeout):
        listener = self.sockets[0]
        while self.alive:
            self.notify()

            # Accept a connection. If we get an error telling us
            # that no connection is waiting, we fall through to the
            # select() below and wait a bit for a new client to arrive.
            try:
                self.accept(listener)
                # Keep processing clients until no one is waiting. This
                # prevents the need to select() for every client that we
                # process.
                continue

            except OSError as e:
                if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
                                   errno.EWOULDBLOCK):
                    raise

            if not self.is_parent_alive():
                return

            try:
                self.wait(timeout)
            except StopWaiting:
                return

    def run_for_multiple(self, timeout):
        while self.alive:
            self.notify()

            try:
                ready = self.wait(timeout)
            except StopWaiting:
                return

            if ready is not None:
                for listener in ready:
                    if listener == self.PIPE[0]:
                        continue

                    try:
                        self.accept(listener)
                    except OSError as e:
                        if e.errno not in (errno.EAGAIN, errno.ECONNABORTED,
                                           errno.EWOULDBLOCK):
                            raise

            if not self.is_parent_alive():
                return

    def run(self):
        # if no timeout is given, the worker would never wait and would
        # burn CPU for nothing. This minimal timeout prevents that.
        timeout = self.timeout or 0.5

        # self.socket appears to lose its blocking status after
        # we fork in the arbiter. Reset it here.
        for s in self.sockets:
            s.setblocking(0)

        if len(self.sockets) > 1:
            self.run_for_multiple(timeout)
        else:
            self.run_for_one(timeout)

    def handle(self, listener, client, addr):
        req = None
        try:
            if self.cfg.is_ssl:
                client = sock.ssl_wrap_socket(client, self.cfg)
            parser = http.RequestParser(self.cfg, client, addr)
            req = next(parser)
            self.handle_request(listener, req, client, addr)
        except http.errors.NoMoreData as e:
            self.log.debug("Ignored premature client disconnection. %s", e)
        except StopIteration as e:
            self.log.debug("Closing connection. %s", e)
        except ssl.SSLError as e:
            if e.args[0] == ssl.SSL_ERROR_EOF:
                self.log.debug("ssl connection closed")
                client.close()
            else:
                self.log.debug("Error processing SSL request.")
                self.handle_error(req, client, addr, e)
        except OSError as e:
            if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
                self.log.exception("Socket error processing request.")
            else:
                if e.errno == errno.ECONNRESET:
                    self.log.debug("Ignoring connection reset")
                elif e.errno == errno.ENOTCONN:
                    self.log.debug("Ignoring socket not connected")
                else:
                    self.log.debug("Ignoring EPIPE")
        except BaseException as e:
            self.handle_error(req, client, addr, e)
        finally:
            util.close(client)

    def handle_request(self, listener, req, client, addr):
        environ = {}
        resp = None
        try:
            self.cfg.pre_request(self, req)
            request_start = datetime.now()
            resp, environ = wsgi.create(req, client, addr,
                                        listener.getsockname(), self.cfg)
            # Force the connection closed until someone shows
            # a buffering proxy that supports Keep-Alive to
            # the backend.
            resp.force_close()
            self.nr += 1
            if self.nr >= self.max_requests:
                self.log.info("Autorestarting worker after current request.")
                self.alive = False
            respiter = self.wsgi(environ, resp.start_response)
            try:
                if isinstance(respiter, environ['wsgi.file_wrapper']):
                    resp.write_file(respiter)
                else:
                    for item in respiter:
                        resp.write(item)
                resp.close()
            finally:
                request_time = datetime.now() - request_start
                self.log.access(resp, req, environ, request_time)
                if hasattr(respiter, "close"):
                    respiter.close()
        except OSError:
            # pass to the next try-except level
            util.reraise(*sys.exc_info())
        except Exception:
            if resp and resp.headers_sent:
                # If the response headers have already been sent, we have to
                # close the connection to indicate the error.
                self.log.exception("Error handling request")
                try:
                    client.shutdown(socket.SHUT_RDWR)
                    client.close()
                except OSError:
                    pass
                raise StopIteration()
            raise
        finally:
            try:
                self.cfg.post_request(self, req, environ, resp)
            except Exception:
                self.log.exception("Exception in post_request hook")
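
# Illustrative usage (not part of this file): "sync" is gunicorn's default
# worker, so plain `gunicorn myapp:app` uses it, and
# `gunicorn -k sync -w 4 myapp:app` selects it explicitly with four worker
# processes. "myapp:app" is a placeholder application path.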
@ -0,0 +1,53 @@
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

import os
import time
import platform
import tempfile

from gunicorn import util

PLATFORM = platform.system()
IS_CYGWIN = PLATFORM.startswith('CYGWIN')


class WorkerTmp:

    def __init__(self, cfg):
        old_umask = os.umask(cfg.umask)
        fdir = cfg.worker_tmp_dir
        if fdir and not os.path.isdir(fdir):
            raise RuntimeError("%s doesn't exist. Can't create workertmp." % fdir)
        fd, name = tempfile.mkstemp(prefix="wgunicorn-", dir=fdir)
        os.umask(old_umask)

        # change the owner and group of the file if the worker will run as
        # a different user or group, so that the worker can modify the file
        if cfg.uid != os.geteuid() or cfg.gid != os.getegid():
            util.chown(name, cfg.uid, cfg.gid)

        # unlink the file so we don't leak temporary files
        try:
            if not IS_CYGWIN:
                util.unlink(name)
            # In Python 3.8, open() emits RuntimeWarning if buffering=1 for binary mode.
            # Because we never write to this file, pass 0 to switch buffering off.
            self._tmp = os.fdopen(fd, 'w+b', 0)
        except Exception:
            os.close(fd)
            raise

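    # The unlinked temp file doubles as the worker's heartbeat channel: the
    # worker touches its mtime on every notify(), and the arbiter compares
    # last_update() against the timeout to detect hung workers.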
    def notify(self):
        new_time = time.monotonic()
        os.utime(self._tmp.fileno(), (new_time, new_time))

    def last_update(self):
        return os.fstat(self._tmp.fileno()).st_mtime

    def fileno(self):
        return self._tmp.fileno()

    def close(self):
        return self._tmp.close()