Quantcast
Viewing all 7229 articles
Browse latest View live

Throttle requests with limit_req rate based on header from response to auth subrequest (no replies)

I'm hoping to use the limit_req directive with different rates based on a header that is returned from the auth subrequest. I got some ideas from https://www.ruby-forum.com/topic/4418040 but am running into a few problems. Here is my configuration:

> user nginx;
> worker_processes auto;
> error_log /var/log/nginx/error.log warn;
> pid /var/run/nginx.pid;
> events {
> worker_connections 10000;
> }
>
> worker_rlimit_nofile 10000;
>
> http {
> log_subrequest on;
>
> log_format main escape=json '{ "timestamp": "$time_local", "client": "$remote_addr",'
> ' "method": "$request_method", "uri": "$uri",'
> ' "request_length": $request_length,'
> ' "status": $status, "bytes_sent": $bytes_sent,'
> ' "upstream_status": "$upstream_status",'
> ' "request_id": "$request_id",'
> ' "request_uri": "$request_uri",'
> ' "tier": "$tier",'
> ' "upstream_http_tier": "$upstream_http_tier",'
> ' "2X_key": "$2X_key",'
> ' "3X_key": "$3X_key",'
> ' "2X_key_from_upstream": "$2X_key_from_upstream",'
> ' "3X_key_from_upstream": "$3X_key_from_upstream",'
> ' "origin": "$http_origin"}' ;
>
> access_log /var/log/nginx/access.log main;
> sendfile on;
> tcp_nopush on;
> tcp_nodelay on;
> keepalive_timeout 65;
> types_hash_max_size 2048;
> include /etc/nginx/mime.types;
> default_type application/octet-stream;
> proxy_buffering on;
> proxy_buffers 8 64k;
> proxy_cache_path /dev/shm/nginx/auth use_temp_path=off levels=1:2 keys_zone=auth_cache:1024m inactive=30m max_size=1g;
> proxy_cache_path /dev/shm/nginx/manifests use_temp_path=off levels=1:2 keys_zone=manifest_cache:100m inactive=30s max_size=10g;
> proxy_cache_methods GET HEAD;
> proxy_cache_lock on;
> proxy_cache_use_stale updating;
> proxy_bind 0.0.0.0;
> proxy_ignore_headers Expires;
> proxy_pass_header Server;
>
>
> map $request_uri $endpoint_id {
> default "unknown";
> ~^/out/v\d+/(?P<endpoint>.+?)/.+$ $endpoint;
> }
>
> # Mappings based on the tier header from the /auth request
> map $tier $2X_key {~02x $endpoint_id; default "";}
> map $tier $3X_key {~03x $endpoint_id; default "";}
> map $upstream_http_tier $2X_key_from_upstream {~02x $endpoint_id; default "";}
> map $upstream_http_tier $3X_key_from_upstream {~03x $endpoint_id; default "";}
>
> # Throttle zones based on the results of the above mapping
> limit_req_zone $2X_key zone=2x_zone:20m rate=10r/s;
> limit_req_zone $3X_key zone=3x_zone:20m rate=100r/s;
> limit_req_zone $2X_key_from_upstream zone=2x_zone_from_upstream:20m rate=10r/s;
> limit_req_zone $3X_key_from_upstream zone=3x_zone_from_upstream:20m rate=100r/s;
>
>
> server {
> listen 80 default_server;
> listen [::]:80 default_server;
> server_name default_backend;
> server_tokens off;
> access_log /var/log/nginx/access.log main;
>
> root /var/www/html;
>
> error_page 401 /error_pages/401.html;
> error_page 403 /error_pages/403.html;
> error_page 404 /error_pages/404.html;
> error_page 429 /error_pages/429.html;
> error_page 500 501 502 503 504 /error_pages/5xx.html;
>
> set $upstream_server http://my_server:80;
>
> proxy_http_version 1.1;
> proxy_set_header Connection "";
> proxy_connect_timeout 10;
> proxy_send_timeout 30;
> proxy_read_timeout 30;
>
> proxy_cache_valid 404 412 1s;
>
> location ~* \.(m3u8|mpd|isml?/manifest)$ {
> auth_request /auth;
>
> # Capture the tier header from auth request
> auth_request_set $tier $upstream_http_tier;
>
> # Throttling based on mappings from tier
> limit_req zone=2x_zone burst=10 nodelay;
> limit_req zone=3x_zone burst=10 nodelay;
> limit_req zone=2x_zone_from_upstream burst=10 nodelay;
> limit_req zone=3x_zone_from_upstream burst=10 nodelay;
> limit_req_status 429;
>
> proxy_pass $upstream_server;
> proxy_cache manifest_cache;
> set $cache_key "${endpoint_id}";
> proxy_cache_key $cache_key;
> proxy_cache_valid 200 301 302 2s;
>
> access_log /var/log/nginx/access.log main;
> }
>
> location /auth {
> internal;
>
> set $auth_type 1;
> proxy_pass_request_body off;
> proxy_pass $upstream_server/auth?endpoint=$endpoint_id;
>
> proxy_cache auth_cache;
> set $auth_cache_key "${endpoint_id}";
> proxy_cache_key $auth_cache_key;
> proxy_cache_valid 200 301 302 5m;
> proxy_cache_valid 400 401 403 404 5m;
>
> access_log /var/log/nginx/access.log main;
> }
> }
> }
>

I'm expecting the following line to capture the "tier" header that comes back from the auth subrequest (sometimes it will be "02x", and sometimes "03x"):

> auth_request_set $tier $upstream_http_tier;

Then, that value will be passed into several mappings that should either return "" or a value, depending on which throttle zone should be applied. (There are variants using both $tier and $upstream_http_tier from attempts at troubleshooting.)

> map $tier $2X_key {~02x $endpoint_id; default "";}
> map $tier $3X_key {~03x $endpoint_id; default "";}
> map $upstream_http_tier $2X_key_from_upstream {~02x $endpoint_id; default "";}
> map $upstream_http_tier $3X_key_from_upstream {~03x $endpoint_id; default "";}

Finally, I expect only the limit_req directive with a non-empty key to be applied:

> limit_req_zone $2X_key zone=2x_zone:20m rate=10r/s;
> limit_req_zone $3X_key zone=3x_zone:20m rate=100r/s;
> limit_req_zone $2X_key_from_upstream zone=2x_zone_from_upstream:20m rate=10r/s;
> limit_req_zone $3X_key_from_upstream zone=3x_zone_from_upstream:20m rate=100r/s;


However, it looks like at least two things are not behaving as I expect.
1) The only throttling that I see is coming from 2x_zone, even when the header comes back as "03x".

2018/08/29 22:37:31 [error] 12626#0: *1596735 limiting requests, excess: 10.010 by zone "2x_zone", client: 10.0.136.178, server: default_backend, request: "GET /out/v1/04b1719535444a1d84389aeb0a1fb912/throttleCanary.m3u8 HTTP/1.1", host: "myserver"

2) Mapping based on the variable captured from auth_request_set don't appear to be set as I expect on the main request.

Here's what I see in the access log for an auth request and a main request:

{
"timestamp": "29/Aug/2018:22:37:31 +0000",
"client": "10.0.136.178",
"method": "GET",
"uri": "/auth",
"status": 200,
"bytes_sent": 0,
"upstream_status": "",
"server_name": "default_backend",
"request_id": "d6545f5758c2b3944438c80f2e964678",
"request_uri": "/out/v1/04b1719535444a1d84389aeb0a1fb912/throttleCanary.m3u8",
"tier": "",
"upstream_http_tier": "02x",
"2X_key": "",
"3X_key": "",
"2X_key_from_upstream": "04b1719535444a1d84389aeb0a1fb912",
"3X_key_from_upstream": "",
"origin": ""
}
{
"timestamp": "29/Aug/2018:22:37:31 +0000",
"client": "10.0.136.178",
"method": "GET",
"uri": "/out/v1/04b1719535444a1d84389aeb0a1fb912/throttleCanary.m3u8",
"status": 200,
"bytes_sent": 652,
"upstream_status": "",
"server_name": "default_backend",
"request_id": "d6545f5758c2b3944438c80f2e964678",
"request_uri": "/out/v1/04b1719535444a1d84389aeb0a1fb912/throttleCanary.m3u8",
"tier": "02x",
"upstream_http_tier": "",
"2X_key": "",
"3X_key": "",
"2X_key_from_upstream": "04b1719535444a1d84389aeb0a1fb912",
"3X_key_from_upstream": "",
"origin": ""
}

Notice that in the main request, "tier" is "02x" as expected, but the mapping based on $tier (2X_key) is empty while the mapping based on $upstream_http_tier (2X_key_from_upstream) has a value. Moreover, since only 2X_key_from_upstream has a value, I would expect the throttle based on that key (2x_zone_from_upstream) to take effect, not 2x_zone whose key is empty.

I would really appreciate any help explaining my misunderstanding or advice with how better to implement what I'm trying to do.

Thanks,
Jared

mirror delay (3 replies)

Hi,
I'm using the mirror module in my "production" nginx in order to mirror
real traffic to a test environment.
I don't want this mirroring to affect the latency of the production
environment, but it looks like the nginx is waiting for the response from
the test environment.
is there a way to avoid this? I just want the request to get to the test
environment and let it process it, but it shouldn't wait for the response
from the test environment in order to respond to the request

--
Thanks,
Eylon Saadon
_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

Ignore Certificate Errors (1 reply)

Hello,

is there a way to make NGINX more forgiving on TLS certificate errors? Or would that have to be done in OpenSSL instead?

When I use openssl s_client, I get the following errors from the upstream server:

140226185430680:error:0407006A:rsa routines:RSA_padding_check_PKCS1_type_1:block type is not 01:rsa_pk1.c:103:
140226185430680:error:04067072:rsa routines:RSA_EAY_PUBLIC_DECRYPT:padding check failed:rsa_eay.c:705:
140226185430680:error:1408D07B:SSL routines:ssl3_get_key_exchange:bad signature:s3_clnt.c:2010:

This causes NGINX (reverse proxy) to return 502 Bad Gateway to the browser.

The NGINX error log shows:

2018/08/29 09:09:59 [crit] 11633#11633: *28 SSL_do_handshake() failed (SSL: error:0407006A:rsa routines:RSA_padding_check_PKCS1_type_1:block type is not 01 error:04067072:rsa routines:RSA_EAY_PUBLIC_DECRYPT:padding check failed error:1408D07B:SSL routines:ssl3_get_key_exchange:bad signature) while SSL handshaking to upstream, client: 192.168.1.66, server: s5.example.com, request: "GET /xyz

I have added “proxy_ssl_verify off;”, but that did not make any difference.

Surprisingly, the browser (directly to the upstream server) does not complain about the TLS error.

Is there anything else I can do either in NGINX or openssl to suppress the 502 Bad Gateway?

Thanks…

Roger

PS: I don’t have control over the upstream server, so I can’t fix the root cause (faulty certificate).

_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

Serve multiple requests from a single proxy request (2 replies)

Hello,
I'm wondering if nginx is able to serve multiple requests from a single proxy request before it completes.

I am using the following configuration:

proxy_cache_lock on;
proxy_cache_lock_timeout 5s;
proxy_cache ram;
proxy_pass myUpstream;

My upstream uses chunked transfer encoding and serves the request in 10 sec.
Now if I try to send 2 requests to nginx, the first one starts responding immediately but the second will start 5 sec later (lock timeout) and then perform a second request to my upstream.

Is there a way to configure nginx to immediately respond to multiple requests with a single request to my upstream?

Thank you in advance,
Traquila

Rewrite with number after hyphen (no replies)

Hi,

I detected an issue with my rewrite rule in the nginx.conf and I don't
understand why it happens and how to fix it.

I have tested in two environments with versions 1.10.3 and 1.14.0.

Having the following simple conf with a regex is intended to get everything:

> location /foo {
> rewrite /foo/(.*) /web/foo.do?a=$1 last;
> }


OK:

> https://www.test.com/foo/asdf

https://www.test.com/foo/asdf-asdf

https://www.test.com/foo/asdf12

https://www.test.com/foo/asdf12-asdf

https://www.test.com/foo/12

https://www.test.com/foo/-12


KO:

> https://www.test.com/foo/asdf-12

https://www.test.com/foo/asdf-12-asdf


As implementing pcre regex, this regex works in all cases in the common
regex online sites but not in nginx.

Why if I put a number after a hyphen the regex stops working?

Many thanks,

--
Ivan Bianchi
Wikiloc
_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

add checksum to nginx log entries (no replies)

Hi,
I'm wondering if there is a ready way to add a checksum (e.g. CRC) to the end of each log entry before they get written to the "access" or "error" log files? One of the project I work on wants each log line to have its own checksum for some integrity checks.

Any hint on how I can implement these would be of great help.

Thanks,
Cheers,
Prasanna

reverse proxy multiple subdomains problems (no replies)

I am trying to do a redirect from http and a reverse proxy to my apache web server. I would like to include several subdomains; they all have dns records and apache virtual hosts setup on the other end. However no matter which of the 3 subdomains i try i always end up at https://my-site.com — this is fine for www.my-site.com, but recipes.my-site.com is supposed to be a different website altogether.

I am new with nginx and have a hunch that it may have something to do with $server_name$request_uri not being the right option in my case but i'm not sure see config below

server {
listen 172.16.0.10:80;
server_name my-site.com www.my-site.com recipes.my-site.com;
return 301 https://$server_name$request_uri;
}

server {
listen 172.16.0.10:443 ssl;

server_name my-site.com www.my-site.com recipes.my-site.com;

access_log /var/log/nginx/van-ginneken.com-access.log;
ssl_certificate /root/SYNC-certs/van-ginneken.com/fullchain.pem;
ssl_certificate_key /root/SYNC-certs/van-ginneken.com/privkey.pem;

set $upstream 172.16.0.13;

location / {
proxy_pass_header Authorization;
proxy_pass https://$upstream;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_buffering off;
client_max_body_size 0;
proxy_read_timeout 36000s;
proxy_redirect off;
proxy_ssl_session_reuse off;
}
}

Set `expires` by MIME type (no replies)

Hello.
I am attempting to use `expires` on Nginx 1.15.3 to define the expiry of files on a per MIME type basis.

I have used [1] as a base, and constructed the following `map` in the `http` section of a `include`-d `server` block (domain sanitised):

map $sent_http_content_type $www_example_com_expires {
default 1M;
application/atom+xml 1h;
application/javascript 1M;
application/json 0s;
application/ld+json 0s;
application/manifest+json 1w;
application/rdf+xml 1h;
application/rss+xml 1h;
application/schema+json 0s;
application/x-javascript 1M;
application/xml 0s;
font/woff 1M;
image/gif 1M;
image/jpeg 1M;
image/png 1M;
image/svg+xml 1M;
image/vnd.microsoft.icon 1M;
image/webp 1M;
image/x-icon 1M;
text/cache-manifest 0s;
text/css 1M;
text/html 0s;
text/javascript 1M;
text/x-cross-domain-policy 1w;
text/xml 0s;
video/mp4 1M;
video/webm 1M;
}

Later on, after the `map` is defined, I call it using `expires` in a `server` block:

server {#IPv4 and IPv6, https, PHP fastcgi, check https://cipherli.st for latest ciphers
access_log /var/log/nginx/www.example.com.access.log ipscrubbed;
add_header Access-Control-Allow-Origin "https://*.example.com";
add_header Content-Security-Policy "default-src 'self'; connect-src 'self' https://api.github.com; font-src 'self'; img-src 'self' data: * https://*; media-src 'self' * https://*; style-src 'self' 'unsafe-in$
add_header Expect-CT "max-age=0; report-uri=https://example.com/expect-ct-report";
add_header Feature-Policy "camera 'self'; geolocation 'none'; microphone 'none'; midi 'none'; payment 'none'";
add_header Referrer-Policy strict-origin;
add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload";
add_header X-Content-Type-Options nosniff;
add_header X-Frame-Options DENY;
add_header X-XSS-Protection "1; mode=block";
error_log /var/log/nginx/www.example.com.error.log crit;
etag off;
expires $www_example_com_expires;
index index.html index.php;
listen [::]:443 http2 ssl;
listen 443 http2 ssl;
[...]
}

My config passes the `nginx -t` self-test with no errors, and I can restart Nginx without issue.

In the browser inspector, all MIME types are assigned a 1 month expiry, as if they're inheriting the `default` value from the map. Example headers for a .php file:

Date: Mon, 03 Sep 2018 20:09:30 GMT
Expires: Wed, 03 Oct 2018 20:09:30 GMT

If I remove the `expires` directive, the 'Expires:' header is not shown, so `expires` is doing *something*.

I suspect my syntax is wrong, and I would be very grateful for any feedback -- I am particularly interested in a clue or pointer to aid my research into why this is not working.

Thank you for your attention and interest.

[1] http://nginx.org/en/docs/http/ngx_http_headers_module.html#expires

NGINX Logs balancerWorkerName but NOT balancerName (no replies)

Hello All,

I have a situation where NGINX appears to be logging the balancerWorkerName but NOT the balancerName... The requests are a 404, the balancerName exists. Any idea why this would be happening? how can you serve a request out of a balancerWorker WITHOUT the balancerName cluster?

Reverse proxy (no replies)

Hi All,
New to nginx and reverse proxies. Trying to set up a reverse proxy using nginx and docker. Have defined upstream in nginx config and created dependency in docker-compose as well. However getting a timeout error. 
Attached docker-compose and nginx.conf file for reference. Below is the error message. 
Note: However able to get the pages using below links http://178.128.159.51:81, http://178.128.159.51:82, http://178.128.159.51:83, http://178.128.159.51:100/login 2018/09/02 13:29:20 [error] 10#10: *1 upstream timed out (110: Connection timed out) while connecting to upstream, client: 103.252.27.57, server: , request: "GET / HTTP/1.1", upstream: "http://178.128.159.51:81/", host: "178.128.159.51"2018/09/02 13:30:21 [error] 10#10: *1 upstream timed out (110: Connection timed out) while connecting to upstream, client: 103.252.27.57, server: , request: "GET /favicon.ico HTTP/1.1", upstream: "http://178.128.159.51:81/favicon.ico", host: "178.128.159.51", referrer: "http://178.128.159.51/"
2018/09/02 13:30:50 [error] 12#12: *6 upstream timed out (110: Connection timed out) while connecting to upstream, client: 103.252.27.57, server: , request: "GET /topic HTTP/1.1", upstream: "http://178.128.159.51:82/", host: "178.128.159.51"
2018/09/02 13:32:07 [error] 7#7: *9 upstream timed out (110: Connection timed out) while connecting to upstream, client: 103.252.27.57, server: , request: "GET /wb HTTP/1.1", upstream: "http://178.128.159.51:100/login", host: "178.128.159.51"
2018/09/02 13:35:21 [error] 8#8: *14 upstream timed out (110: Connection timed out) while connecting to upstream, client: 103.252.27.57, server: , request: "GET /admin HTTP/1.1", upstream: "http://178.128.159.51:83/", host: "178.128.159.51"

Please advise what i am missing.
Thanks and regards

Jagannath S Bilgi
_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

Problem when reconfiguring Nginx for SSL with self-signed certificate (no replies)

I have a VPS on Digital Ocean with Ubuntu 18.04, Nginx, Gunicorn, Django, and a test web application, all configured (ufw) to work with http: 80. Everything works perfectly. Tutorial: https://www.digitalocean.com/community/tutorials/how-to-set-up-django-with-postgres-nginx-and-gunicorn-on-ubuntu-18-04#configure-nginx-to-proxy-pass-to-gunicorn

Now I modify the file /sites-available/LibrosWeb to allow SSL traffic with a self-signed certificate, since I do not have a domain.
Tutorial: https://www.digitalocean.com/community/tutorials/how-to-create-a-self-signed-ssl-certificate-for-nginx-in-ubuntu-18-04
Result "Error 502 Bad Gateway".

This is the initial code that works well with http: 80:

server{
#Configuracion http

listen 80;
listen [::]:80;
server_name 15.15.15.15;

location = /favicon.ico { access_log off; log_not_found off; }
location /robots.txt {
alias /var/www/LibrosWeb/robots.txt ;
}
location /static/ {
root /home/gela/LibrosWeb;
}

location / {
include proxy_params;
proxy_pass http://unix:/run/gunicorn.sock;
}
}

And this is the code to allow SSL (error 502):

server{
#Configuracion SSL

listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name 15.15.15.15;
include snippets/self-signed.conf;
include snippets/ssl-params.conf;

location = /favicon.ico { access_log off; log_not_found off; }
location /robots.txt {
alias /var/www/LibrosWeb/robots.txt ;
}
location /static/ {
root /home/gela/LibrosWeb;
}

location / {
include proxy_params;
proxy_pass https://unix:/run/gunicorn.sock;
}
}

server{
#Configuracion http

listen 80;
listen [::]:80;
server_name 15.15.15.15;
return 302 https://15.15.15.15$request_uri;
}

UFW configured as:

80,443/tcp (Nginx Full) ALLOW IN Anywhere
80,443/tcp (Nginx Full (v6)) ALLOW IN Anywhere (v6)

The files /etc/nginx/snippets/self-signed.conf and /etc/nginx/snippets/ssl-params.conf are the same as those in the tutorial.

I've been testing configurations for two days and the most I could get is that it works halfway — that is, I can show the default page of django but not the one of my application — if I put the code like this:

server{
#Configuracion http

listen 80;
listen [::]:80;
server_name 15.15.15.15;
return 302 https://15.15.15.15$request_uri;

location = /favicon.ico { access_log off; log_not_found off; }
location /robots.txt {
alias /var/www/LibrosWeb/robots.txt ;
}
location /static/ {
root /home/gela/LibrosWeb;
}
}

server{
#Configuracion SSL

listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name 15.15.15.15;
include snippets/self-signed.conf;
include snippets/ssl-params.conf;

location / {
include proxy_params;
proxy_pass https://unix:/run/gunicorn.sock;
}
}
What is wrong, or what is missing?

nginx sends 301 redirect for alias in location (no replies)

Hi. Could you, please, explain. Why nginx sends 301 redirect for the
following vhost:

server {
listen 80;
server_name test.domain.tv http://test.m9.smotreshka.tv;

access_log off;

location = /test/README.txt {
alias /var/www/;
}
}

Here is redirect:

$ http http://test.domain.tv/test/README.txt
HTTP/1.1 301 Moved Permanently
Connection: keep-alive
Content-Length: 178
Content-Type: text/html
Date: Wed, 05 Sep 2018 06:55:27 GMT
Keep-Alive: timeout=20
Location: http://test.domain.tv/test/README.txt/
Server: nginx

<html>
<head><title>301 Moved Permanently</title></head>
<body bgcolor="white">
<center><h1>301 Moved Permanently</h1></center>
<hr><center>nginx</center>
</body>
</html>



--
Peter.
_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

Nginx openssl Async mode support (no replies)

Hi Team,

I am new to Nginx and I am developing a new OpenSSL Dynamic engine which supports OpenSSL async mode. I have verified the async mode function via speed command provided by OpenSSL.

Now I need to integrate the OpenSSL with Nginx. In all the reference it showing that It requires Intel QAT engine support. In my case, I am using my own engine.

What all the configuration requires to test this.?
Is there any dependency for QAT framework. ?

Will nginx return 502 without any log in certain case? (no replies)

I have my nginx running on EC2(Amazon Linux) behind ALB (Load Balancer of AWS).
Usually it works just fine but very rarely ALB receives 502 bad gateway from the EC2 instance.

I checked both access.log and error.log of nginx but there is no log for 502 bad gateway.
We asked AWS about the reason of 502, but they told us it should be problem of web server running on EC2 instance.

After some googling, I found an article that when nginx sends TCP RST or TCP FIN, it will return 502 without any log output.
So I suspect I am facing this case, but is there any way to figure out whether it is so or not?
And if so, is there any way to get some information about why TCP RST or TCP FIN is caused?

Thanks in advance.

Django proxy_pass redirect issues (no replies)

Hello,
I faced an issue with nginx proxy_pass to a Django app.

I configured nginx server to this django:

#######
server {
listen 443 ssl;
server_name mydjango.com;

ssl on;
ssl_certificate /opt/ssl/nginx/mydjango.crt;
ssl_certificate_key /opt/ssl/nginx/mydjango.key;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers HIGH:!aNULL:!MD5;

client_max_body_size 120M;

#charset koi8-r;
access_log /var/log/nginx/backend.mydjango.app.log main;
error_log /var/log/nginx/backend.mydjango.app.error.log error;

location / {
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Server $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://backend.mydjango.app:3080/;
proxy_redirect off;
}
}
#######

But connecting to NginX reverse proxy (https://mydjango.com) django starts redirecting and finish with a bad request changing in my browser to: http://127.0.0.1:5002

It seems I forgot some proxy header but I tried some combinations and I dont find the good one.


Thanks in advance,

Config problems with Amplify (no replies)

Hi,

my nginx config hierarchy is:

/etc/nginx/nginx.conf (commented out except for a single include directive of /etc/nginx/conf.d/*.conf)
/etc/nginx/conf.d/default.conf (server-wider config directives and an include of /etc/nginx/sites-enabled/*.conf)
/etc/nginx/sites-enabled/domainX.conf (multiple vhost conf files each named accordingly)

With default configuration, the amplify service doesn’t receive any data and I have two Nginx entries in my Amplify Graphs page. One with /etc/nginx/nginx.conf and one with /etc/nginx/conf.d/default.conf

If I add:

configfile = /etc/nginx/conf.d/default.conf

to my /etc/amplify-agent/agent.conf (and restart it), I get data to the /etc/nginx/conf.d/default.conf but not the /etc/nginx/nginx.conf entry.

How should I setup Amplify to get a single entry (or should each vhost actually have it’s own entry)?

--
Palvelin.fi Hostmaster
postmaster@palvelin.fi

_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

Add Header to cached response? (no replies)

Hello,

is there a way to add a header to the cached response?

I have used ngx_http_headers_module’s add_header directive, which adds the header to the response at the time the response is generated.

What I would like to do is to add a response header at the time when the upstream request is made (reflecting the state of the caching request, not the used-the-cache request).

Thanks…

Roger

_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

Avoiding Nginx restart when rsyncing cache across machines (no replies)

I run a mini CDN for a static site by having Nginx cache machines (in
different locations) in front of the origin and load balanced by Cloudflare..



Periodically I run rsync pull to update the cache on each of these
machines. Works well, except that I realized I need to restart Nginx and
reload isn’t updating the cache in memory.



Really want to avoid the restart. Is this possible? Or maybe I am doing
something wrong here.

- Quintin
_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

SSL stream to HTTP2 server (2 replies)

Hello,

I’m trying to basically use nginx as replacement to hitch (for Varnish).

Request goes like this: browser → nginx (stream SSL) → varnish (HTTP2 on) → backend HTTP

stream {
server {
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
proxy_pass 127.0.0.1:6081;
proxy_protocol on;
}
}

With the above, I’m getting HTTP/1.1 in browser.
When I replace nginx with hitch, I get HTTP/2.

From Hitch docs: "Hitch will transmit the selected protocol as part of its PROXY header” Does nginx have same capability?

In general, is nginx capable of being SSL terminator for HTTP/2 backends using TCP streams? (while delivering HTTP/2 to supporting clients). I’m interested in using TCP streams since only those will allow use of PROXY protocol to upstream.

Best Regards,
Danila

_______________________________________________
nginx mailing list
nginx@nginx.org
http://mailman.nginx.org/mailman/listinfo/nginx

nginx as nonroot - setsockopt not permitted (2 replies)

Hi,

we use nginx which load-balances toward our snmptrapd. Everything is working fine if we start nginx with root. We would like to change it so nginx (workers) would start with nginx user. I couldn't make it work, do you have any idea what additional thing can I set/check?

nginx -V
nginx version: nginx/1.12.2
built by gcc 4.8.5 20150623 (Red Hat 4.8.5-16) (GCC)
built with OpenSSL 1.0.2k-fips 26 Jan 2017
TLS SNI support enabled
configure arguments: --prefix=/usr/share/nginx --sbin-path=/usr/sbin/nginx --modules-path=/usr/lib64/nginx/modules --conf-path=/etc/nginx/nginx.conf --error-log-path=/var/log/nginx/error.log --http-log-path=/var/log/nginx/access.log --http-client-body-temp-path=/var/lib/nginx/tmp/client_body --http-proxy-temp-path=/var/lib/nginx/tmp/proxy --http-fastcgi-temp-path=/var/lib/nginx/tmp/fastcgi --http-uwsgi-temp-path=/var/lib/nginx/tmp/uwsgi --http-scgi-temp-path=/var/lib/nginx/tmp/scgi --pid-path=/run/nginx.pid --lock-path=/run/lock/subsys/nginx --user=nginx --group=nginx --with-file-aio --with-ipv6 --with-http_auth_request_module --with-http_ssl_module --with-http_v2_module --with-http_realip_module --with-http_addition_module --with-http_xslt_module=dynamic --with-http_image_filter_module=dynamic --with-http_geoip_module=dynamic --with-http_sub_module --with-http_dav_module --with-http_flv_module --with-http_mp4_module --with-http_gunzip_module --with-http_gzip_static_module --with-http_random_index_module --with-http_secure_link_module --with-http_degradation_module --with-http_slice_module --with-http_stub_status_module --with-http_perl_module=dynamic --with-mail=dynamic --with-mail_ssl_module --with-pcre --with-pcre-jit --with-stream=dynamic --with-stream_ssl_module --with-google_perftools_module --with-debug --with-cc-opt='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -m64 -mtune=generic' --with-ld-opt='-Wl,-z,relro -specs=/usr/lib/rpm/redhat/redhat-hardened-ld -Wl,-E'

uname -a
Linux c-1 4.14.62-1.el7.centos.ncir.1.x86_64 #1 SMP Wed Aug 15 04:24:17 EEST 2018 x86_64 x86_64 x86_64 GNU/Linux


--------------------------------------------------------------------------------------------------

observation 0) with root user (master+workers)
everything works fine, snmptrapd gets the traps

--------------------------------------------------------------------------------------------------

observation 1)
idea: playing with setcap

config:
* with nginx user (master is root, workers are started with nginx user, so in /etc/nginx/nginx.conf 'user nginx;' line is included)
root 2703077 0.0 0.0 59028 2280 ? Ss 11:34 0:00 nginx: master process /usr/sbin/nginx
nginx 2703078 0.0 0.0 59476 4160 ? S 11:34 0:00 nginx: worker process
nginx 2703079 0.0 0.0 59476 4840 ? S 11:34 0:00 nginx: worker process
nginx 2703080 0.0 0.0 59476 4840 ? S 11:34 0:00 nginx: worker process
... etc.
* upstream port is 162, snmptrapd is listening there
* I've tried both capacities:
setcap cap_net_bind_service=+ep /sbin/nginx
setcap cap_net_admin+ep /sbin/nginx
* /etc/nginx/conf.d/stream/snmptrap.conf
upstream snmptrap_upstream {
#server x.y.z.226:162; #commented out for easier testing
#server x.y.z.227:162; #commented out for easier testing
server x.y.z.228:162;
}
server {
listen z.y.z.225:162 udp;
proxy_pass snmptrap_upstream;
proxy_timeout 1s;
proxy_responses 0;
proxy_bind $remote_addr transparent;
error_log /var/log/nginx/snmptrap.log;
}
* also tried out switching off iptables



netstat -ulpn | grep 162
udp 0 0 x.y.z.228:162 0.0.0.0:* 2748327/snmptrapd
udp 0 0 x.y.z.225:162 0.0.0.0:* 2743096/nginx: mast


/var/log/nginx/snmptrap.log:
2018/09/12 11:55:04 [alert] 2739785#0: *23 setsockopt(IP_TRANSPARENT) failed (1: Operation not permitted) while connecting to upstream, udp client: x.y.z.225, server: x.y.z.225:162, upstream: "x.y.z.228:162", bytes from/to client:5/0, bytes from/to upstream:0/0

/var/log/nginx/stream.log: error 500 is coming
2018-09-12T11:55:04+03:00 x.y.z.225 UDP 500 0 5 0.000 "0" "0" "0.000"

--------------------------------------------------------------------------------------------------
observation 2)
idea: trying an other upstream port (>1024), but still the same:

config:
* with nginx user (master is root, workers are started with nginx user, so in /etc/nginx/nginx.conf 'user nginx;' line is included)
* upstream port is 4162
* /etc/nginx/conf.d/stream/snmptrap.conf
upstream snmptrap_upstream {
#server x.y.z.226:162; #commented out for easier testing
#server x.y.z.227:162; #commented out for easier testing
server x.y.z.228:4162;
}
server {
listen z.y.z.225:162 udp;
proxy_pass snmptrap_upstream;
proxy_timeout 1s;
proxy_responses 0;
proxy_bind $remote_addr transparent;
error_log /var/log/nginx/snmptrap.log;
}
* also tried out switching off iptables


netstat -ulpn | grep 162
udp 0 0 x.y.z.228:4162 0.0.0.0:* 2748327/snmptrapd
udp 0 0 x.y.z.225:162 0.0.0.0:* 2743096/nginx: mast


/var/log/nginx/snmptrap.log:
2018/09/12 11:08:03 [alert] 121472#0: *112642 setsockopt(IP_TRANSPARENT) failed (1: Operation not permitted) while connecting to upstream, udp client: x.y.z.225, server: x.y.z.225:162, upstream: "x.y.z.228:4162", bytes from/to client:5/0, bytes from/to upstream:0/0

/var/log/nginx/stream.log: error 500 is coming
2018-09-12T11:08:03+03:00 x.y.z.225 UDP 500 0 5 0.000 "0" "0" "0.000"


Thanks in advance:
Orsi
Viewing all 7229 articles
Browse latest View live