[NEW] benchmarks/py-locust: Python utility for doing distributed load tests

Locust is an easy-to-use, distributed, user load testing tool. It is
intended for load-testing web sites (or other systems) and figuring out
how many concurrent users a system can handle.

The behavior of each locust (or test user if you will) is defined by you
and the swarming process is monitored from a web UI in real-time. This
will help you battle test and identify bottlenecks in your code before
letting real users in.

WWW: https://locust.io/

Approved by:	araujo (mentor), rene (mentor)
Sponsored by:	cleverbridge AG
Differential Revision:	https://reviews.freebsd.org/D18895
Vinícius Zavam 2019-01-20 12:54:30 +00:00
parent 0a527d7d7f
commit f20baa6ebd
Notes: svn2git 2021-03-31 03:12:20 +00:00
svn path=/head/; revision=490786
14 changed files with 436 additions and 0 deletions

View file

@@ -65,6 +65,7 @@
    SUBDIR += polygraph
    SUBDIR += postal
    SUBDIR += postmark
    SUBDIR += py-locust
    SUBDIR += py-naarad
    SUBDIR += py-throughpy
    SUBDIR += py-zopkio

View file

@@ -0,0 +1,34 @@
# $FreeBSD$

PORTNAME=	locust
PORTVERSION=	0.9.0
CATEGORIES=	benchmarks www python
MASTER_SITES=	CHEESESHOP
PKGNAMEPREFIX=	${PYTHON_PKGNAMEPREFIX}
DISTNAME=	locustio-${PORTVERSION}

MAINTAINER=	egypcio@FreeBSD.org
COMMENT=	Python utility for doing easy, distributed load testing

LICENSE=	MIT

RUN_DEPENDS=	${PYTHON_PKGNAMEPREFIX}Flask>=0.10.1:www/py-flask@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}gevent>=1.2.2:devel/py-gevent@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}msgpack>=0.4.2:devel/py-msgpack@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}pyzmq>=16.0.2:net/py-pyzmq@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}requests>=2.9.1:www/py-requests@${PY_FLAVOR} \
		${PYTHON_PKGNAMEPREFIX}six>=1.10.0:devel/py-six@${PY_FLAVOR}
TEST_DEPENDS=	${PYTHON_PKGNAMEPREFIX}mock>=0:devel/py-mock@${PY_FLAVOR}

USES=		python
USE_PYTHON=	autoplist distutils

NO_ARCH=	yes

OPTIONS_DEFINE=	EXAMPLES

post-install-EXAMPLES-on:
	${MKDIR} ${STAGEDIR}${EXAMPLESDIR}
	${INSTALL_DATA} ${FILESDIR}/extra-EXAMPLES* ${STAGEDIR}${EXAMPLESDIR}

.include <bsd.port.mk>
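
A quick way to try the port once it lands (a hedged sketch, not part of this change; the examples path assumes the default EXAMPLESDIR of /usr/local/share/examples/locust and that the EXAMPLES option is enabled):

# build and install from the ports tree (the resulting package name carries the
# Python flavor prefix, e.g. py36-locust, depending on the default Python)
make -C /usr/ports/benchmarks/py-locust install clean

# run Locust with one of the installed example locustfiles; the target host is
# taken from the locustfile, and the web UI comes up on http://localhost:8089
locust -f /usr/local/share/examples/locust/extra-EXAMPLES-basic.py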

View file

@@ -0,0 +1,3 @@
TIMESTAMP = 1547833536
SHA256 (locustio-0.9.0.tar.gz) = c77b471e0e08e215c93a7af9a95b79193268072873fbbc0effca40f3d9b58be4
SIZE (locustio-0.9.0.tar.gz) = 226870

View file

@@ -0,0 +1,26 @@
from locust import HttpLocust, TaskSet, task


def index(l):
    l.client.get("/")


def stats(l):
    l.client.get("/stats/requests")


class UserTasks(TaskSet):
    # one can specify tasks like this
    tasks = [index, stats]

    # but it might be convenient to use the @task decorator
    @task
    def page404(self):
        self.client.get("/does_not_exist")


class WebsiteUser(HttpLocust):
    """
    Locust user class that does requests to the locust web server running on localhost
    """
    host = "http://127.0.0.1:8089"
    min_wait = 2000
    max_wait = 5000
    task_set = UserTasks

View file

@@ -0,0 +1,50 @@
# This locust test script example will simulate a user
# browsing the Locust documentation on https://docs.locust.io/

import random
from locust import HttpLocust, TaskSequence, seq_task, task
from pyquery import PyQuery


class BrowseDocumentationSequence(TaskSequence):
    def on_start(self):
        self.urls_on_current_page = self.toc_urls

    # assume all users arrive at the index page
    @seq_task(1)
    def index_page(self):
        r = self.client.get("/")
        pq = PyQuery(r.content)
        link_elements = pq(".toctree-wrapper a.internal")
        self.toc_urls = [
            l.attrib["href"] for l in link_elements
        ]

    @seq_task(2)
    @task(50)
    def load_page(self, url=None):
        url = random.choice(self.toc_urls)
        r = self.client.get(url)
        pq = PyQuery(r.content)
        link_elements = pq("a.internal")
        self.urls_on_current_page = [
            l.attrib["href"] for l in link_elements
        ]

    @seq_task(3)
    @task(30)
    def load_sub_page(self):
        url = random.choice(self.urls_on_current_page)
        r = self.client.get(url)


class AwesomeUser(HttpLocust):
    task_set = BrowseDocumentationSequence
    host = "https://docs.locust.io/en/latest/"

    # we assume someone who is browsing the Locust docs,
    # generally has a quite long waiting time (between
    # 20 and 600 seconds), since there's a bunch of text
    # on each page
    min_wait = 20 * 1000
    max_wait = 600 * 1000

View file

@@ -0,0 +1,49 @@
# This locust test script example will simulate a user
# browsing the Locust documentation on https://docs.locust.io/

import random
from locust import HttpLocust, TaskSet, task
from pyquery import PyQuery


class BrowseDocumentation(TaskSet):
    def on_start(self):
        # assume all users arrive at the index page
        self.index_page()
        self.urls_on_current_page = self.toc_urls

    @task(10)
    def index_page(self):
        r = self.client.get("/")
        pq = PyQuery(r.content)
        link_elements = pq(".toctree-wrapper a.internal")
        self.toc_urls = [
            l.attrib["href"] for l in link_elements
        ]

    @task(50)
    def load_page(self, url=None):
        url = random.choice(self.toc_urls)
        r = self.client.get(url)
        pq = PyQuery(r.content)
        link_elements = pq("a.internal")
        self.urls_on_current_page = [
            l.attrib["href"] for l in link_elements
        ]

    @task(30)
    def load_sub_page(self):
        url = random.choice(self.urls_on_current_page)
        r = self.client.get(url)


class AwesomeUser(HttpLocust):
    task_set = BrowseDocumentation
    host = "https://docs.locust.io/en/latest/"

    # we assume someone who is browsing the Locust docs,
    # generally has a quite long waiting time (between
    # 20 and 600 seconds), since there's a bunch of text
    # on each page
    min_wait = 20 * 1000
    max_wait = 600 * 1000

View file

@@ -0,0 +1,51 @@
from locust import HttpLocust, TaskSet, task
import random


def index(l):
    l.client.get("/")


def stats(l):
    l.client.get("/stats/requests")


class UserTasks(TaskSet):
    # one can specify tasks like this
    tasks = [index, stats]

    # but it might be convenient to use the @task decorator
    @task
    def page404(self):
        self.client.get("/does_not_exist")


class WebsiteUser(HttpLocust):
    """
    Locust user class that does requests to the locust web server running on localhost
    """
    host = "http://127.0.0.1:8089"
    # Most task inter-arrival times approximate to exponential distributions
    # We will model this wait time as exponentially distributed with a mean of 1 second
    wait_function = lambda self: random.expovariate(1)*1000 # *1000 to convert to milliseconds
    task_set = UserTasks


def strictExp(min_wait,max_wait,mu=1):
    """
    Returns an exponentially distributed time strictly between two bounds.
    """
    while True:
        x = random.expovariate(mu)
        increment = (max_wait-min_wait)/(mu*6.0)
        result = min_wait + (x*increment)
        if result<max_wait:
            break
    return result


class StrictWebsiteUser(HttpLocust):
    """
    Locust user class that makes exponential requests but strictly between two bounds.
    """
    host = "http://127.0.0.1:8089"
    wait_function = lambda self: strictExp(self.min_wait, self.max_wait)*1000
    task_set = UserTasks

View file

@@ -0,0 +1,54 @@
import time
import xmlrpclib

from locust import Locust, TaskSet, events, task


class XmlRpcClient(xmlrpclib.ServerProxy):
    """
    Simple, sample XML RPC client implementation that wraps xmlrpclib.ServerProxy and
    fires locust events on request_success and request_failure, so that all requests
    gets tracked in locust's statistics.
    """
    def __getattr__(self, name):
        func = xmlrpclib.ServerProxy.__getattr__(self, name)
        def wrapper(*args, **kwargs):
            start_time = time.time()
            try:
                result = func(*args, **kwargs)
            except xmlrpclib.Fault as e:
                total_time = int((time.time() - start_time) * 1000)
                events.request_failure.fire(request_type="xmlrpc", name=name, response_time=total_time, exception=e)
            else:
                total_time = int((time.time() - start_time) * 1000)
                events.request_success.fire(request_type="xmlrpc", name=name, response_time=total_time, response_length=0)
                # In this example, I've hardcoded response_length=0. If we would want the response length to be
                # reported correctly in the statistics, we would probably need to hook in at a lower level
        return wrapper


class XmlRpcLocust(Locust):
    """
    This is the abstract Locust class which should be subclassed. It provides an XML-RPC client
    that can be used to make XML-RPC requests that will be tracked in Locust's statistics.
    """
    def __init__(self, *args, **kwargs):
        super(XmlRpcLocust, self).__init__(*args, **kwargs)
        self.client = XmlRpcClient(self.host)


class ApiUser(XmlRpcLocust):
    host = "http://127.0.0.1:8877/"
    min_wait = 100
    max_wait = 1000

    class task_set(TaskSet):
        @task(10)
        def get_time(self):
            self.client.get_time()

        @task(5)
        def get_random_number(self):
            self.client.get_random_number(0, 100)
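
The ApiUser above expects an XML-RPC server listening on 127.0.0.1:8877 that exposes get_time() and get_random_number(). Such a server is not part of this commit; a minimal standard-library sketch (the module name depends on the Python version, matching the xmlrpclib-based client above) could look like:

import random
import time

try:
    # Python 2 spelling, matching the xmlrpclib import in the client above
    from SimpleXMLRPCServer import SimpleXMLRPCServer
except ImportError:
    # Python 3 spelling of the same module
    from xmlrpc.server import SimpleXMLRPCServer


def get_time():
    return time.time()


def get_random_number(low, high):
    return random.randint(low, high)


# serve the two functions the ApiUser tasks call
server = SimpleXMLRPCServer(("127.0.0.1", 8877), logRequests=False)
server.register_function(get_time)
server.register_function(get_random_number)
server.serve_forever()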

View file

@@ -0,0 +1,25 @@
# locustfile.py

from locust import HttpLocust, TaskSet, task

USER_CREDENTIALS = [
    ("user1", "password"),
    ("user2", "password"),
    ("user3", "password"),
]


class UserBehaviour(TaskSet):
    def on_start(self):
        if len(USER_CREDENTIALS) > 0:
            user, passw = USER_CREDENTIALS.pop()
            self.client.post("/login", {"username":user, "password":passw})

    @task
    def some_task(self):
        # user should be logged in here (unless the USER_CREDENTIALS ran out)
        self.client.get("/protected/resource")


class User(HttpLocust):
    task_set = UserBehaviour
    min_wait = 5000
    max_wait = 60000

View file

@@ -0,0 +1,69 @@
# -*- coding: utf-8 -*-

"""
This is an example of a locustfile that uses Locust's built in event hooks to
track the sum of the content-length header in all successful HTTP responses
"""

from locust import HttpLocust, TaskSet, events, task, web


class MyTaskSet(TaskSet):
    @task(2)
    def index(l):
        l.client.get("/")

    @task(1)
    def stats(l):
        l.client.get("/stats/requests")


class WebsiteUser(HttpLocust):
    host = "http://127.0.0.1:8089"
    min_wait = 2000
    max_wait = 5000
    task_set = MyTaskSet


"""
We need somewhere to store the stats.

On the master node stats will contain the aggregated sum of all content-lengths,
while on the slave nodes this will be the sum of the content-lengths since the
last stats report was sent to the master
"""
stats = {"content-length":0}


def on_request_success(request_type, name, response_time, response_length):
    """
    Event handler that gets triggered on every successful request
    """
    stats["content-length"] += response_length


def on_report_to_master(client_id, data):
    """
    This event is triggered on the slave instances every time a stats report is
    to be sent to the locust master. It will allow us to add our extra content-length
    data to the dict that is being sent, and then we clear the local stats in the slave.
    """
    data["content-length"] = stats["content-length"]
    stats["content-length"] = 0


def on_slave_report(client_id, data):
    """
    This event is triggered on the master instance when a new stats report arrives
    from a slave. Here we just add the content-length to the master's aggregated
    stats dict.
    """
    stats["content-length"] += data["content-length"]


# Hook up the event listeners
events.request_success += on_request_success
events.report_to_master += on_report_to_master
events.slave_report += on_slave_report


@web.app.route("/content-length")
def total_content_length():
    """
    Add a route to the Locust web app, where we can see the total content-length
    """
    return "Total content-length received: %i" % stats["content-length"]

View file

@@ -0,0 +1,31 @@
import os

from locust import HttpLocust, TaskSet, task
from locust.clients import HttpSession


class MultipleHostsLocust(HttpLocust):
    abstract = True

    def __init__(self, *args, **kwargs):
        super(MultipleHostsLocust, self).__init__(*args, **kwargs)
        self.api_client = HttpSession(base_url=os.environ["API_HOST"])


class UserTasks(TaskSet):
    # but it might be convenient to use the @task decorator
    @task
    def index(self):
        self.locust.client.get("/")

    @task
    def index_other_host(self):
        self.locust.api_client.get("/stats/requests")


class WebsiteUser(MultipleHostsLocust):
    """
    Locust user class that does requests to the locust web server running on localhost
    """
    host = "http://127.0.0.1:8089"
    min_wait = 2000
    max_wait = 5000
    task_set = UserTasks
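
MultipleHostsLocust reads its second base URL from the API_HOST environment variable and raises a KeyError if it is unset, so a hedged invocation sketch is:

env API_HOST=http://127.0.0.1:8089 locust -f extra-EXAMPLES-multiple_hosts.py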

View file

@@ -0,0 +1,25 @@
from locust import HttpLocust, TaskSet, task, events

from gevent.coros import Semaphore

all_locusts_spawned = Semaphore()
all_locusts_spawned.acquire()


def on_hatch_complete(**kw):
    all_locusts_spawned.release()

events.hatch_complete += on_hatch_complete


class UserTasks(TaskSet):
    def on_start(self):
        all_locusts_spawned.wait()
        self.wait()

    @task
    def index(self):
        self.client.get("/")


class WebsiteUser(HttpLocust):
    host = "http://127.0.0.1:8089"
    min_wait = 2000
    max_wait = 5000
    task_set = UserTasks

View file

@@ -0,0 +1,9 @@
Locust is an easy-to-use, distributed, user load testing tool. It is intended
for load-testing web sites (or other systems) and figuring out how many
concurrent users a system can handle.

The behavior of each locust (or test user if you will) is defined by you and the
swarming process is monitored from a web UI in real-time. This will help you
battle test and identify bottlenecks in your code before letting real users in.

WWW: https://locust.io/

View file

@@ -0,0 +1,9 @@
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-basic.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-browse_docs_sequence_test.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-browse_docs_test.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-custom_wait_function.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-custom_xmlrpc_client.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-dynamice_user_credentials.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-events.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-multiple_hosts.py
%%PORTEXAMPLES%%%%EXAMPLESDIR%%/extra-EXAMPLES-semaphore_wait.py