A `ThreadPoolExecutor` example that fetches several pages concurrently, collecting each result as its future completes:

```python
import concurrent.futures
import urllib.request

URLS = ['http://www.foxnews.com/',
        'http://www.cnn.com/',
        'http://europe.wsj.com/',
        'http://www.bbc.co.uk/',
        'http://nonexistant-subdomain.python.org/']

# Retrieve a single page and report the URL and contents
def load_url(url, timeout):
    with urllib.request.urlopen(url, timeout=timeout) as conn:
        return conn.read()

# We can use a with statement to ensure threads are cleaned up promptly
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    # Start the load operations and mark each future with its URL
    future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
    for future in concurrent.futures.as_completed(future_to_url):
        url = future_to_url[future]
        try:
            data = future.result()
        except Exception as exc:
            print('%r generated an exception: %s' % (url, exc))
        else:
            print('%r page is %d bytes' % (url, len(data)))
```
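Because `as_completed` yields futures in the order they finish, the output order can differ from the submission order. When results are wanted in input order instead, `Executor.map` is an alternative; a minimal sketch, where the `functools.partial` wrapper and the two example URLs are illustrative rather than from the original:

```python
import concurrent.futures
import functools
import urllib.request

def load_url(url, timeout):
    with urllib.request.urlopen(url, timeout=timeout) as conn:
        return conn.read()

URLS = ['https://www.python.org/', 'https://docs.python.org/']  # illustrative URLs

with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    # map() yields results in input order; unlike as_completed(), an
    # exception from any call propagates when its result is iterated.
    fetch = functools.partial(load_url, timeout=60)
    for url, data in zip(URLS, executor.map(fetch, URLS)):
        print('%r page is %d bytes' % (url, len(data)))
```

The trade-off is that a slow first URL delays all subsequent results, whereas `as_completed` reports each page as soon as it arrives.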
A `ProcessPoolExecutor` example that checks several large numbers for primality, spreading the work across CPU cores:

```python
import concurrent.futures
import math

PRIMES = [
    112272535095293,
    112582705942171,
    112272535095293,
    115280095190773,
    115797848077099,
    1099726899285419]

def is_prime(n):
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False

    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, sqrt_n + 1, 2):
        if n % i == 0:
            return False
    return True

def main():
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))

if __name__ == '__main__':
    main()
```
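The `if __name__ == '__main__':` guard matters here: with start methods that spawn fresh interpreters (the default on Windows and macOS), worker processes re-import the main module, and the guard keeps them from re-executing `main()`. For workloads with many small tasks, `ProcessPoolExecutor.map` also accepts a `chunksize` argument that batches work sent to each worker; a minimal sketch, where the range-based workload is illustrative rather than from the original:

```python
import concurrent.futures
import math

def is_prime(n):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    for i in range(3, math.isqrt(n) + 1, 2):
        if n % i == 0:
            return False
    return True

def main():
    numbers = range(2, 100_000)  # illustrative workload: many cheap tasks
    with concurrent.futures.ProcessPoolExecutor() as executor:
        # chunksize ships tasks to workers in batches of 1000, reducing
        # the pickling and inter-process communication cost paid per task.
        results = executor.map(is_prime, numbers, chunksize=1000)
        print(sum(results), 'primes below 100000')

if __name__ == '__main__':
    main()
```

With the default `chunksize=1`, each tiny `is_prime` call pays its own round trip to a worker process; batching amortizes that overhead without changing the results.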