
default timeout for requests #110

Open · wants to merge 1 commit into main
33 changes: 15 additions & 18 deletions salesforce_bulk/salesforce_bulk.py
@@ -83,7 +83,7 @@ class SalesforceBulk(object):
 
     def __init__(self, sessionId=None, host=None, username=None, password=None,
                  API_version=DEFAULT_API_VERSION, sandbox=False,
-                 security_token=None, organizationId=None, client_id=None, domain=None):
+                 security_token=None, organizationId=None, client_id=None, domain=None, timeout=None):
         if not sessionId and not username:
             raise RuntimeError(
                 "Must supply either sessionId/instance_url or username/password")
@@ -105,6 +105,7 @@ def __init__(self, sessionId=None, host=None, username=None, password=None,
         self.job_content_types = {} # dict of job_id => contentType
         self.batch_statuses = {}
         self.API_version = API_version
+        self.timeout = timeout
 
     @staticmethod
     def login_to_salesforce(username, password, sandbox=False, security_token=None,
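
The constructor change above is the whole public surface of this PR: callers can now pass a timeout when constructing the client, and the hunks below simply forward the stored value to every requests call. A minimal usage sketch, with placeholder credentials and an arbitrary 30-second limit (none of this appears in the diff itself):

from salesforce_bulk import SalesforceBulk

# Hypothetical credentials. timeout=30 gives each HTTP request 30 seconds
# to connect and 30 seconds between reads before requests raises, instead
# of blocking forever (the previous behavior, kept as the default timeout=None).
bulk = SalesforceBulk(
    username="user@example.com",
    password="secret",
    security_token="token",
    timeout=30,
)
job = bulk.create_job("Contact", "insert")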
@@ -204,7 +205,7 @@ def create_job(self, object_name=None, operation=None, contentType='CSV',
 
         resp = requests.post(self.endpoint + "/job",
                              headers=self.headers(extra_headers),
-                             data=doc)
+                             data=doc, timeout=self.timeout)
         self.check_status(resp)
 
         tree = ET.fromstring(resp.content)
@@ -221,7 +222,7 @@ def check_status(self, resp):
 
     def get_batch_list(self, job_id):
         url = self.endpoint + "/job/{}/batch".format(job_id)
-        resp = requests.get(url, headers=self.headers())
+        resp = requests.get(url, headers=self.headers(), timeout=self.timeout)
         self.check_status(resp)
         results = self.parse_response(resp)
         if isinstance(results, dict):
@@ -235,25 +236,21 @@ def get_query_batch_request(self, batch_id, job_id=None):
             job_id = self.lookup_job_id(batch_id)
 
         url = self.endpoint + "/job/{}/batch/{}/request".format(job_id, batch_id)
-        resp = requests.get(url, headers=self.headers())
+        resp = requests.get(url, headers=self.headers(), timeout=self.timeout)
         self.check_status(resp)
         return resp.text
 
     def close_job(self, job_id):
         doc = self.create_close_job_doc()
         url = self.endpoint + "/job/%s" % job_id
-        resp = requests.post(url, headers=self.headers(), data=doc)
+        resp = requests.post(url, headers=self.headers(), data=doc, timeout=self.timeout)
         self.check_status(resp)
 
     def abort_job(self, job_id):
         """Abort a given bulk job"""
         doc = self.create_abort_job_doc()
         url = self.endpoint + "/job/%s" % job_id
-        resp = requests.post(
-            url,
-            headers=self.headers(),
-            data=doc
-        )
+        resp = requests.post(url, headers=self.headers(), data=doc, timeout=self.timeout)
         self.check_status(resp)
 
     def create_job_doc(self, object_name=None, operation=None,
@@ -315,7 +312,7 @@ def query(self, job_id, soql, contentType='CSV'):
         headers = self.headers(content_type=http_content_type)
 
         uri = self.endpoint + "/job/%s/batch" % job_id
-        resp = requests.post(uri, data=soql, headers=headers)
+        resp = requests.post(uri, data=soql, headers=headers, timeout=self.timeout)
 
         self.check_status(resp)
 
@@ -338,7 +335,7 @@ def post_batch(self, job_id, data_generator):
 
         uri = self.endpoint + "/job/%s/batch" % job_id
         headers = self.headers(content_type=http_content_type)
-        resp = requests.post(uri, data=data_generator, headers=headers)
+        resp = requests.post(uri, data=data_generator, headers=headers, timeout=self.timeout)
         self.check_status(resp)
 
         result = self.parse_response(resp)
@@ -352,7 +349,7 @@ def post_mapping_file(self, job_id, mapping_data):
         http_content_type = job_to_http_content_type[job_content_type]
         uri = self.endpoint + "/job/%s/spec" % job_id
         headers = self.headers(content_type=http_content_type)
-        resp = requests.post(uri, data=mapping_data, headers=headers)
+        resp = requests.post(uri, data=mapping_data, headers=headers, timeout=self.timeout)
         self.check_status(resp)
 
         if resp.status_code != 201:
@@ -368,7 +365,7 @@ def lookup_job_id(self, batch_id):
     def job_status(self, job_id=None):
         job_id = job_id
         uri = urlparse.urljoin(self.endpoint + "/", 'job/{0}'.format(job_id))
-        response = requests.get(uri, headers=self.headers())
+        response = requests.get(uri, headers=self.headers(), timeout=self.timeout)
         self.check_status(response)
 
         tree = ET.fromstring(response.content)
@@ -413,7 +410,7 @@ def batch_status(self, batch_id=None, job_id=None, reload=False):
 
         uri = self.endpoint + \
             "/job/%s/batch/%s" % (job_id, batch_id)
-        resp = requests.get(uri, headers=self.headers())
+        resp = requests.get(uri, headers=self.headers(), timeout=self.timeout)
         self.check_status(resp)
 
         result = self.parse_response(resp)
@@ -454,7 +451,7 @@ def get_query_batch_result_ids(self, batch_id, job_id=None):
             "job/{0}/batch/{1}/result".format(
                 job_id, batch_id),
         )
-        resp = requests.get(uri, headers=self.headers())
+        resp = requests.get(uri, headers=self.headers(), timeout=self.timeout)
         self.check_status(resp)
 
         if resp.headers['Content-Type'] == 'application/json':
@@ -494,7 +491,7 @@ def get_query_batch_results(self, batch_id, result_id, job_id=None, chunk_size=2
                 job_id, batch_id, result_id),
         )
 
-        resp = requests.get(uri, headers=self.headers(), stream=True)
+        resp = requests.get(uri, headers=self.headers(), stream=True, timeout=self.timeout)
         self.check_status(resp)
         if raw:
             return resp.raw
@@ -511,7 +508,7 @@ def get_batch_results(self, batch_id, job_id=None):
                 job_id, batch_id),
        )
 
-        resp = requests.get(uri, headers=self.headers(), stream=True)
+        resp = requests.get(uri, headers=self.headers(), stream=True, timeout=self.timeout)
         self.check_status(resp)
 
         iter = (x.replace(b'\0', b'') for x in resp.iter_content())
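Because the value is handed straight to requests, anything requests accepts is valid here: a single float applies to both the connect and read phases, a (connect, read) tuple sets them separately, and the default None preserves the library's old behavior of waiting indefinitely. An illustrative construction (session values are placeholders):

# Fail fast on connection problems but tolerate slow reads of large result sets.
bulk = SalesforceBulk(sessionId=session_id, host=host, timeout=(3.05, 27))

When a limit is exceeded, requests raises requests.exceptions.Timeout rather than returning a partial response, so callers that opt in should be ready to catch it.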