I have a Google spreadsheet with around 3,000 rows, and I am trying to extract the comments from it using the following code:
import requests
from apiclient import errors
from apiclient import discovery
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
import httplib2
CLIENT_ID = "xxxxxyyyy"
CLIENT_SECRET = "xxxxxxx"
OAUTH_SCOPE = "https://www.googleapis.com/auth/drive"
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
file_id = "zzzzzz"
def retrieve_comments(service, file_id):
"""Retrieve a list of comments.
Args:
service: Drive API service instance.
file_id: ID of the file to retrieve comments for.
Returns:
List of comments.
"""
try:
comments = service.comments().list(fileId=file_id).execute()
return comments.get('items', [])
except errors.HttpError as error:
print(f'An error occurred: {error}')
return None
# ...
flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, OAUTH_SCOPE)
flow.redirect_uri = REDIRECT_URI
authorize_url = flow.step1_get_authorize_url()
print("Go to the following link in your web browser "+ authorize_url)
code = input("Enter verfication code : ").strip()
credentials = flow.step2_exchange(code)
http = httplib2.Http()
http = credentials.authorize(http)
service = build('drive', 'v2', http=http)
comments = retrieve_comments(service, file_id)
However, the length of the returned comments list is only 20, whereas the spreadsheet definitely contains more comments than that. Could someone explain which parameter I need to tweak to retrieve all of the comments in the spreadsheet?
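From skimming the Drive v2 reference, my guess is that 20 is just the default maxResults for comments().list and that I would need to follow the response's nextPageToken to fetch the remaining pages. Below is a rough sketch of what I imagine that loop would look like (retrieve_all_comments is just my own placeholder name); I have not confirmed that this is the right approach:

def retrieve_all_comments(service, file_id):
    """Retrieve every comment on a file by following nextPageToken pages."""
    all_comments = []
    page_token = None
    while True:
        try:
            # Ask for up to 100 comments per page instead of the default 20.
            params = {'fileId': file_id, 'maxResults': 100}
            if page_token:
                params['pageToken'] = page_token
            response = service.comments().list(**params).execute()
            all_comments.extend(response.get('items', []))
            # Keep requesting pages until no nextPageToken is returned.
            page_token = response.get('nextPageToken')
            if not page_token:
                return all_comments
        except errors.HttpError as error:
            print(f'An error occurred: {error}')
            return all_comments

Thanks!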