I've been struggling to scrape all elements from a webpage using Selenium. Despite my efforts, I can't seem to retrieve more than 6 elements out of at least 30 on the URL provided. Can you help me identify what I might be overlooking in my code?
import requests
import webbrowser
import time
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
url = 'https://www.adidas.com/us/men-shoes-new_arrivals'
res = requests.get(url, headers=headers)
page_soup = bs(res.text, "html.parser")
containers = page_soup.findAll("div", {"class": "gl-product-card-container show-variation-carousel"})
print(len(containers))
#for each container find shoe model
shoe_colors = []
for container in containers:
if container.find("div", {'class': 'gl-product-card__reviews-number'}) is not None:
shoe_model = container.div.div.img["title"]
review = container.find('div', {'class':'gl-product-card__reviews-number'})
review = int(review.text)
driver = webdriver.Chrome()
driver.get(url)
time.sleep(5)
shoe_prices = driver.find_elements_by_css_selector('.gl-price')
for price in shoe_prices:
print(price.text)
print(len(shoe_prices))