#!/usr/bin/env python 

# -*- coding: utf-8 -*- 

 

from __future__ import absolute_import 

 

import datetime 

import netrc 

import os 

import re 

import socket 

import time 

import email.utils 

import xml.etree.ElementTree 

import random 

import math 

 

from .utils import * 

 

 

class InfoExtractor(object): 

    """Information Extractor class. 

 

    Information extractors are the classes that, given a URL, extract 

    information about the video (or videos) the URL refers to. This 

    information includes the real video URL, the video title, author and 

    others. The information is stored in a dictionary which is then  

    passed to the FileDownloader. The FileDownloader processes this 

    information possibly downloading the video to the file system, among 

    other possible outcomes. 

 

    The dictionaries must include the following fields: 

 

    id:             Video identifier. 

    url:            Final video URL. 

    uploader:       Nickname of the video uploader, unescaped. 

    upload_date:    Video upload date (YYYYMMDD). 

    title:          Video title, unescaped. 

    ext:            Video filename extension. 

 

    The following fields are optional: 

 

    format:         The video format, defaults to ext (used for --get-format) 

    thumbnail:      Full URL to a video thumbnail image. 

    description:    One-line video description. 

    player_url:     SWF Player URL (used for rtmpdump). 

    subtitles:      The .srt file contents. 

    urlhandle:      [internal] The urlHandle to be used to download the file, 

                    as returned by urllib.request.urlopen

 

    The fields should all be Unicode strings. 

 

    Subclasses of this one should re-define the _real_initialize() and 

    _real_extract() methods and define a _VALID_URL regexp. 

    Probably, they should also be added to the list of extractors. 

 

    _real_extract() must return a *list* of information dictionaries as 

    described above. 

 

    Finally, the _WORKING attribute should be set to False for broken IEs 

    in order to warn the users and skip the tests. 

    """ 

 

    _ready = False 

    _downloader = None 

    _WORKING = True 

 

    def __init__(self, downloader=None): 

        """Constructor. Receives an optional downloader.""" 

        self._ready = False 

        self.set_downloader(downloader) 

 

    def suitable(self, url): 

        """Receives a URL and returns True if suitable for this IE.""" 

        return re.match(self._VALID_URL, url) is not None 

 

    def working(self): 

        """Getter method for _WORKING.""" 

        return self._WORKING 

 

    def initialize(self): 

        """Initializes an instance (authentication, etc).""" 

        if not self._ready: 

            self._real_initialize() 

            self._ready = True 

 

    def extract(self, url): 

        """Extracts URL information and returns it in list of dicts.""" 

        self.initialize() 

        return self._real_extract(url) 

 

    def set_downloader(self, downloader): 

        """Sets the downloader for this IE.""" 

        self._downloader = downloader 

 

    def _real_initialize(self): 

        """Real initialization process. Redefine in subclasses.""" 

        pass 

 

    def _real_extract(self, url): 

        """Real extraction process. Redefine in subclasses.""" 

        pass 

 

 

class YoutubeIE(InfoExtractor): 

    """Information extractor for youtube.com.""" 

 

    _VALID_URL = r"""^ 

                     ( 

                         (?:https?://)?                                       # http(s):// (optional) 

                         (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/| 

                            tube\.majestyc\.net/)                             # the various hostnames, with wildcard subdomains 

                         (?:.*?\#/)?                                          # handle anchor (#/) redirect urls 

                         (?!view_play_list|my_playlists|artist|playlist)      # ignore playlist URLs 

                         (?:                                                  # the various things that can precede the ID: 

                             (?:(?:v|embed|e)/)                               # v/ or embed/ or e/ 

                             |(?:                                             # or the v= param in all its forms 

                                 (?:watch(?:_popup)?(?:\.php)?)?              # preceding watch(_popup|.php) or nothing (like /?v=xxxx) 

                                 (?:\?|\#!?)                                  # the params delimiter ? or # or #! 

                                 (?:.+&)?                                     # any other preceding param (like /?s=tuff&v=xxxx) 

                                 v= 

                             ) 

                         )?                                                   # optional -> youtube.com/xxxx is OK 

                     )?                                                       # all until now is optional -> you can pass the naked ID 

                     ([0-9A-Za-z_-]+)                                         # here is it! the YouTube video ID 

                     (?(1).+)?                                                # if we found the ID, everything can follow 

                     $""" 

    _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1' 

    _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en' 

    _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en' 

    _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' 

    _NETRC_MACHINE = 'youtube' 

    # Listed in order of quality 

    _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13'] 

    _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13'] 
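
    # The "prefer free" ordering moves each WebM itag (43-46, see
    # _video_extensions below) just ahead of the MP4 itag of the same
    # resolution: 46 before 37, 45 before 22, 44 before 35, 43 before 34.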

    _video_extensions = { 

        '13': '3gp', 

        '17': 'mp4', 

        '18': 'mp4', 

        '22': 'mp4', 

        '37': 'mp4', 

        '38': 'video', # You actually don't know if this will be MOV, AVI or whatever 

        '43': 'webm', 

        '44': 'webm', 

        '45': 'webm', 

        '46': 'webm', 

    } 

    _video_dimensions = { 

        '5': '240x400', 

        '6': '???', 

        '13': '???', 

        '17': '144x176', 

        '18': '360x640', 

        '22': '720x1280', 

        '34': '360x640', 

        '35': '480x854', 

        '37': '1080x1920', 

        '38': '3072x4096', 

        '43': '360x640', 

        '44': '480x854', 

        '45': '720x1280', 

        '46': '1080x1920', 

    } 

    IE_NAME = u'youtube' 

 

    def suitable(self, url): 

        """Receives a URL and returns True if suitable for this IE.""" 

        return re.match(self._VALID_URL, url, re.VERBOSE) is not None 

 

    def report_lang(self): 

        """Report attempt to set language.""" 

        self._downloader.to_screen(u'[youtube] Setting language') 

 

    def report_login(self): 

        """Report attempt to log in.""" 

        self._downloader.to_screen(u'[youtube] Logging in') 

 

    def report_age_confirmation(self): 

        """Report attempt to confirm age.""" 

        self._downloader.to_screen(u'[youtube] Confirming age') 

 

    def report_video_webpage_download(self, video_id): 

        """Report attempt to download video webpage.""" 

        self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id) 

 

    def report_video_info_webpage_download(self, video_id): 

        """Report attempt to download video info webpage.""" 

        self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id) 

 

    def report_video_subtitles_download(self, video_id): 

        """Report attempt to download video info webpage.""" 

        self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id) 

 

    def report_information_extraction(self, video_id): 

        """Report attempt to extract video information.""" 

        self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id) 

 

    def report_unavailable_format(self, video_id, format): 

        """Report extracted video URL.""" 

        self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format)) 

 

    def report_rtmp_download(self): 

        """Indicate the download will use the RTMP protocol.""" 

        self._downloader.to_screen(u'[youtube] RTMP download detected') 

 

    def _closed_captions_xml_to_srt(self, xml_string): 

        srt = '' 

        texts = re.findall(r'<text start="([\d\.]+)"( dur="([\d\.]+)")?>([^<]+)</text>', xml_string, re.MULTILINE) 

        # TODO parse xml instead of regex 

        for n, (start, dur_tag, dur, caption) in enumerate(texts): 

            if not dur: dur = '4' 

            start = float(start) 

            end = start + float(dur) 

            start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000) 

            end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000) 

            caption = unescapeHTML(caption) 

            caption = unescapeHTML(caption) # double cycle, intentional 

            srt += str(n+1) + '\n' 

            srt += start + ' --> ' + end + '\n' 

            srt += caption + '\n\n' 

        return srt 
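
    # Timestamp math in _closed_captions_xml_to_srt, worked through: for
    # start=61.5 the format string yields "00:01:01,500" (61.5/3600 -> 0
    # hours, 61.5/60%60 -> 1 minute, 61.5%60 -> 1.5 truncated to 01 seconds,
    # 61.5%1*1000 -> 500 ms), the HH:MM:SS,mmm form that SRT requires.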

 

    def _print_formats(self, formats): 

        print('Available formats:') 

        for x in formats: 

            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))) 

 

    def _real_initialize(self): 

        if self._downloader is None: 

            return 

 

        username = None 

        password = None 

        downloader_params = self._downloader.params 

 

        # Attempt to use provided username and password or .netrc data 

        if downloader_params.get('username', None) is not None: 

            username = downloader_params['username'] 

            password = downloader_params['password'] 

        elif downloader_params.get('usenetrc', False): 

            try: 

                info = netrc.netrc().authenticators(self._NETRC_MACHINE) 

                if info is not None: 

                    username = info[0] 

                    password = info[2] 

                else: 

                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) 

            except (IOError, netrc.NetrcParseError) as err: 

                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err)) 

                return 

 

        # Set language 

        request = compat_urllib_request.Request(self._LANG_URL) 

        try: 

            self.report_lang() 

            compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err)) 

            return 

 

        # No authentication to be performed 

        if username is None: 

            return 

 

        # Log in 

        login_form = { 

                'current_form': 'loginForm', 

                'next':     '/', 

                'action_login': 'Log In', 

                'username': username, 

                'password': password, 

                } 

        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) 

        try: 

            self.report_login() 

            login_results = compat_urllib_request.urlopen(request).read().decode('utf-8') 

            if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None: 

                self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password') 

                return 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err)) 

            return 

 

        # Confirm age 

        age_form = { 

                'next_url':     '/', 

                'action_confirm':   'Confirm', 

                } 

        request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form)) 

        try: 

            self.report_age_confirmation() 

            age_results = compat_urllib_request.urlopen(request).read().decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) 

            return 

 

    def _real_extract(self, url): 

        # Extract original video URL from URL with redirection, like age verification, using next_url parameter 

        mobj = re.search(self._NEXT_URL_RE, url) 

        if mobj: 

            url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/') 

 

        # Extract video id from URL 

        mobj = re.match(self._VALID_URL, url, re.VERBOSE) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        video_id = mobj.group(2) 

 

        # Get video webpage 

        self.report_video_webpage_download(video_id) 

        request = compat_urllib_request.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id) 

        try: 

            video_webpage_bytes = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        video_webpage = video_webpage_bytes.decode('utf-8', 'ignore') 

 

        # Attempt to extract SWF player URL 

        mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) 

        if mobj is not None: 

            player_url = re.sub(r'\\(.)', r'\1', mobj.group(1)) 

        else: 

            player_url = None 

 

        # Get video info 

        self.report_video_info_webpage_download(video_id) 
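        # Try several "el" parameter variants in turn; the first
        # get_video_info response that carries a "token" is used.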

        for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']: 

            video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en' 

                    % (video_id, el_type)) 

            request = compat_urllib_request.Request(video_info_url) 

            try: 

                video_info_webpage_bytes = compat_urllib_request.urlopen(request).read() 

                video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore') 

                video_info = compat_parse_qs(video_info_webpage) 

                if 'token' in video_info: 

                    break 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err)) 

                return 

        if 'token' not in video_info: 

            if 'reason' in video_info: 

                self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0]) 

            else: 

                self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason') 

            return 

 

        # Check for "rental" videos 

        if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: 

            self._downloader.trouble(u'ERROR: "rental" videos not supported') 

            return 

 

        # Start extracting information 

        self.report_information_extraction(video_id) 

 

        # uploader 

        if 'author' not in video_info: 

            self._downloader.trouble(u'ERROR: unable to extract uploader nickname') 

            return 

        video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0]) 

 

        # title 

        if 'title' not in video_info: 

            self._downloader.trouble(u'ERROR: unable to extract video title') 

            return 

        video_title = compat_urllib_parse.unquote_plus(video_info['title'][0]) 

 

        # thumbnail image 

        if 'thumbnail_url' not in video_info: 

            self._downloader.trouble(u'WARNING: unable to extract video thumbnail') 

            video_thumbnail = '' 

        else:   # don't panic if we can't find it 

            video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0]) 

 

        # upload date 

        upload_date = None 

        mobj = re.search(r'id="eow-date.*?>(.*?)</span>', video_webpage, re.DOTALL) 

        if mobj is not None: 

            upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) 

            format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y'] 

            for expression in format_expressions: 

                try: 

                    upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d') 

                except ValueError:

                    pass 

 

        # description 

        video_description = get_element_by_id("eow-description", video_webpage) 

        if video_description: 

            video_description = clean_html(video_description) 

        else: 

            video_description = '' 

 

        # closed captions 

        video_subtitles = None 

        if self._downloader.params.get('writesubtitles', False): 

            try: 

                self.report_video_subtitles_download(video_id) 

                request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id) 

                try: 

                    srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8') 

                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err)) 

                srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list) 

                srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list) 
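                # srt_lang_list now maps lang_code -> track name,
                # e.g. a hypothetical {'en': 'English'}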

                if not srt_lang_list: 

                    raise Trouble(u'WARNING: video has no closed captions') 

                if self._downloader.params.get('subtitleslang', False): 

                    srt_lang = self._downloader.params.get('subtitleslang') 

                elif 'en' in srt_lang_list: 

                    srt_lang = 'en' 

                else: 

                    srt_lang = list(srt_lang_list.keys())[0]

                if srt_lang not in srt_lang_list:

                    raise Trouble(u'WARNING: no closed captions found in the specified language') 

                request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id)) 

                try: 

                    srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8') 

                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err)) 

                if not srt_xml: 

                    raise Trouble(u'WARNING: unable to download video subtitles') 

                video_subtitles = self._closed_captions_xml_to_srt(srt_xml) 

            except Trouble as trouble: 

                self._downloader.trouble(str(trouble)) 

 

        if 'length_seconds' not in video_info: 

            self._downloader.trouble(u'WARNING: unable to extract video duration') 

            video_duration = '' 

        else: 

            video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]) 

 

        # token 

        video_token = compat_urllib_parse.unquote_plus(video_info['token'][0]) 

 

        # Decide which formats to download 

        req_format = self._downloader.params.get('format', None) 

 

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'): 

            self.report_rtmp_download() 

            video_url_list = [(None, video_info['conn'][0])] 

        elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1: 

            url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',') 

            url_data = [compat_parse_qs(uds) for uds in url_data_strs] 

            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]

            url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data) 
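            # url_map: itag (format code) -> download URL with its
            # signature appended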

 

            format_limit = self._downloader.params.get('format_limit', None) 

            available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats 

            if format_limit is not None and format_limit in available_formats: 

                format_list = available_formats[available_formats.index(format_limit):] 

            else: 

                format_list = available_formats 

            existing_formats = [x for x in format_list if x in url_map] 

            if len(existing_formats) == 0: 

                self._downloader.trouble(u'ERROR: no known formats available for video') 

                return 

            if self._downloader.params.get('listformats', None): 

                self._print_formats(existing_formats) 

                return 

            if req_format is None or req_format == 'best': 

                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality 

            elif req_format == 'worst': 

                video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # Worst quality

            elif req_format in ('-1', 'all'): 

                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats 

            else: 

                # Specific formats. We pick the first in a slash-delimited sequence.

                # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'. 

                req_formats = req_format.split('/') 

                video_url_list = None 

                for rf in req_formats: 

                    if rf in url_map: 

                        video_url_list = [(rf, url_map[rf])] 

                        break 

                if video_url_list is None: 

                    self._downloader.trouble(u'ERROR: requested format not available') 

                    return 

        else: 

            self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info') 

            return 

 

        results = [] 

        for format_param, video_real_url in video_url_list: 

            # Extension 

            video_extension = self._video_extensions.get(format_param, 'flv') 

 

            video_format = '{0} - {1}'.format(format_param if format_param else video_extension, 

                                              self._video_dimensions.get(format_param, '???')) 

 

            results.append({ 

                'id':       video_id, 

                'url':      video_real_url, 

                'uploader': video_uploader, 

                'upload_date':  upload_date, 

                'title':    video_title, 

                'ext':      video_extension, 

                'format':   video_format, 

                'thumbnail':    video_thumbnail, 

                'description':  video_description, 

                'player_url':   player_url, 

                'subtitles':    video_subtitles, 

                'duration':     video_duration 

            }) 

        return results 

 

 

class MetacafeIE(InfoExtractor): 

    """Information Extractor for metacafe.com.""" 

 

    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*' 
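    # e.g. http://www.metacafe.com/watch/<video_id>/<simplified_title>/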

    _DISCLAIMER = 'http://www.metacafe.com/family_filter/' 

    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user' 

    IE_NAME = u'metacafe' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_disclaimer(self): 

        """Report disclaimer retrieval.""" 

        self._downloader.to_screen(u'[metacafe] Retrieving disclaimer') 

 

    def report_age_confirmation(self): 

        """Report attempt to confirm age.""" 

        self._downloader.to_screen(u'[metacafe] Confirming age') 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id) 

 

    def _real_initialize(self): 

        # Retrieve disclaimer 

        request = compat_urllib_request.Request(self._DISCLAIMER) 

        try: 

            self.report_disclaimer() 

            disclaimer = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err)) 

            return 

 

        # Confirm age 

        disclaimer_form = { 

            'filters': '0', 

            'submit': "Continue - I'm over 18", 

            } 

        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) 

        try: 

            self.report_age_confirmation() 

            disclaimer = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) 

            return 

 

    def _real_extract(self, url): 

        # Extract id and simplified title from URL 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        video_id = mobj.group(1) 

 

        # Check if video comes from YouTube 

        mobj2 = re.match(r'^yt-(.*)$', video_id) 

        if mobj2 is not None: 

            self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)]) 

            return 

 

        # Retrieve video webpage to extract further information 

        request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id) 

        try: 

            self.report_download_webpage(video_id) 

            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % compat_str(err))

            return 

 

        # Extract URL, uploader and title from webpage 

        self.report_extraction(video_id) 

        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage) 

        if mobj is not None: 

            mediaURL = compat_urllib_parse.unquote(mobj.group(1)) 

            video_extension = mediaURL[-3:] 

 

            # Extract gdaKey if available 

            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) 

            if mobj is None: 

                video_url = mediaURL 

            else: 

                gdaKey = mobj.group(1) 

                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey) 

        else: 

            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) 

            if mobj is None: 

                self._downloader.trouble(u'ERROR: unable to extract media URL') 

                return 

            vardict = compat_parse_qs(mobj.group(1)) 

            if 'mediaData' not in vardict: 

                self._downloader.trouble(u'ERROR: unable to extract media URL') 

                return 

            mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0]) 

            if mobj is None: 

                self._downloader.trouble(u'ERROR: unable to extract media URL') 

                return 

            mediaURL = mobj.group(1).replace('\\/', '/') 

            video_extension = mediaURL[-3:] 

            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2)) 

 

        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract title') 

            return 

        video_title = mobj.group(1)

 

        mobj = re.search(r'submitter=(.*?);', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract uploader nickname') 

            return 

        video_uploader = mobj.group(1) 

 

        return [{ 

            'id':       video_id,

            'url':      video_url,

            'uploader': video_uploader,

            'upload_date':  None,

            'title':    video_title,

            'ext':      video_extension,

        }] 

 

 

class DailymotionIE(InfoExtractor): 

    """Information Extractor for Dailymotion""" 

 

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)' 

    IE_NAME = u'dailymotion' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id) 

 

    def _real_extract(self, url): 

        # Extract id and simplified title from URL 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        video_id = mobj.group(1).split('_')[0].split('?')[0] 

 

        video_extension = 'mp4' 

 

        # Retrieve video webpage to extract further information 

        request = compat_urllib_request.Request(url) 

        request.add_header('Cookie', 'family_filter=off') 

        try: 

            self.report_download_webpage(video_id) 

            webpage_bytes = compat_urllib_request.urlopen(request).read() 

            webpage = webpage_bytes.decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to retrieve video webpage: %s' % compat_str(err))

            return 

 

        # Extract URL, uploader and title from webpage 

        self.report_extraction(video_id) 

        mobj = re.search(r'\s*var flashvars = (.*)', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract media URL') 

            return 

        flashvars = compat_urllib_parse.unquote(mobj.group(1)) 
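
        # The quality keys below are listed best-first; the for/else falls
        # through to its error branch only when none of them appears in
        # flashvars.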

 

        for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']: 

            if key in flashvars: 

                max_quality = key 

                self._downloader.to_screen(u'[dailymotion] Using %s' % key) 

                break 

        else: 

            self._downloader.trouble(u'ERROR: unable to extract video URL') 

            return 

 

        mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video URL') 

            return 

 

        video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/') 

 

        # TODO: support choosing qualities 

 

        mobj = re.search(r'<meta property="og:title" content="(?P<title>[^"]*)" />', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract title') 

            return 

        video_title = unescapeHTML(mobj.group('title')) 

 

        video_uploader = None 

        mobj = re.search(r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>', webpage) 

        if mobj is None: 

            # looking for an official user

            mobj_official = re.search(r'<span rel="author"[^>]+?>([^<]+?)</span>', webpage) 

            if mobj_official is None: 

                self._downloader.trouble(u'WARNING: unable to extract uploader nickname') 

            else: 

                video_uploader = mobj_official.group(1) 

        else: 

            video_uploader = mobj.group(1) 

 

        video_upload_date = None 

        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage) 

        if mobj is not None: 

            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1) 

 

        return [{ 

            'id':       video_id, 

            'url':      video_url, 

            'uploader': video_uploader, 

            'upload_date':  video_upload_date, 

            'title':    video_title, 

            'ext':      video_extension, 

        }] 

 

 

class PhotobucketIE(InfoExtractor): 

    """Information extractor for photobucket.com.""" 

 

    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)' 

    IE_NAME = u'photobucket' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id) 

 

    def _real_extract(self, url): 

        # Extract id from URL 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        video_id = mobj.group(1) 

 

        video_extension = 'flv' 

 

        # Retrieve video webpage to extract further information 

        request = compat_urllib_request.Request(url) 

        try: 

            self.report_download_webpage(video_id) 

            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8')

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

 

        # Extract URL, uploader, and title from webpage 

        self.report_extraction(video_id) 

        mobj = re.search(r'<link rel="video_src" href=".*\?file=([^"]+)" />', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract media URL') 

            return 

        mediaURL = compat_urllib_parse.unquote(mobj.group(1)) 

 

        video_url = mediaURL 

 

        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract title') 

            return 

        video_title = mobj.group(1).decode('utf-8') 

 

        video_uploader = mobj.group(2).decode('utf-8') 

 

        return [{ 

            'id':       video_id.decode('utf-8'), 

            'url':      video_url.decode('utf-8'), 

            'uploader': video_uploader, 

            'upload_date':  None, 

            'title':    video_title, 

            'ext':      video_extension.decode('utf-8'), 

        }] 

 

 

class YahooIE(InfoExtractor): 

    """Information extractor for video.yahoo.com.""" 

 

    _WORKING = False 

    # _VALID_URL matches all Yahoo! Video URLs 

    # _VPAGE_URL matches only the extractable '/watch/' URLs 

    _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?' 

    _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?' 

    IE_NAME = u'video.yahoo' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id) 

 

    def _real_extract(self, url, new_video=True): 

        # Extract ID from URL 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        video_id = mobj.group(2) 

        video_extension = 'flv' 

 

        # Rewrite valid but non-extractable URLs as 

        # extractable English language /watch/ URLs 

        if re.match(self._VPAGE_URL, url) is None: 

            request = compat_urllib_request.Request(url) 

            try: 

                webpage = compat_urllib_request.urlopen(request).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

                return 

 

            mobj = re.search(r'\("id", "([0-9]+)"\);', webpage) 

            if mobj is None: 

                self._downloader.trouble(u'ERROR: Unable to extract id field') 

                return 

            yahoo_id = mobj.group(1) 

 

            mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage) 

            if mobj is None: 

                self._downloader.trouble(u'ERROR: Unable to extract vid field') 

                return 

            yahoo_vid = mobj.group(1) 

 

            url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id) 

            return self._real_extract(url, new_video=False) 

 

        # Retrieve video webpage to extract further information 

        request = compat_urllib_request.Request(url) 

        try: 

            self.report_download_webpage(video_id) 

            webpage = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

 

        # Extract uploader and title from webpage 

        self.report_extraction(video_id) 

        mobj = re.search(r'<meta name="title" content="(.*)" />', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video title') 

            return 

        video_title = mobj.group(1).decode('utf-8') 

 

        mobj = re.search(r'<h2 class="ti-5"><a href="http://video\.yahoo\.com/(people|profile)/[0-9]+" beacon=".*">(.*)</a></h2>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video uploader') 

            return 

        video_uploader = mobj.group(2).decode('utf-8')  # group(1) is the people/profile path segment

 

        # Extract video thumbnail 

        mobj = re.search(r'<link rel="image_src" href="(.*)" />', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video thumbnail') 

            return 

        video_thumbnail = mobj.group(1).decode('utf-8') 

 

        # Extract video description 

        mobj = re.search(r'<meta name="description" content="(.*)" />', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video description') 

            return 

        video_description = mobj.group(1).decode('utf-8') 

        if not video_description: 

            video_description = 'No description available.' 

 

        # Extract video height and width 

        mobj = re.search(r'<meta name="video_height" content="([0-9]+)" />', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video height') 

            return 

        yv_video_height = mobj.group(1) 

 

        mobj = re.search(r'<meta name="video_width" content="([0-9]+)" />', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video width') 

            return 

        yv_video_width = mobj.group(1) 

 

        # Retrieve video playlist to extract media URL 

        # I'm not completely sure what all these options are, but we 

        # seem to need most of them, otherwise the server sends a 401. 

        yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents 

        yv_bitrate = '700'  # according to Wikipedia this is hard-coded 

        request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id + 

                '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height + 

                '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797') 

        try: 

            self.report_download_webpage(video_id) 

            webpage = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

 

        # Extract media URL from playlist XML 

        mobj = re.search(r'<STREAM APP="(http://.*)" FULLPATH="/?(/.*\.flv\?[^"]*)"', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: Unable to extract media URL') 

            return 

        video_url = compat_urllib_parse.unquote(mobj.group(1) + mobj.group(2)).decode('utf-8') 

        video_url = unescapeHTML(video_url) 

 

        return [{ 

            'id':       video_id.decode('utf-8'), 

            'url':      video_url, 

            'uploader': video_uploader, 

            'upload_date':  None, 

            'title':    video_title, 

            'ext':      video_extension.decode('utf-8'), 

            'thumbnail':    video_thumbnail.decode('utf-8'), 

            'description':  video_description, 

        }] 
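
# Note on YahooIE above: URLs that match _VALID_URL but not _VPAGE_URL are
# first rewritten into the canonical form
#
#     'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
#
# and _real_extract() is invoked again on the result. The rewritten URL
# matches _VPAGE_URL, so the second call takes the direct extraction path
# and the recursion terminates after a single rewrite.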

 

 

class VimeoIE(InfoExtractor): 

    """Information extractor for vimeo.com.""" 

 

    # _VALID_URL matches Vimeo URLs 

    _VALID_URL = r'(?:https?://)?(?:(?:www|player)\.)?vimeo\.com/(?:(?:groups|album)/[^/]+/)?(?:videos?/)?([0-9]+)'

    IE_NAME = u'vimeo' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[vimeo] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[vimeo] %s: Extracting information' % video_id) 

 

    def _real_extract(self, url, new_video=True): 

        # Extract ID from URL 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        video_id = mobj.group(1) 

 

        # Retrieve video webpage to extract further information 

        request = compat_urllib_request.Request(url, None, std_headers) 

        try: 

            self.report_download_webpage(video_id) 

            webpage_bytes = compat_urllib_request.urlopen(request).read() 

            webpage = webpage_bytes.decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

 

        # Now we begin extracting as much information as we can from what we 

        # retrieved. First we extract the information common to all extractors, 

        # and later we extract those that are Vimeo specific.

        self.report_extraction(video_id) 

 

        # Extract the config JSON 

        try: 

            config = webpage.split(' = {config:')[1].split(',assets:')[0] 

            config = json.loads(config) 

        except (IndexError, ValueError):

            self._downloader.trouble(u'ERROR: unable to extract info section') 

            return 

 

        # Extract title 

        video_title = config["video"]["title"] 

 

        # Extract uploader 

        video_uploader = config["video"]["owner"]["name"] 

 

        # Extract video thumbnail 

        video_thumbnail = config["video"]["thumbnail"] 

 

        # Extract video description 

        video_description = get_element_by_id("description", webpage) 

        if video_description:
            video_description = clean_html(video_description)
        else:
            video_description = ''

 

        # Extract upload date 

        video_upload_date = None 

        mobj = re.search(r'<span id="clip-date" style="display:none">[^:]*: (.*?)( \([^\(]*\))?</span>', webpage) 

        if mobj is not None: 

            video_upload_date = mobj.group(1) 

 

        # Vimeo specific: extract request signature and timestamp 

        sig = config['request']['signature'] 

        timestamp = config['request']['timestamp'] 

 

        # Vimeo specific: extract video codec and quality information 

        # First consider quality, then codecs, then take everything 

        # TODO bind to format param 

        codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')] 

        files = { 'hd': [], 'sd': [], 'other': []} 

        for codec_name, codec_extension in codecs: 

            if codec_name in config["video"]["files"]: 

                if 'hd' in config["video"]["files"][codec_name]: 

                    files['hd'].append((codec_name, codec_extension, 'hd')) 

                elif 'sd' in config["video"]["files"][codec_name]: 

                    files['sd'].append((codec_name, codec_extension, 'sd')) 

                else: 

                    files['other'].append((codec_name, codec_extension, config["video"]["files"][codec_name][0])) 

 

        for quality in ('hd', 'sd', 'other'): 

            if len(files[quality]) > 0: 

                video_quality = files[quality][0][2] 

                video_codec = files[quality][0][0] 

                video_extension = files[quality][0][1] 

                self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality)) 

                break 

        else: 

            self._downloader.trouble(u'ERROR: no known codec found') 

            return 

 

        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \ 

                    %(video_id, sig, timestamp, video_quality, video_codec.upper()) 

 

        return [{ 

            'id':       video_id, 

            'url':      video_url, 

            'uploader': video_uploader, 

            'upload_date':  video_upload_date, 

            'title':    video_title, 

            'ext':      video_extension, 

            'thumbnail':    video_thumbnail, 

            'description':  video_description, 

        }] 
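
# Illustrative sketch (not part of the original module): VimeoIE above walks
# the codec preference list and buckets each available codec as 'hd', 'sd'
# or 'other', then downloads from the first non-empty bucket. The same
# selection logic over a toy "files" mapping:

def _example_pick_format(files_by_codec):
    codecs = [('h264', 'mp4'), ('vp8', 'flv'), ('vp6', 'flv')]
    buckets = {'hd': [], 'sd': [], 'other': []}
    for codec_name, codec_extension in codecs:
        qualities = files_by_codec.get(codec_name, [])
        if 'hd' in qualities:
            buckets['hd'].append((codec_name, codec_extension, 'hd'))
        elif 'sd' in qualities:
            buckets['sd'].append((codec_name, codec_extension, 'sd'))
        elif qualities:
            buckets['other'].append((codec_name, codec_extension, qualities[0]))
    for quality in ('hd', 'sd', 'other'):
        if buckets[quality]:
            return buckets[quality][0]
    return None

# _example_pick_format({'vp8': ['sd'], 'h264': ['hd', 'sd']}) returns
# ('h264', 'mp4', 'hd'): an hd file wins over an sd one regardless of codec.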

 

 

class ArteTvIE(InfoExtractor): 

    """arte.tv information extractor.""" 

 

    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*' 

    _LIVE_URL = r'index-[0-9]+\.html$' 

 

    IE_NAME = u'arte.tv' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id) 

 

    def fetch_webpage(self, url): 

        self._downloader.increment_downloads() 

        request = compat_urllib_request.Request(url) 

        try: 

            self.report_download_webpage(url) 

            webpage = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

        except ValueError as err: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

        return webpage 

 

    def grep_webpage(self, url, regex, regexFlags, matchTuples): 

        page = self.fetch_webpage(url) 

        mobj = re.search(regex, page, regexFlags) 

        info = {} 

 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        for (i, key, err) in matchTuples: 

            if mobj.group(i) is None: 

                self._downloader.trouble(err) 

                return 

            else: 

                info[key] = mobj.group(i) 

 

        return info 

 

    def extractLiveStream(self, url): 

        video_lang = url.split('/')[-4] 

        info = self.grep_webpage( 

            url, 

            r'src="(.*?/videothek_js.*?\.js)', 

            0, 

            [ 

                (1, 'url', u'ERROR: Invalid URL: %s' % url) 

            ] 

        ) 

        http_host = url.split('/')[2] 

        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url'))) 

        info = self.grep_webpage( 

            next_url, 

            r'(s_artestras_scst_geoFRDE_' + video_lang + r'.*?)\'.*?' +
                r'(http://.*?\.swf).*?' +
                r'(rtmp://.*?)\'',

            re.DOTALL, 

            [ 

                (1, 'path',   u'ERROR: could not extract video path: %s' % url), 

                (2, 'player', u'ERROR: could not extract video player: %s' % url), 

                (3, 'url',    u'ERROR: could not extract video url: %s' % url) 

            ] 

        ) 

        video_url = u'%s/%s' % (info.get('url'), info.get('path')) 
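        # NOTE: the assembled video_url is currently discarded; live streams
        # are detected but not returned to the downloader (_real_extract()
        # simply stops after calling this method).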

 

    def extractPlus7Stream(self, url): 

        video_lang = url.split('/')[-3] 

        info = self.grep_webpage( 

            url, 

            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)', 

            0, 

            [ 

                (1, 'url', u'ERROR: Invalid URL: %s' % url) 

            ] 

        ) 

        next_url = compat_urllib_parse.unquote(info.get('url')) 

        info = self.grep_webpage( 

            next_url, 

            r'<video lang="%s" ref="(http[^\'"&]*)' % video_lang, 

            0, 

            [ 

                (1, 'url', u'ERROR: Could not find <video> tag: %s' % url) 

            ] 

        ) 

        next_url = compat_urllib_parse.unquote(info.get('url')) 

 

        info = self.grep_webpage( 

            next_url, 

            r'<video id="(.*?)".*?>.*?' + 

                '<name>(.*?)</name>.*?' + 

                '<dateVideo>(.*?)</dateVideo>.*?' + 

                '<url quality="hd">(.*?)</url>', 

            re.DOTALL, 

            [ 

                (1, 'id',    u'ERROR: could not extract video id: %s' % url), 

                (2, 'title', u'ERROR: could not extract video title: %s' % url), 

                (3, 'date',  u'ERROR: could not extract video date: %s' % url), 

                (4, 'url',   u'ERROR: could not extract video url: %s' % url) 

            ] 

        ) 

 

        return { 

            'id':           info.get('id'), 

            'url':          compat_urllib_parse.unquote(info.get('url')), 

            'uploader':     u'arte.tv', 

            'upload_date':  info.get('date'), 

            'title':        info.get('title').decode('utf-8'), 

            'ext':          u'mp4', 

            'format':       u'NA', 

            'player_url':   None, 

        } 

 

    def _real_extract(self, url): 

        video_id = url.split('/')[-1] 

        self.report_extraction(video_id) 

 

        if re.search(self._LIVE_URL, video_id) is not None: 

            self.extractLiveStream(url) 

            return 

        else: 

            info = self.extractPlus7Stream(url) 

 

        return [info] 
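
# Illustrative sketch (not part of the original module): grep_webpage()
# above pairs each capture group with a key and an error message so that a
# single regex can populate several info fields at once. The same pattern
# applied to a literal string:

def _example_grep(text):
    mobj = re.search(r'id=(\d+).*?name=(\w+)', text, re.DOTALL)
    if mobj is None:
        return None
    info = {}
    for (i, key) in ((1, 'id'), (2, 'name')):
        if mobj.group(i) is None:
            return None
        info[key] = mobj.group(i)
    return info

# _example_grep('id=42 ... name=arte') returns {'id': '42', 'name': 'arte'}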

 

 

class GenericIE(InfoExtractor): 

    """Generic last-resort information extractor.""" 

 

    _VALID_URL = r'.*' 

    IE_NAME = u'generic' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.') 

        self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id) 

 

    def report_following_redirect(self, new_url): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url) 

 

    def _test_redirect(self, url): 

        """Check if it is a redirect, like url shorteners, in case restart chain.""" 

        class HeadRequest(compat_urllib_request.Request): 

            def get_method(self): 

                return "HEAD" 

 

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler): 

            """ 

            Subclass the HTTPRedirectHandler to make it use our  

            HeadRequest also on the redirected URL 

            """ 

            def redirect_request(self, req, fp, code, msg, headers, newurl): 

                if code in (301, 302, 303, 307): 

                    newurl = newurl.replace(' ', '%20') 

                    newheaders = dict((k,v) for k,v in req.headers.items() 

                                      if k.lower() not in ("content-length", "content-type")) 

                    return HeadRequest(newurl, 

                                       headers=newheaders, 

                                       origin_req_host=req.get_origin_req_host(), 

                                       unverifiable=True) 

                else: 

                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp) 

 

        class HTTPMethodFallback(compat_urllib_request.BaseHandler): 

            """ 

            Fallback to GET if HEAD is not allowed (405 HTTP error) 

            """ 

            def http_error_405(self, req, fp, code, msg, headers): 

                fp.read() 

                fp.close() 

 

                newheaders = dict((k,v) for k,v in req.headers.items() 

                                  if k.lower() not in ("content-length", "content-type")) 

                return self.parent.open(compat_urllib_request.Request(req.get_full_url(), 

                                                 headers=newheaders, 

                                                 origin_req_host=req.get_origin_req_host(), 

                                                 unverifiable=True)) 

 

        # Build our opener 

        opener = compat_urllib_request.OpenerDirector() 

        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler, 

                        HTTPMethodFallback, HEADRedirectHandler, 

                        compat_urllib_error.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]: 

            opener.add_handler(handler()) 

 

        response = opener.open(HeadRequest(url)) 

        new_url = response.geturl() 

 

        if url == new_url: 

            return False 

 

        self.report_following_redirect(new_url) 

        self._downloader.download([new_url]) 

        return True 

 

    def _real_extract(self, url): 

        if self._test_redirect(url): return 

 

        video_id = url.split('/')[-1] 

        request = compat_urllib_request.Request(url) 

        try: 

            self.report_download_webpage(video_id) 

            webpage = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

        except ValueError as err: 

            # since this is the last-resort InfoExtractor, if 

            # this error is thrown, it'll be thrown here 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        self.report_extraction(video_id) 

        # Start with something easy: JW Player in SWFObject 

        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage) 

        if mobj is None: 

            # Broaden the search a little bit 

            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        # It's possible that one of the regexes 

        # matched, but returned an empty group: 

        if mobj.group(1) is None: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        video_url = compat_urllib_parse.unquote(mobj.group(1)) 

        video_id = os.path.basename(video_url) 

 

        # here's a fun little line of code for you: 

        video_extension = os.path.splitext(video_id)[1][1:] 

        video_id = os.path.splitext(video_id)[0] 
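        # note the order: the extension is read first, then video_id is
        # overwritten with the bare stem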

 

        # it's tempting to parse this further, but you would 

        # have to take into account all the variations like 

        #   Video Title - Site Name 

        #   Site Name | Video Title 

        #   Video Title - Tagline | Site Name 

        # and so on and so forth; it's just not practical 

        mobj = re.search(r'<title>(.*)</title>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract title') 

            return 

        video_title = mobj.group(1) 

 

        # video uploader is domain name 

        mobj = re.match(r'(?:https?://)?([^/]*)/.*', url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract uploader (domain name)')

            return 

        video_uploader = mobj.group(1) 

 

        return [{ 

            'id':       video_id, 

            'url':      video_url, 

            'uploader': video_uploader, 

            'upload_date':  None, 

            'title':    video_title, 

            'ext':      video_extension, 

        }] 
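
# Illustrative sketch (not part of the original module): _test_redirect()
# above resolves shorteners by issuing a HEAD request and comparing the
# final URL against the original. The custom handlers are needed because
# the stock HTTPRedirectHandler re-issues redirected requests as GET; a
# simpler sketch that accepts a GET on the redirect looks like this:

def _example_resolve_redirect(url):
    """Return the redirect target of url, or None if it does not redirect."""
    response = compat_urllib_request.urlopen(url)
    final_url = response.geturl()
    response.close()
    return final_url if final_url != url else None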

 

 

class YoutubeSearchIE(InfoExtractor): 

    """Information Extractor for YouTube search queries.""" 

    _VALID_URL = r'ytsearch(\d+|all)?:[\s\S]+' 

    _API_URL = 'https://gdata.youtube.com/feeds/api/videos?q=%s&start-index=%i&max-results=50&v=2&alt=jsonc' 

    _max_youtube_results = 1000 

    IE_NAME = u'youtube:search' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_page(self, query, pagenum): 

        """Report attempt to download search page with given number.""" 

        query = query.decode('utf-8')  # query was encoded as UTF-8 in _real_extract

        self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum)) 

 

    def _real_extract(self, query): 

        mobj = re.match(self._VALID_URL, query) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) 

            return 

 

        prefix, query = query.split(':', 1)

        prefix = prefix[8:] 

        query = query.encode('utf-8') 

        if prefix == '': 

            self._download_n_results(query, 1) 

            return 

        elif prefix == 'all': 

            self._download_n_results(query, self._max_youtube_results) 

            return 

        else: 

            try: 

                n = int(prefix) 

                if n <= 0: 

                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) 

                    return 

                elif n > self._max_youtube_results: 

                    self._downloader.to_stderr(u'WARNING: ytsearch returns max %i results (you requested %i)' % (self._max_youtube_results, n)) 

                    n = self._max_youtube_results 

                self._download_n_results(query, n) 

                return 

            except ValueError: # parsing prefix as integer fails 

                self._download_n_results(query, 1) 

                return 

 

    def _download_n_results(self, query, n): 

        """Downloads a specified number of results for a query""" 

 

        video_ids = [] 

        pagenum = 0 

        limit = n 

 

        while (50 * pagenum) < limit: 

            self.report_download_page(query, pagenum+1) 

            result_url = self._API_URL % (compat_urllib_parse.quote_plus(query), (50*pagenum)+1) 

            request = compat_urllib_request.Request(result_url) 

            try: 

                data = compat_urllib_request.urlopen(request).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err)) 

                return 

            api_response = json.loads(data)['data'] 

 

            new_ids = list(video['id'] for video in api_response['items']) 

            video_ids += new_ids 

 

            limit = min(n, api_response['totalItems']) 

            pagenum += 1 

 

        if len(video_ids) > n: 

            video_ids = video_ids[:n] 

        for id in video_ids: 

            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id]) 

        return 
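
# Illustrative sketch (not part of the original module): the download loop
# in YoutubeSearchIE above pages through the API 50 results at a time using
# 1-based start indices (1, 51, 101, ...). The index arithmetic on its own:

def _example_start_indices(n, page_size=50):
    """Yield the 1-based start-index of each API page needed for n results."""
    pagenum = 0
    while pagenum * page_size < n:
        yield pagenum * page_size + 1
        pagenum += 1

# list(_example_start_indices(120)) == [1, 51, 101]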

 

 

class GoogleSearchIE(InfoExtractor): 

    """Information Extractor for Google Video search queries.""" 

    _VALID_URL = r'gvsearch(\d+|all)?:[\s\S]+' 

    _TEMPLATE_URL = 'http://video.google.com/videosearch?q=%s+site:video.google.com&start=%s&hl=en' 

    _VIDEO_INDICATOR = r'<a href="http://video\.google\.com/videoplay\?docid=([^"\&]+)' 

    _MORE_PAGES_INDICATOR = r'class="pn" id="pnnext"' 

    _max_google_results = 1000 

    IE_NAME = u'video.google:search' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_page(self, query, pagenum): 

        """Report attempt to download playlist page with given number.""" 

        query = query.decode('utf-8')  # query was encoded as UTF-8 in _real_extract

        self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum)) 

 

    def _real_extract(self, query): 

        mobj = re.match(self._VALID_URL, query) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) 

            return 

 

        prefix, query = query.split(':', 1)

        prefix = prefix[8:] 

        query = query.encode('utf-8') 

        if prefix == '': 

            self._download_n_results(query, 1) 

            return 

        elif prefix == 'all': 

            self._download_n_results(query, self._max_google_results) 

            return 

        else: 

            try: 

                n = int(prefix) 

                if n <= 0: 

                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) 

                    return 

                elif n > self._max_google_results: 

                    self._downloader.to_stderr(u'WARNING: gvsearch returns max %i results (you requested %i)' % (self._max_google_results, n)) 

                    n = self._max_google_results 

                self._download_n_results(query, n) 

                return 

            except ValueError: # parsing prefix as integer fails 

                self._download_n_results(query, 1) 

                return 

 

    def _download_n_results(self, query, n): 

        """Downloads a specified number of results for a query""" 

 

        video_ids = [] 

        pagenum = 0 

 

        while True: 

            self.report_download_page(query, pagenum) 

            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum*10) 

            request = compat_urllib_request.Request(result_url) 

            try: 

                page = compat_urllib_request.urlopen(request).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

                return 

 

            # Extract video identifiers 

            for mobj in re.finditer(self._VIDEO_INDICATOR, page): 

                video_id = mobj.group(1) 

                if video_id not in video_ids: 

                    video_ids.append(video_id) 

                    if len(video_ids) == n: 

                        # Specified n videos reached 

                        for id in video_ids: 

                            self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id]) 

                        return 

 

            if re.search(self._MORE_PAGES_INDICATOR, page) is None: 

                for id in video_ids: 

                    self._downloader.download(['http://video.google.com/videoplay?docid=%s' % id]) 

                return 

 

            pagenum = pagenum + 1 

 

 

class YahooSearchIE(InfoExtractor): 

    """Information Extractor for Yahoo! Video search queries.""" 

 

    _WORKING = False 

    _VALID_URL = r'yvsearch(\d+|all)?:[\s\S]+' 

    _TEMPLATE_URL = 'http://video.yahoo.com/search/?p=%s&o=%s' 

    _VIDEO_INDICATOR = r'href="http://video\.yahoo\.com/watch/([0-9]+/[0-9]+)"' 

    _MORE_PAGES_INDICATOR = r'\s*Next' 

    _max_yahoo_results = 1000 

    IE_NAME = u'video.yahoo:search' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_page(self, query, pagenum): 

        """Report attempt to download playlist page with given number.""" 

        query = query.decode('utf-8')  # query was encoded as UTF-8 in _real_extract

        self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum)) 

 

    def _real_extract(self, query): 

        mobj = re.match(self._VALID_URL, query) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid search query "%s"' % query) 

            return 

 

        prefix, query = query.split(':', 1)

        prefix = prefix[8:] 

        query = query.encode('utf-8') 

        if prefix == '': 

            self._download_n_results(query, 1) 

            return 

        elif prefix == 'all': 

            self._download_n_results(query, self._max_yahoo_results) 

            return 

        else: 

            try: 

                n = int(prefix) 

                if n <= 0: 

                    self._downloader.trouble(u'ERROR: invalid download number %s for query "%s"' % (n, query)) 

                    return 

                elif n > self._max_yahoo_results: 

                    self._downloader.to_stderr(u'WARNING: yvsearch returns max %i results (you requested %i)' % (self._max_yahoo_results, n)) 

                    n = self._max_yahoo_results 

                self._download_n_results(query, n) 

                return 

            except ValueError: # parsing prefix as integer fails 

                self._download_n_results(query, 1) 

                return 

 

    def _download_n_results(self, query, n): 

        """Downloads a specified number of results for a query""" 

 

        video_ids = [] 

        already_seen = set() 

        pagenum = 1 

 

        while True: 

            self.report_download_page(query, pagenum) 

            result_url = self._TEMPLATE_URL % (compat_urllib_parse.quote_plus(query), pagenum) 

            request = compat_urllib_request.Request(result_url) 

            try: 

                page = compat_urllib_request.urlopen(request).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

                return 

 

            # Extract video identifiers 

            for mobj in re.finditer(self._VIDEO_INDICATOR, page): 

                video_id = mobj.group(1) 

                if video_id not in already_seen: 

                    video_ids.append(video_id) 

                    already_seen.add(video_id) 

                    if len(video_ids) == n: 

                        # Specified n videos reached 

                        for id in video_ids: 

                            self._downloader.download(['http://video.yahoo.com/watch/%s' % id]) 

                        return 

 

            if re.search(self._MORE_PAGES_INDICATOR, page) is None: 

                for id in video_ids: 

                    self._downloader.download(['http://video.yahoo.com/watch/%s' % id]) 

                return 

 

            pagenum = pagenum + 1 
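
# Illustrative sketch (not part of the original module): the search
# extractors above deduplicate video ids while preserving discovery order.
# YahooSearchIE keeps an auxiliary 'already_seen' set, which is the
# O(1)-lookup variant of the list-membership test used in GoogleSearchIE:

def _example_dedup_ids(pages):
    """Collect ids across pages, preserving order and skipping duplicates."""
    seen = set()
    ids = []
    for page_ids in pages:
        for vid in page_ids:
            if vid not in seen:
                seen.add(vid)
                ids.append(vid)
    return ids

# _example_dedup_ids([['a', 'b'], ['b', 'c']]) returns ['a', 'b', 'c']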

 

 

class YoutubePlaylistIE(InfoExtractor): 

    """Information Extractor for YouTube playlists.""" 

 

    _VALID_URL = r'(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL|EC)?|PL|EC)([0-9A-Za-z-_]{10,})(?:/.*?/([0-9A-Za-z_-]+))?.*' 

    _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en' 

    _VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&amp;([^&"]+&amp;)*list=.*?%s' 

    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}" 

    IE_NAME = u'youtube:playlist' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_page(self, playlist_id, pagenum): 

        """Report attempt to download playlist page with given number.""" 

        self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum)) 

 

    def _real_extract(self, url): 

        # Extract playlist id 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid url: %s' % url) 

            return 

 

        # Single video case 

        if mobj.group(3) is not None: 

            self._downloader.download([mobj.group(3)]) 

            return 

 

        # Download playlist pages 

        # prefix is 'p' as default for playlists but there are other types that need extra care 

        playlist_prefix = mobj.group(1) 

        if playlist_prefix == 'a': 

            playlist_access = 'artist' 

        else: 

            playlist_prefix = 'p' 

            playlist_access = 'view_play_list' 

        playlist_id = mobj.group(2) 

        video_ids = [] 

        pagenum = 1 

 

        while True: 

            self.report_download_page(playlist_id, pagenum) 

            url = self._TEMPLATE_URL % (playlist_access, playlist_prefix, playlist_id, pagenum) 

            request = compat_urllib_request.Request(url) 

            try: 

                page = compat_urllib_request.urlopen(request).read().decode('utf-8') 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

                return 

 

            # Extract video identifiers 

            ids_in_page = [] 

            for mobj in re.finditer(self._VIDEO_INDICATOR_TEMPLATE % playlist_id, page): 

                if mobj.group(1) not in ids_in_page: 

                    ids_in_page.append(mobj.group(1)) 

            video_ids.extend(ids_in_page) 

 

            if self._MORE_PAGES_INDICATOR not in page: 

                break 

            pagenum = pagenum + 1 

 

        total = len(video_ids) 

 

        playliststart = self._downloader.params.get('playliststart', 1) - 1 

        playlistend = self._downloader.params.get('playlistend', -1) 

        if playlistend == -1: 

            video_ids = video_ids[playliststart:] 

        else: 

            video_ids = video_ids[playliststart:playlistend] 

 

        if len(video_ids) == total: 

            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos' % (playlist_id, total)) 

        else: 

            self._downloader.to_screen(u'[youtube] PL %s: Found %i videos, downloading %i' % (playlist_id, total, len(video_ids))) 

 

        for id in video_ids: 

            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id]) 

        return 
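
# Illustrative sketch (not part of the original module): the
# playliststart/playlistend handling in YoutubePlaylistIE above maps the
# user-facing 1-based range onto a Python slice, with -1 meaning "to the
# end". The selection on its own:

def _example_select_range(video_ids, playliststart=1, playlistend=-1):
    start = playliststart - 1
    if playlistend == -1:
        return video_ids[start:]
    return video_ids[start:playlistend]

# _example_select_range(['a', 'b', 'c', 'd'], 2, 3) returns ['b', 'c']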

 

 

class YoutubeChannelIE(InfoExtractor): 

    """Information Extractor for YouTube channels.""" 

 

    _VALID_URL = r"^(?:https?://)?(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/([0-9A-Za-z_-]+)(?:/.*)?$" 

    _TEMPLATE_URL = 'http://www.youtube.com/channel/%s/videos?sort=da&flow=list&view=0&page=%s&gl=US&hl=en' 

    _MORE_PAGES_INDICATOR = u"Next \N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}" 

    IE_NAME = u'youtube:channel' 

 

    def report_download_page(self, channel_id, pagenum): 

        """Report attempt to download channel page with given number.""" 

        self._downloader.to_screen(u'[youtube] Channel %s: Downloading page #%s' % (channel_id, pagenum)) 

 

    def _real_extract(self, url): 

        # Extract channel id 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid url: %s' % url) 

            return 

 

        # Download channel pages 

        channel_id = mobj.group(1) 

        video_ids = [] 

        pagenum = 1 

 

        while True: 

            self.report_download_page(channel_id, pagenum) 

            url = self._TEMPLATE_URL % (channel_id, pagenum) 

            request = compat_urllib_request.Request(url) 

            try: 

                page = compat_urllib_request.urlopen(request).read().decode('utf-8')

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

                return 

 

            # Extract video identifiers 

            ids_in_page = [] 

            for mobj in re.finditer(r'href="/watch\?v=([0-9A-Za-z_-]+)&', page): 

                if mobj.group(1) not in ids_in_page: 

                    ids_in_page.append(mobj.group(1)) 

            video_ids.extend(ids_in_page) 

 

            if self._MORE_PAGES_INDICATOR not in page: 

                break 

            pagenum = pagenum + 1 

 

        self._downloader.to_screen(u'[youtube] Channel %s: Found %i videos' % (channel_id, len(video_ids))) 

 

        for id in video_ids: 

            self._downloader.download(['http://www.youtube.com/watch?v=%s' % id]) 

        return 

 

 

class YoutubeUserIE(InfoExtractor): 

    """Information Extractor for YouTube users.""" 

 

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/user/)|ytuser:)([A-Za-z0-9_-]+)' 

    _TEMPLATE_URL = 'http://gdata.youtube.com/feeds/api/users/%s' 

    _GDATA_PAGE_SIZE = 50 

    _GDATA_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?max-results=%d&start-index=%d' 

    _VIDEO_INDICATOR = r'/watch\?v=(.+?)[\<&]' 

    IE_NAME = u'youtube:user' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_page(self, username, start_index): 

        """Report attempt to download user page.""" 

        self._downloader.to_screen(u'[youtube] user %s: Downloading video ids from %d to %d' % 

                (username, start_index, start_index + self._GDATA_PAGE_SIZE)) 

 

    def _real_extract(self, url): 

        # Extract username 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid url: %s' % url) 

            return 

 

        username = mobj.group(1) 

 

        # Download video ids using the YouTube Data API. The result size per
        # query is limited (currently to 50 videos), so we query page by page
        # until a page comes back with no video ids, which means we have all
        # of them.

 

        video_ids = [] 

        pagenum = 0 

 

        while True: 

            start_index = pagenum * self._GDATA_PAGE_SIZE + 1 

            self.report_download_page(username, start_index) 

 

            request = compat_urllib_request.Request(self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)) 

 

            try: 

                page = compat_urllib_request.urlopen(request).read().decode('utf-8') 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

                return 

 

            # Extract video identifiers 

            ids_in_page = [] 

 

            for mobj in re.finditer(self._VIDEO_INDICATOR, page): 

                if mobj.group(1) not in ids_in_page: 

                    ids_in_page.append(mobj.group(1)) 

 

            video_ids.extend(ids_in_page) 

 

            # A little optimization - if the current page is not "full",
            # i.e. it contains fewer than _GDATA_PAGE_SIZE video ids, we can
            # assume it is the last one: there are no more ids on further
            # pages, so there is no need to query again.

 

            if len(ids_in_page) < self._GDATA_PAGE_SIZE: 

                break 

 

            pagenum += 1 

 

        all_ids_count = len(video_ids) 

        playliststart = self._downloader.params.get('playliststart', 1) - 1 

        playlistend = self._downloader.params.get('playlistend', -1) 

 

        if playlistend == -1: 

            video_ids = video_ids[playliststart:] 

        else: 

            video_ids = video_ids[playliststart:playlistend] 

 

        self._downloader.to_screen(u"[youtube] user %s: Collected %d video ids (downloading %d of them)" % 

                (username, all_ids_count, len(video_ids))) 

 

        for video_id in video_ids: 

            self._downloader.download(['http://www.youtube.com/watch?v=%s' % video_id]) 
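
# Illustrative sketch (not part of the original module): the paging loop in
# YoutubeUserIE above stops as soon as a page comes back with fewer than
# _GDATA_PAGE_SIZE ids, on the assumption that a short page must be the
# last one. The stopping rule in isolation (fetch_page is a stand-in for
# one GData request):

def _example_collect_all(fetch_page, page_size):
    """fetch_page(pagenum) -> list of ids; collect pages until a short one."""
    ids = []
    pagenum = 0
    while True:
        page_ids = fetch_page(pagenum)
        ids.extend(page_ids)
        if len(page_ids) < page_size:
            return ids
        pagenum += 1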

 

 

class BlipTVUserIE(InfoExtractor): 

    """Information Extractor for blip.tv users.""" 

 

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$' 

    _PAGE_SIZE = 12 

    IE_NAME = u'blip.tv:user' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_page(self, username, pagenum): 

        """Report attempt to download user page.""" 

        self._downloader.to_screen(u'[%s] user %s: Downloading video ids from page %d' % 

                (self.IE_NAME, username, pagenum)) 

 

    def _real_extract(self, url): 

        # Extract username 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid url: %s' % url) 

            return 

 

        username = mobj.group(1) 

 

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1' 

 

        request = compat_urllib_request.Request(url) 

 

        try: 

            page = compat_urllib_request.urlopen(request).read().decode('utf-8') 

            mobj = re.search(r'data-users-id="([^"]+)"', page)
            if mobj is None:
                self._downloader.trouble(u'ERROR: unable to extract users_id for user %s' % username)
                return
            page_base = page_base % mobj.group(1)

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

            return 

 

 

        # Download video ids using BlipTV Ajax calls. The result size per
        # query is limited (currently to 12 videos), so we query page by page
        # until a page comes back with no video ids, which means we have all
        # of them.

 

        video_ids = [] 

        pagenum = 1 

 

        while True: 

            self.report_download_page(username, pagenum) 

 

            request = compat_urllib_request.Request( page_base + "&page=" + str(pagenum) ) 

 

            try: 

                page = compat_urllib_request.urlopen(request).read().decode('utf-8') 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))

                return 

 

            # Extract video identifiers 

            ids_in_page = [] 

 

            for mobj in re.finditer(r'href="/([^"]+)"', page): 

                if mobj.group(1) not in ids_in_page: 

                    ids_in_page.append(unescapeHTML(mobj.group(1))) 

 

            video_ids.extend(ids_in_page) 

 

            # A little optimization - if the current page is not "full",
            # i.e. it contains fewer than _PAGE_SIZE video ids, we can assume
            # it is the last one: there are no more ids on further pages, so
            # there is no need to query again.

 

            if len(ids_in_page) < self._PAGE_SIZE: 

                break 

 

            pagenum += 1 

 

        all_ids_count = len(video_ids) 

        playliststart = self._downloader.params.get('playliststart', 1) - 1 

        playlistend = self._downloader.params.get('playlistend', -1) 

 

        if playlistend == -1: 

            video_ids = video_ids[playliststart:] 

        else: 

            video_ids = video_ids[playliststart:playlistend] 

 

        self._downloader.to_screen(u"[%s] user %s: Collected %d video ids (downloading %d of them)" % 

                (self.IE_NAME, username, all_ids_count, len(video_ids))) 

 

        for video_id in video_ids: 

            self._downloader.download([u'http://blip.tv/'+video_id]) 

 

 

class DepositFilesIE(InfoExtractor): 

    """Information extractor for depositfiles.com""" 

 

    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)' 

    IE_NAME = u'DepositFiles' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, file_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[DepositFiles] %s: Downloading webpage' % file_id) 

 

    def report_extraction(self, file_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[DepositFiles] %s: Extracting information' % file_id) 

 

    def _real_extract(self, url): 

        file_id = url.split('/')[-1] 

        # Rebuild url in english locale 

        url = 'http://depositfiles.com/en/files/' + file_id 

 

        # Retrieve file webpage with 'Free download' button pressed 

        free_download_indication = { 'gateway_result' : '1' } 

        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication)) 

        try: 

            self.report_download_webpage(file_id) 

            webpage = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err)) 

            return 

 

        # Search for the real file URL 

        mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage) 

        if (mobj is None) or (mobj.group(1) is None): 

            # Try to figure out reason of the error. 

            mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL) 

            if (mobj is not None) and (mobj.group(1) is not None): 

                restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()

                self._downloader.trouble(u'ERROR: %s' % restriction_message) 

            else: 

                self._downloader.trouble(u'ERROR: unable to extract download URL from: %s' % url) 

            return 

 

        file_url = mobj.group(1) 

        file_extension = os.path.splitext(file_url)[1][1:] 

 

        # Search for file title 

        mobj = re.search(r'<b title="(.*?)">', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract title') 

            return 

        file_title = mobj.group(1).decode('utf-8') 

 

        return [{ 

            'id':       file_id.decode('utf-8'), 

            'url':      file_url.decode('utf-8'), 

            'uploader': None, 

            'upload_date':  None, 

            'title':    file_title, 

            'ext':      file_extension.decode('utf-8'), 

        }] 
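
# Illustrative sketch (not part of the original module): the upload-date
# handling in FacebookIE below feeds an RFC 2822 date string through
# email.utils.parsedate_tz and formats the result as YYYYMMDD (assuming
# email.utils and time are imported at module top, as FacebookIE requires):

def _example_parse_upload_date(date_text):
    timetuple = email.utils.parsedate_tz(date_text)
    if timetuple is None:
        return None
    try:
        return time.strftime('%Y%m%d', timetuple[0:9])
    except (ValueError, TypeError):
        return None

# _example_parse_upload_date('Fri, 21 Nov 1997 09:55:06 -0600') == '19971121'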

 

 

class FacebookIE(InfoExtractor): 

    """Information Extractor for Facebook""" 

 

    _WORKING = False 

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)' 

    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&' 

    _NETRC_MACHINE = 'facebook' 

    _available_formats = ['video', 'highqual', 'lowqual'] 

    _video_extensions = { 

        'video': 'mp4', 

        'highqual': 'mp4', 

        'lowqual': 'mp4', 

    } 

    IE_NAME = u'facebook' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def _reporter(self, message): 

        """Add header and report message.""" 

        self._downloader.to_screen(u'[facebook] %s' % message) 

 

    def report_login(self): 

        """Report attempt to log in.""" 

        self._reporter(u'Logging in') 

 

    def report_video_webpage_download(self, video_id): 

        """Report attempt to download video webpage.""" 

        self._reporter(u'%s: Downloading video webpage' % video_id) 

 

    def report_information_extraction(self, video_id): 

        """Report attempt to extract video information.""" 

        self._reporter(u'%s: Extracting video information' % video_id) 

 

    def _parse_page(self, video_webpage): 

        """Extract video information from page""" 

        # General data 

        data = {'title': r'\("video_title", "(.*?)"\)', 

            'description': r'<div class="datawrap">(.*?)</div>', 

            'owner': r'\("video_owner_name", "(.*?)"\)', 

            'thumbnail':  r'\("thumb_url", "(?P<THUMB>.*?)"\)', 

            } 

        video_info = {} 

        for piece in data.keys(): 

            mobj = re.search(data[piece], video_webpage) 

            if mobj is not None: 

                video_info[piece] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape")) 

 

        # Video urls 

        video_urls = {} 

        for fmt in self._available_formats: 

            mobj = re.search(r'\("%s_src\", "(.+?)"\)' % fmt, video_webpage) 

            if mobj is not None: 

                # URL is in a Javascript segment inside an escaped Unicode format within 

                # the generally utf-8 page 

                video_urls[fmt] = compat_urllib_parse.unquote_plus(mobj.group(1).decode("unicode_escape")) 

        video_info['video_urls'] = video_urls 

 

        return video_info 

 

    def _real_initialize(self): 

        if self._downloader is None: 

            return 

 

        useremail = None 

        password = None 

        downloader_params = self._downloader.params 

 

        # Attempt to use provided username and password or .netrc data 

        if downloader_params.get('username', None) is not None: 

            useremail = downloader_params['username'] 

            password = downloader_params['password'] 

        elif downloader_params.get('usenetrc', False): 

            try: 

                info = netrc.netrc().authenticators(self._NETRC_MACHINE) 

                if info is not None: 

                    useremail = info[0] 

                    password = info[2] 

                else: 

                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) 

            except (IOError, netrc.NetrcParseError) as err: 

                self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err)) 

                return 

 

        if useremail is None: 

            return 

 

        # Log in 

        login_form = { 

            'email': useremail, 

            'pass': password, 

            'login': 'Log+In' 

            } 

        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) 

        try: 

            self.report_login() 

            login_results = compat_urllib_request.urlopen(request).read() 

            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None: 

                self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')

                return 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err)) 

            return 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        video_id = mobj.group('ID') 

 

        # Get video webpage 

        self.report_video_webpage_download(video_id) 

        request = compat_urllib_request.Request('https://www.facebook.com/video/video.php?v=%s' % video_id) 

        try: 

            page = compat_urllib_request.urlopen(request) 

            video_webpage = page.read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        # Start extracting information 

        self.report_information_extraction(video_id) 

 

        # Extract information 

        video_info = self._parse_page(video_webpage) 

 

        # uploader 

        if 'owner' not in video_info: 

            self._downloader.trouble(u'ERROR: unable to extract uploader nickname') 

            return 

        video_uploader = video_info['owner'] 

 

        # title 

        if 'title' not in video_info: 

            self._downloader.trouble(u'ERROR: unable to extract video title') 

            return 

        video_title = video_info['title'] 

        video_title = video_title.decode('utf-8') 

 

        # thumbnail image 

        if 'thumbnail' not in video_info: 

            self._downloader.trouble(u'WARNING: unable to extract video thumbnail') 

            video_thumbnail = '' 

        else: 

            video_thumbnail = video_info['thumbnail'] 

 

        # upload date 

        upload_date = None 

        if 'upload_date' in video_info: 

            upload_time = video_info['upload_date'] 

            timetuple = email.utils.parsedate_tz(upload_time) 

            if timetuple is not None: 

                try: 

                    upload_date = time.strftime('%Y%m%d', timetuple[0:9]) 

                except: 

                    pass 
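
            # e.g. a (hypothetical) RFC 2822 date 'Thu, 06 Dec 2012 10:00:00 -0800'

            # parses to a 10-tuple whose first nine fields feed time.strftime,

            # yielding the upload_date string '20121206'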

 

        # description 

        video_description = video_info.get('description', 'No description available.') 

 

        url_map = video_info['video_urls'] 

        video_url_list = [] # guard: stays defined even if the page yielded no URLs 

        if len(url_map.keys()) > 0: 

            # Decide which formats to download 

            req_format = self._downloader.params.get('format', None) 

            format_limit = self._downloader.params.get('format_limit', None) 

 

            if format_limit is not None and format_limit in self._available_formats: 

                format_list = self._available_formats[self._available_formats.index(format_limit):] 

            else: 

                format_list = self._available_formats 

            existing_formats = [x for x in format_list if x in url_map] 

            if len(existing_formats) == 0: 

                self._downloader.trouble(u'ERROR: no known formats available for video') 

                return 

            if req_format is None: 

                video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality 

            elif req_format == 'worst': 

                video_url_list = [(existing_formats[-1], url_map[existing_formats[-1]])] # worst quality 

            elif req_format == '-1': 

                video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats 

            else: 

                # Specific format 

                if req_format not in url_map: 

                    self._downloader.trouble(u'ERROR: requested format not available') 

                    return 

                video_url_list = [(req_format, url_map[req_format])] # Specific format 
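
            # Worked example under a hypothetical url_map: with existing_formats

            # ['22', '18'], req_format None yields [('22', ...)] (best quality),

            # 'worst' yields [('18', ...)], and '-1' yields both entries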

 

        results = [] 

        for format_param, video_real_url in video_url_list: 

            # Extension 

            video_extension = self._video_extensions.get(format_param, 'mp4') 

 

            results.append({ 

                'id':       video_id.decode('utf-8'), 

                'url':      video_real_url.decode('utf-8'), 

                'uploader': video_uploader.decode('utf-8'), 

                'upload_date':  upload_date, 

                'title':    video_title, 

                'ext':      video_extension.decode('utf-8'), 

                'format':   (format_param is None and u'NA' or format_param.decode('utf-8')), 

                'thumbnail':    video_thumbnail.decode('utf-8'), 

                'description':  video_description.decode('utf-8'), 

            }) 

        return results 

 

class BlipTVIE(InfoExtractor): 

    """Information extractor for blip.tv""" 

 

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv(/.+)$' 

    _URL_EXT = r'^.*\.([a-z0-9]+)$' 

    IE_NAME = u'blip.tv' 

 

    def report_extraction(self, file_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) 

 

    def report_direct_download(self, title): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Direct download detected' % (self.IE_NAME, title)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        if '?' in url: 

            cchar = '&' 

        else: 

            cchar = '?' 

        json_url = url + cchar + 'skin=json&version=2&no_wrap=1' 
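
        # e.g. a (hypothetical) page http://blip.tv/file/1234 is re-requested as

        # http://blip.tv/file/1234?skin=json&version=2&no_wrap=1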

        request = compat_urllib_request.Request(json_url) 

        self.report_extraction(mobj.group(1)) 

        info = None 

        try: 

            urlh = compat_urllib_request.urlopen(request) 

            if urlh.headers.get('Content-Type', '').startswith('video/'): # Direct download 

                basename = url.split('/')[-1] 

                title,ext = os.path.splitext(basename) 

                title = title.decode('UTF-8') 

                ext = ext.replace('.', '') 

                self.report_direct_download(title) 

                info = { 

                    'id': title, 

                    'url': url, 

                    'uploader': None, 

                    'upload_date': None, 

                    'title': title, 

                    'ext': ext, 

                    'urlhandle': urlh 

                } 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err)) 

            return 

        if info is None: # Regular URL 

            try: 

                json_code_bytes = urlh.read() 

                json_code = json_code_bytes.decode('utf-8') 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err)) 

                return 

 

            try: 

                json_data = json.loads(json_code) 

                if 'Post' in json_data: 

                    data = json_data['Post'] 

                else: 

                    data = json_data 

 

                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d') 
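
                # e.g. a (hypothetical) datestamp '12-06-12 10:34AM' parses under

                # '%m-%d-%y %H:%M%p' and is re-rendered as '20121206'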

                video_url = data['media']['url'] 

                umobj = re.match(self._URL_EXT, video_url) 

                if umobj is None: 

                    raise ValueError('Can not determine filename extension') 

                ext = umobj.group(1) 

 

                info = { 

                    'id': data['item_id'], 

                    'url': video_url, 

                    'uploader': data['display_name'], 

                    'upload_date': upload_date, 

                    'title': data['title'], 

                    'ext': ext, 

                    'format': data['media']['mimeType'], 

                    'thumbnail': data['thumbnailUrl'], 

                    'description': data['description'], 

                    'player_url': data['embedUrl'] 

                } 

            except (ValueError,KeyError) as err: 

                self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err)) 

                return 

 

        std_headers['User-Agent'] = 'iTunes/10.6.1' 

        return [info] 

 

 

class MyVideoIE(InfoExtractor): 

    """Information Extractor for myvideo.de.""" 

 

    _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*' 

    IE_NAME = u'myvideo' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, video_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[myvideo] %s: Extracting information' % video_id) 

 

    def _real_extract(self,url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        video_id = mobj.group(1) 

 

        # Get video webpage 

        request = compat_urllib_request.Request('http://www.myvideo.de/watch/%s' % video_id) 

        try: 

            self.report_download_webpage(video_id) 

            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

 

        self.report_extraction(video_id) 

        mobj = re.search(r'<link rel=\'image_src\' href=\'(http://is[0-9].myvideo\.de/de/movie[0-9]+/[a-f0-9]+)/thumbs/[^.]+\.jpg\' />', 

                 webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract media URL') 

            return 

        video_url = mobj.group(1) + ('/%s.flv' % video_id) 

 

        mobj = re.search('<title>([^<]+)</title>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract title') 

            return 

 

        video_title = mobj.group(1) 

 

        return [{ 

            'id':       video_id, 

            'url':      video_url, 

            'uploader': None, 

            'upload_date':  None, 

            'title':    video_title, 

            'ext':      u'flv', 

        }] 

 

class ComedyCentralIE(InfoExtractor): 

    """Information extractor for The Daily Show and Colbert Report """ 

 

    # urls can be abbreviations like :thedailyshow or :colbert 

    # urls for episodes like:  

    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day 

    #                     or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news 

    #                     or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524     

    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport) 

                      |(https?://)?(www\.)? 

                          (?P<showname>thedailyshow|colbertnation)\.com/ 

                         (full-episodes/(?P<episode>.*)| 

                          (?P<clip> 

                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?)) 

                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))))) 

                     $""" 

    IE_NAME = u'comedycentral' 

 

    _available_formats = ['3500', '2200', '1700', '1200', '750', '400'] 

 

    _video_extensions = { 

        '3500': 'mp4', 

        '2200': 'mp4', 

        '1700': 'mp4', 

        '1200': 'mp4', 

        '750': 'mp4', 

        '400': 'mp4', 

    } 

    _video_dimensions = { 

        '3500': '1280x720', 

        '2200': '960x540', 

        '1700': '768x432', 

        '1200': '640x360', 

        '750': '512x288', 

        '400': '384x216', 

    } 

 

    def suitable(self, url): 

        """Receives a URL and returns True if suitable for this IE.""" 

        return re.match(self._VALID_URL, url, re.VERBOSE) is not None 

 

    def report_extraction(self, episode_id): 

        self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id) 

 

    def report_config_download(self, episode_id): 

        self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id) 

 

    def report_index_download(self, episode_id): 

        self._downloader.to_screen(u'[comedycentral] %s: Downloading show index' % episode_id) 

 

    def report_player_url(self, episode_id): 

        self._downloader.to_screen(u'[comedycentral] %s: Determining player URL' % episode_id) 

 

 

    def _print_formats(self, formats): 

        print('Available formats:') 

        for x in formats: 

            print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???'))) 

 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url, re.VERBOSE) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        if mobj.group('shortname'): 

            if mobj.group('shortname') in ('tds', 'thedailyshow'): 

                url = u'http://www.thedailyshow.com/full-episodes/' 

            else: 

                url = u'http://www.colbertnation.com/full-episodes/' 

            mobj = re.match(self._VALID_URL, url, re.VERBOSE) 

            assert mobj is not None 

 

        if mobj.group('clip'): 

            if mobj.group('showname') == 'thedailyshow': 

                epTitle = mobj.group('tdstitle') 

            else: 

                epTitle = mobj.group('cntitle') 

            dlNewest = False 

        else: 

            dlNewest = not mobj.group('episode') 

            if dlNewest: 

                epTitle = mobj.group('showname') 

            else: 

                epTitle = mobj.group('episode') 

 

        req = compat_urllib_request.Request(url) 

        self.report_extraction(epTitle) 

        try: 

            htmlHandle = compat_urllib_request.urlopen(req) 

            html = htmlHandle.read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

            return 

        if dlNewest: 

            url = htmlHandle.geturl() 

            mobj = re.match(self._VALID_URL, url, re.VERBOSE) 

            if mobj is None: 

                self._downloader.trouble(u'ERROR: Invalid redirected URL: ' + url) 

                return 

            if mobj.group('episode') == '': 

                self._downloader.trouble(u'ERROR: Redirected URL is still not specific: ' + url) 

                return 

            epTitle = mobj.group('episode') 

 

        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', html) 

 

        if len(mMovieParams) == 0: 

            # The Colbert Report embeds the information in a data-mgid 

            # attribute without a URL prefix; so extract the alternate 

            # reference and then add the URL prefix manually. 

 

            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', html) 

            if len(altMovieParams) == 0: 

                self._downloader.trouble(u'ERROR: unable to find Flash URL in webpage ' + url) 

                return 

            else: 

                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])] 

 

        playerUrl_raw = mMovieParams[0][0] 

        self.report_player_url(epTitle) 

        try: 

            urlHandle = compat_urllib_request.urlopen(playerUrl_raw) 

            playerUrl = urlHandle.geturl() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err)) 

            return 

 

        uri = mMovieParams[0][1] 

        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri}) 

        self.report_index_download(epTitle) 

        try: 

            indexXml = compat_urllib_request.urlopen(indexUrl).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err)) 

            return 

 

        results = [] 

 

        idoc = xml.etree.ElementTree.fromstring(indexXml) 

        itemEls = idoc.findall('.//item') 

        for itemEl in itemEls: 

            mediaId = itemEl.findall('./guid')[0].text 

            shortMediaId = mediaId.split(':')[-1] 

            showId = mediaId.split(':')[-2].replace('.com', '') 

            officialTitle = itemEl.findall('./title')[0].text 

            officialDate = itemEl.findall('./pubDate')[0].text 

 

            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' + 

                        compat_urllib_parse.urlencode({'uri': mediaId})) 

            configReq = compat_urllib_request.Request(configUrl) 

            self.report_config_download(epTitle) 

            try: 

                configXml = compat_urllib_request.urlopen(configReq).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err)) 

                return 

 

            cdoc = xml.etree.ElementTree.fromstring(configXml) 

            turls = [] 

            for rendition in cdoc.findall('.//rendition'): 

                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text) 

                turls.append(finfo) 

 

            if len(turls) == 0: 

                self._downloader.trouble(u'\nERROR: unable to download ' + mediaId + ': No videos found') 

                continue 

 

            if self._downloader.params.get('listformats', None): 

                self._print_formats([i[0] for i in turls]) 

                return 

 

            # For now, just pick the highest bitrate 

            format,video_url = turls[-1] 

 

            # Get the format arg from the arg stream 

            req_format = self._downloader.params.get('format', None) 

 

            # Select format if we can find one 

            for f,v in turls: 

                if f == req_format: 

                    format, video_url = f, v 

                    break 

 

            # Patch to download from alternative CDN, which does not 

            # break on current RTMPDump builds 

            broken_cdn = "rtmpe://viacomccstrmfs.fplive.net/viacomccstrm/gsp.comedystor/" 

            better_cdn = "rtmpe://cp10740.edgefcs.net/ondemand/mtvnorigin/gsp.comedystor/" 

 

            if video_url.startswith(broken_cdn): 

                video_url = video_url.replace(broken_cdn, better_cdn) 

 

            effTitle = showId + u'-' + epTitle 

            info = { 

                'id': shortMediaId, 

                'url': video_url, 

                'uploader': showId, 

                'upload_date': officialDate, 

                'title': effTitle, 

                'ext': 'mp4', 

                'format': format, 

                'thumbnail': None, 

                'description': officialTitle, 

                'player_url': None #playerUrl 

            } 

 

            results.append(info) 

 

        return results 

 

 

class EscapistIE(InfoExtractor): 

    """Information extractor for The Escapist """ 

 

    _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$' 

    IE_NAME = u'escapist' 

 

    def report_extraction(self, showName): 

        self._downloader.to_screen(u'[escapist] %s: Extracting information' % showName) 

 

    def report_config_download(self, showName): 

        self._downloader.to_screen(u'[escapist] %s: Downloading configuration' % showName) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        showName = mobj.group('showname') 

        videoId = mobj.group('episode') 

 

        self.report_extraction(showName) 

        try: 

            webPage = compat_urllib_request.urlopen(url) 

            webPageBytes = webPage.read() 

            m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type']) 

            webPage = webPageBytes.decode(m.group(1) if m else 'utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err)) 

            return 

 

        descMatch = re.search('<meta name="description" content="([^"]*)"', webPage) 

        description = unescapeHTML(descMatch.group(1)) 

        imgMatch = re.search('<meta property="og:image" content="([^"]*)"', webPage) 

        imgUrl = unescapeHTML(imgMatch.group(1)) 

        playerUrlMatch = re.search('<meta property="og:video" content="([^"]*)"', webPage) 

        playerUrl = unescapeHTML(playerUrlMatch.group(1)) 

        configUrlMatch = re.search('config=(.*)$', playerUrl) 

        configUrl = compat_urllib_parse.unquote(configUrlMatch.group(1)) 
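
        # e.g. a (hypothetical) og:video value ending in

        #   ?config=http%3A%2F%2Fexample.com%2Fplayer%2Fconfig.js

        # unquotes to the config URL http://example.com/player/config.js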

 

        self.report_config_download(showName) 

        try: 

            configJSON = compat_urllib_request.urlopen(configUrl) 

            m = re.match(r'text/html; charset="?([^"]+)"?', configJSON.headers['Content-Type']) 

            configJSON = configJSON.read().decode(m.group(1) if m else 'utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err)) 

            return 

 

        # Technically, it's JavaScript, not JSON 

        configJSON = configJSON.replace("'", '"') 

 

        try: 

            config = json.loads(configJSON) 

        except (ValueError,) as err: 

            self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err)) 

            return 

 

        playlist = config['playlist'] 

        videoUrl = playlist[1]['url'] 

 

        info = { 

            'id': videoId, 

            'url': videoUrl, 

            'uploader': showName, 

            'upload_date': None, 

            'title': showName, 

            'ext': 'flv', 

            'thumbnail': imgUrl, 

            'description': description, 

            'player_url': playerUrl, 

        } 

 

        return [info] 

 

 

class CollegeHumorIE(InfoExtractor): 

    """Information extractor for collegehumor.com""" 

 

    _WORKING = False 

    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$' 

    IE_NAME = u'collegehumor' 

 

    def report_manifest(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Downloading XML manifest' % (self.IE_NAME, video_id)) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        video_id = mobj.group('videoid') 

 

        info = { 

            'id': video_id, 

            'uploader': None, 

            'upload_date': None, 

        } 

 

        self.report_extraction(video_id) 

        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id 

        try: 

            metaXml = compat_urllib_request.urlopen(xmlUrl).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) 

            return 

 

        mdoc = xml.etree.ElementTree.fromstring(metaXml) 

        try: 

            videoNode = mdoc.findall('./video')[0] 

            info['description'] = videoNode.findall('./description')[0].text 

            info['title'] = videoNode.findall('./caption')[0].text 

            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text 

            manifest_url = videoNode.findall('./file')[0].text 

        except IndexError: 

            self._downloader.trouble(u'\nERROR: Invalid metadata XML file') 

            return 

 

        manifest_url += '?hdcore=2.10.3' 

        self.report_manifest(video_id) 

        try: 

            manifestXml = compat_urllib_request.urlopen(manifest_url).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) 

            return 

 

        adoc = xml.etree.ElementTree.fromstring(manifestXml) 

        try: 

            media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0] 

            node_id = media_node.attrib['url'] 

            video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text 

        except IndexError as err: 

            self._downloader.trouble(u'\nERROR: Invalid manifest file') 

            return 

 

        url_pr = compat_urllib_parse_urlparse(manifest_url) 

        url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1' 
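
        # e.g. with a (hypothetical) manifest at http://cdn.example.com/path/manifest.f4m,

        # video_id 'abc-01' and node_id 'media1', this builds

        # http://cdn.example.com/zabc-/media1Seg1-Frag1

        # ('/z' + video_id minus its two trailing characters + '/' + node id

        # + the first-segment/first-fragment marker)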

 

        info['url'] = url 

        info['ext'] = 'f4f' 

        return [info] 

 

 

class XVideosIE(InfoExtractor): 

    """Information extractor for xvideos.com""" 

 

    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)' 

    IE_NAME = u'xvideos' 

 

    def report_webpage(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        video_id = mobj.group(1) 

 

        self.report_webpage(video_id) 

 

        request = compat_urllib_request.Request(r'http://www.xvideos.com/video' + video_id) 

        try: 

            webpage_bytes = compat_urllib_request.urlopen(request).read() 

            webpage = webpage_bytes.decode('utf-8', 'replace') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        self.report_extraction(video_id) 

 

 

        # Extract video URL 

        mobj = re.search(r'flv_url=(.+?)&', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video url') 

            return 

        video_url = compat_urllib_parse.unquote(mobj.group(1)) 

 

 

        # Extract title 

        mobj = re.search(r'<title>(.*?)\s+-\s+XVID', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video title') 

            return 

        video_title = mobj.group(1) 

 

 

        # Extract video thumbnail 

        mobj = re.search(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video thumbnail') 

            return 

        video_thumbnail = mobj.group(0) 

 

        info = { 

            'id': video_id, 

            'url': video_url, 

            'uploader': None, 

            'upload_date': None, 

            'title': video_title, 

            'ext': 'flv', 

            'thumbnail': video_thumbnail, 

            'description': None, 

        } 

 

        return [info] 

 

 

class SoundcloudIE(InfoExtractor): 

    """Information extractor for soundcloud.com 

       To access the media, the uid of the song and a stream token 

       must be extracted from the page source and the script must make 

       a request to media.soundcloud.com/crossdomain.xml. Then 

       the media can be grabbed by requesting from an url composed 

       of the stream token and uid 

     """ 

 

    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)' 

    IE_NAME = u'soundcloud' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_resolve(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Resolving id' % (self.IE_NAME, video_id)) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Retrieving stream' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        # extract uploader (which is in the url) 

        uploader = mobj.group(1) 

        # extract simple title (uploader + slug of song title) 

        slug_title =  mobj.group(2) 

        simple_title = uploader + u'-' + slug_title 

 

        self.report_resolve('%s/%s' % (uploader, slug_title)) 

 

        url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title) 

        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28' 
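
        # e.g. a (hypothetical) track http://soundcloud.com/artist/song resolves via

        # http://api.soundcloud.com/resolve.json?url=http://soundcloud.com/artist/song&client_id=...

        # whose JSON response carries the numeric track id used below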

        request = compat_urllib_request.Request(resolv_url) 

        try: 

            info_json_bytes = compat_urllib_request.urlopen(request).read() 

            info_json = info_json_bytes.decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        info = json.loads(info_json) 

        video_id = info['id'] 

        self.report_extraction('%s/%s' % (uploader, slug_title)) 

 

        streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28' 

        request = compat_urllib_request.Request(streams_url) 

        try: 

            stream_json_bytes = compat_urllib_request.urlopen(request).read() 

            stream_json = stream_json_bytes.decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        streams = json.loads(stream_json) 

        mediaURL = streams['http_mp3_128_url'] 

 

        return [{ 

            'id':       info['id'], 

            'url':      mediaURL, 

            'uploader': info['user']['username'], 

            'upload_date':  info['created_at'], 

            'title':    info['title'], 

            'ext':      u'mp3', 

            'description': info['description'], 

        }] 

 

 

class InfoQIE(InfoExtractor): 

    """Information extractor for infoq.com""" 

 

    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$' 

    IE_NAME = u'infoq' 

 

    def report_webpage(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        self.report_webpage(url) 

 

        request = compat_urllib_request.Request(url) 

        try: 

            webpage = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        self.report_extraction(url) 

 

 

        # Extract video URL 

        mobj = re.search(r"jsclassref='([^']*)'", webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video url') 

            return 

        video_url = 'rtmpe://video.infoq.com/cfx/st/' + compat_urllib_parse.unquote(mobj.group(1).decode('base64')) 

 

 

        # Extract title 

        mobj = re.search(r'contentTitle = "(.*?)";', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video title') 

            return 

        video_title = mobj.group(1).decode('utf-8') 

 

        # Extract description 

        video_description = u'No description available.' 

        mobj = re.search(r'<meta name="description" content="(.*)"(?:\s*/)?>', webpage) 

        if mobj is not None: 

            video_description = mobj.group(1).decode('utf-8') 

 

        video_filename = video_url.split('/')[-1] 

        video_id, extension = video_filename.split('.') 

 

        info = { 

            'id': video_id, 

            'url': video_url, 

            'uploader': None, 

            'upload_date': None, 

            'title': video_title, 

            'ext': extension, # Extension is always(?) mp4, but seems to be flv 

            'thumbnail': None, 

            'description': video_description, 

        } 

 

        return [info] 

 

class MixcloudIE(InfoExtractor): 

    """Information extractor for www.mixcloud.com""" 

 

    _WORKING = False # The site moved to a new API, which looks promising: http://www.mixcloud.com/developers/documentation/ 

    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)' 

    IE_NAME = u'mixcloud' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_json(self, file_id): 

        """Report JSON download.""" 

        self._downloader.to_screen(u'[%s] Downloading json' % self.IE_NAME) 

 

    def report_extraction(self, file_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) 

 

    def get_urls(self, jsonData, fmt, bitrate='best'): 

        """Get urls from 'audio_formats' section in json""" 

        file_url = None 

        try: 

            bitrate_list = jsonData[fmt] 

            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list: 

                bitrate = max(bitrate_list) # select highest 

 

            url_list = jsonData[fmt][bitrate] 

        except TypeError: # we have no bitrate info. 

            url_list = jsonData[fmt] 

        return url_list 
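
        # Assumed (unverified) shape of jsonData[fmt]: either a bitrate map such as

        #   {'mp3': {'128': ['http://.../a.mp3'], '320': [...]}}

        # or, when no bitrate info exists, a plain url list ['http://.../a.mp3']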

 

    def check_urls(self, url_list): 

        """Return the first reachable URL from the list, or None.""" 

        for url in url_list: 

            try: 

                compat_urllib_request.urlopen(url) 

                return url 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error): 

                pass # unreachable, try the next URL 

 

        return None 

 

    def _print_formats(self, formats): 

        print('Available formats:') 

        for fmt in formats.keys(): 

            for b in formats[fmt]: 

                try: 

                    ext = formats[fmt][b][0] 

                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1])) 

                except TypeError: # we have no bitrate info 

                    ext = formats[fmt][0] 

                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1])) 

                    break 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        # extract uploader & filename from url 

        uploader = mobj.group(1).decode('utf-8') 

        file_id = uploader + "-" + mobj.group(2).decode('utf-8') 

 

        # construct API request 

        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json' 
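
        # e.g. a (hypothetical) page http://www.mixcloud.com/some-user/some-set/

        # maps to http://www.mixcloud.com/api/1/cloudcast/some-user/some-set.json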

        # retrieve .json file with links to files 

        request = compat_urllib_request.Request(file_url) 

        try: 

            self.report_download_json(file_url) 

            jsonData = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err)) 

            return 

 

        # parse JSON 

        json_data = json.loads(jsonData) 

        player_url = json_data['player_swf_url'] 

        formats = dict(json_data['audio_formats']) 

 

        req_format = self._downloader.params.get('format', None) 

        bitrate = None 

 

        if self._downloader.params.get('listformats', None): 

            self._print_formats(formats) 

            return 

 

        if req_format is None or req_format == 'best': 

            for format_param in formats.keys(): 

                url_list = self.get_urls(formats, format_param) 

                # check urls 

                file_url = self.check_urls(url_list) 

                if file_url is not None: 

                    break # got it! 

        else: 

            if req_format not in formats.keys(): 

                self._downloader.trouble(u'ERROR: format is not available') 

                return 

 

            url_list = self.get_urls(formats, req_format) 

            file_url = self.check_urls(url_list) 

            format_param = req_format 

 

        return [{ 

            'id': file_id.decode('utf-8'), 

            'url': file_url.decode('utf-8'), 

            'uploader': uploader.decode('utf-8'), 

            'upload_date': None, 

            'title': json_data['name'], 

            'ext': file_url.split('.')[-1].decode('utf-8'), 

            'format': (format_param is None and u'NA' or format_param.decode('utf-8')), 

            'thumbnail': json_data['thumbnail_url'], 

            'description': json_data['description'], 

            'player_url': player_url.decode('utf-8'), 

        }] 

 

class StanfordOpenClassroomIE(InfoExtractor): 

    """Information extractor for Stanford's Open ClassRoom""" 

 

    _VALID_URL = r'^(?:https?://)?openclassroom.stanford.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$' 

    IE_NAME = u'stanfordoc' 

 

    def report_download_webpage(self, objid): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, objid)) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        if mobj.group('course') and mobj.group('video'): # A specific video 

            course = mobj.group('course') 

            video = mobj.group('video') 

            info = { 

                'id': course + '_' + video, 

                'uploader': None, 

                'upload_date': None, 

            } 

 

            self.report_extraction(info['id']) 

            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/' 

            xmlUrl = baseUrl + video + '.xml' 

            try: 

                metaXml = compat_urllib_request.urlopen(xmlUrl).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err)) 

                return 

            mdoc = xml.etree.ElementTree.fromstring(metaXml) 

            try: 

                info['title'] = mdoc.findall('./title')[0].text 

                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text 

            except IndexError: 

                self._downloader.trouble(u'\nERROR: Invalid metadata XML file') 

                return 

            info['ext'] = info['url'].rpartition('.')[2] 

            return [info] 

        elif mobj.group('course'): # A course page 

            course = mobj.group('course') 

            info = { 

                'id': course, 

                'type': 'playlist', 

                'uploader': None, 

                'upload_date': None, 

            } 

 

            self.report_download_webpage(info['id']) 

            try: 

                coursepage = compat_urllib_request.urlopen(url).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err)) 

                return 

 

            m = re.search('<h1>([^<]+)</h1>', coursepage) 

            if m: 

                info['title'] = unescapeHTML(m.group(1)) 

            else: 

                info['title'] = info['id'] 

 

            m = re.search('<description>([^<]+)</description>', coursepage) 

            if m: 

                info['description'] = unescapeHTML(m.group(1)) 

 

            links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage)) 

            info['list'] = [ 

                { 

                    'type': 'reference', 

                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage), 

                } 

                    for vpage in links] 

            results = [] 

            for entry in info['list']: 

                assert entry['type'] == 'reference' 

                results += self.extract(entry['url']) 

            return results 

 

        else: # Root page 

            info = { 

                'id': 'Stanford OpenClassroom', 

                'type': 'playlist', 

                'uploader': None, 

                'upload_date': None, 

            } 

 

            self.report_download_webpage(info['id']) 

            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php' 

            try: 

                rootpage = compat_urllib_request.urlopen(rootURL).read() 

            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

                self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err)) 

                return 

 

            info['title'] = info['id'] 

 

            links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage)) 

            info['list'] = [ 

                { 

                    'type': 'reference', 

                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage), 

                } 

                    for cpage in links] 

 

            results = [] 

            for entry in info['list']: 

                assert entry['type'] == 'reference' 

                results += self.extract(entry['url']) 

            return results 

 

class MTVIE(InfoExtractor): 

    """Information extractor for MTV.com""" 

 

    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$' 

    IE_NAME = u'mtv' 

 

    def report_webpage(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) 

 

    def report_extraction(self, video_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        if not mobj.group('proto'): 

            url = 'http://' + url 

        video_id = mobj.group('videoid') 

        self.report_webpage(video_id) 

 

        request = compat_urllib_request.Request(url) 

        try: 

            webpage = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        mobj = re.search(r'<meta name="mtv_vt" content="([^"]+)"/>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract song name') 

            return 

        song_name = unescapeHTML(mobj.group(1).decode('iso-8859-1')) 

        mobj = re.search(r'<meta name="mtv_an" content="([^"]+)"/>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract performer') 

            return 

        performer = unescapeHTML(mobj.group(1).decode('iso-8859-1')) 

        video_title = performer + ' - ' + song_name 

 

        mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract mtvn_uri') 

            return 

        mtvn_uri = mobj.group(1) 

 

        mobj = re.search(r'MTVN.Player.defaultPlaylistId = ([0-9]+);', webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract content id') 

            return 

        content_id = mobj.group(1) 

 

        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri 

        self.report_extraction(video_id) 

        request = compat_urllib_request.Request(videogen_url) 

        try: 

            metadataXml = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err)) 

            return 

 

        mdoc = xml.etree.ElementTree.fromstring(metadataXml) 

        renditions = mdoc.findall('.//rendition') 

 

        # For now, always pick the highest quality. 

        rendition = renditions[-1] 

 

        try: 

            _,_,ext = rendition.attrib['type'].partition('/') 

            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate'] 

            video_url = rendition.find('./src').text 

        except KeyError: 

            self._downloader.trouble(u'ERROR: Invalid rendition field.') 

            return 

 

        info = { 

            'id': video_id, 

            'url': video_url, 

            'uploader': performer, 

            'upload_date': None, 

            'title': video_title, 

            'ext': ext, 

            'format': format, 

        } 

 

        return [info] 

 

 

class YoukuIE(InfoExtractor): 

 

    _VALID_URL =  r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html' 

    IE_NAME = u'Youku' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_download_webpage(self, file_id): 

        """Report webpage download.""" 

        self._downloader.to_screen(u'[Youku] %s: Downloading webpage' % file_id) 

 

    def report_extraction(self, file_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[Youku] %s: Extracting information' % file_id) 

 

    def _gen_sid(self): 

        nowTime = int(time.time() * 1000) 

        random1 = random.randint(1000,1998) 

        random2 = random.randint(1000,9999) 

 

        return "%d%d%d" %(nowTime,random1,random2) 

 

    def _get_file_ID_mix_string(self, seed): 

        mixed = [] 

        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890") 

        seed = float(seed) 

        for i in range(len(source)): 

            seed  =  (seed * 211 + 30031 ) % 65536 

            index  =  math.floor(seed / 65536 * len(source) ) 

            mixed.append(source[int(index)]) 

            source.remove(source[int(index)]) 

        #return ''.join(mixed) 

        return mixed 

 

    def _get_file_id(self, fileId, seed): 

        mixed = self._get_file_ID_mix_string(seed) 

        ids = fileId.split('*') 

        realId = [] 

        for ch in ids: 

            if ch: 

                realId.append(mixed[int(ch)]) 

        return ''.join(realId) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        video_id = mobj.group('ID') 

 

        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id 

 

        request = compat_urllib_request.Request(info_url, None, std_headers) 

        try: 

            self.report_download_webpage(video_id) 

            jsondata = compat_urllib_request.urlopen(request).read() 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

 

        self.report_extraction(video_id) 

        try: 

            jsonstr = jsondata.decode('utf-8') 

            config = json.loads(jsonstr) 

 

            video_title =  config['data'][0]['title'] 

            seed = config['data'][0]['seed'] 

 

            format = self._downloader.params.get('format', None) 

            supported_format = config['data'][0]['streamfileids'].keys() 

 

            if format is None or format == 'best': 

                if 'hd2' in supported_format: 

                    format = 'hd2' 

                else: 

                    format = 'flv' 

                ext = u'flv' 

            elif format == 'worst': 

                format = 'mp4' 

                ext = u'mp4' 

            else: 

                format = 'flv' 

                ext = u'flv' 

 

 

            fileid = config['data'][0]['streamfileids'][format] 

            keys = [s['k'] for s in config['data'][0]['segs'][format]] 

        except (UnicodeDecodeError, ValueError, KeyError): 

            self._downloader.trouble(u'ERROR: unable to extract info section') 

            return 

 

        files_info=[] 

        sid = self._gen_sid() 

        fileid = self._get_file_id(fileid, seed) 

 

        #0-based characters 8 and 9 of the descrambled fileid encode the segment 

        #number; each segment rebuilds it as fileid[0:8] + '%02X' % index + fileid[10:] 
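
        # e.g. a (hypothetical) descrambled fileid 'ABCDEFGH00XYZ' with index 1

        # becomes 'ABCDEFGH' + '01' + 'XYZ' via the '%s%02X%s' substitution below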

        for index, key in enumerate(keys): 

 

            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:]) 

            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key) 

 

            info = { 

                'id': '%s_part%02d' % (video_id, index), 

                'url': download_url, 

                'uploader': None, 

                'upload_date': None, 

                'title': video_title, 

                'ext': ext, 

            } 

            files_info.append(info) 

 

        return files_info 

 

 

class XNXXIE(InfoExtractor): 

    """Information extractor for xnxx.com""" 

 

    _VALID_URL = r'^http://video\.xnxx\.com/video([0-9]+)/(.*)' 

    IE_NAME = u'xnxx' 

    VIDEO_URL_RE = r'flv_url=(.*?)&amp;' 

    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM' 

    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&amp;' 

 

    def report_webpage(self, video_id): 

        """Report information extraction""" 

        self._downloader.to_screen(u'[%s] %s: Downloading webpage' % (self.IE_NAME, video_id)) 

 

    def report_extraction(self, video_id): 

        """Report information extraction""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

        video_id = mobj.group(1) 

 

        self.report_webpage(video_id) 

 

        # Get webpage content 

        try: 

            webpage_bytes = compat_urllib_request.urlopen(url).read() 

            webpage = webpage_bytes.decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err) 

            return 

 

        result = re.search(self.VIDEO_URL_RE, webpage) 

        if result is None: 

            self._downloader.trouble(u'ERROR: unable to extract video url') 

            return 

        video_url = compat_urllib_parse.unquote(result.group(1)) 

 

        result = re.search(self.VIDEO_TITLE_RE, webpage) 

        if result is None: 

            self._downloader.trouble(u'ERROR: unable to extract video title') 

            return 

        video_title = result.group(1) 

 

        result = re.search(self.VIDEO_THUMB_RE, webpage) 

        if result is None: 

            self._downloader.trouble(u'ERROR: unable to extract video thumbnail') 

            return 

        video_thumbnail = result.group(1) 

 

        return [{ 

            'id': video_id, 

            'url': video_url, 

            'uploader': None, 

            'upload_date': None, 

            'title': video_title, 

            'ext': 'flv', 

            'thumbnail': video_thumbnail, 

            'description': None, 

        }] 

 

 

class GooglePlusIE(InfoExtractor): 

    """Information extractor for plus.google.com.""" 

 

    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)' 

    IE_NAME = u'plus.google' 

 

    def __init__(self, downloader=None): 

        InfoExtractor.__init__(self, downloader) 

 

    def report_extract_entry(self, url): 

        """Report downloading extry""" 

        self._downloader.to_screen(u'[plus.google] Downloading entry: %s' % url) 

 

    def report_date(self, upload_date): 

        """Report downloading extry""" 

        self._downloader.to_screen(u'[plus.google] Entry date: %s' % upload_date) 

 

    def report_uploader(self, uploader): 

        """Report downloading extry""" 

        self._downloader.to_screen(u'[plus.google] Uploader: %s' % uploader) 

 

    def report_title(self, video_title): 

        """Report downloading extry""" 

        self._downloader.to_screen(u'[plus.google] Title: %s' % video_title) 

 

    def report_extract_vid_page(self, video_page): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[plus.google] Extracting video page: %s' % video_page) 

 

    def _real_extract(self, url): 

        # Extract id from URL 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) 

            return 

 

        post_url = mobj.group(0) 

        video_id = mobj.group(1) 

 

        video_extension = 'flv' 

 

        # Step 1, Retrieve post webpage to extract further information 

        self.report_extract_entry(post_url) 

        request = compat_urllib_request.Request(post_url) 

        try: 

            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err)) 

            return 

 

        # Extract update date 

        upload_date = None 

        pattern = 'title="Timestamp">(.*?)</a>' 

        mobj = re.search(pattern, webpage) 

        if mobj: 

            upload_date = mobj.group(1) 

            # Convert timestring to a format suitable for filename 

            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d") 

            upload_date = upload_date.strftime('%Y%m%d') 

        self.report_date(upload_date) 

 

        # Extract uploader 

        uploader = None 

        pattern = r'rel\="author".*?>(.*?)</a>' 

        mobj = re.search(pattern, webpage) 

        if mobj: 

            uploader = mobj.group(1) 

        self.report_uploader(uploader) 

 

        # Extract title 

        # Get the first line for title 

        video_title = u'NA' 

        pattern = r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]' 
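        # Matches e.g. <meta name="Description" content="First line of the post..."> (illustrative)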

        mobj = re.search(pattern, webpage) 

        if mobj: 

            video_title = mobj.group(1) 

        self.report_title(video_title) 

 

        # Step 2: simulate clicking the image box to launch the video

        pattern = '"(https\://plus\.google\.com/photos/.*?)",,"image/jpeg","video"\]' 

        mobj = re.search(pattern, webpage) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: unable to extract video page URL') 

            return 

 

        video_page = mobj.group(1) 

        request = compat_urllib_request.Request(video_page) 

        try: 

            webpage = compat_urllib_request.urlopen(request).read().decode('utf-8') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) 

            return 

        self.report_extract_vid_page(video_page) 

 

 

        # Extract video links of all sizes from the video page 

        pattern = '\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"' 

        mobj = re.findall(pattern, webpage) 

        if len(mobj) == 0: 

            self._downloader.trouble(u'ERROR: unable to extract video links') 

            return 

 

        # Sort the (resolution, url) tuples by resolution; compare numerically, 

        # since a plain string sort would rank e.g. '720' above '1080' 

        links = sorted(mobj, key=lambda link: int(link[0])) 

 

        # Choose the last of the sort, i.e. the highest resolution 

        video_url = links[-1] 

        # Only keep the url; the resolution part of the tuple is no longer needed 

        video_url = video_url[-1] 

        # Treat escaped \u0026 style hex 

        try: 

            video_url = video_url.decode("unicode_escape") 

        except AttributeError: # Python 3 

            video_url = bytes(video_url, 'ascii').decode('unicode-escape') 
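        # e.g. a literal '\u0026' escape in the scraped URL becomes '&' after decoding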

 

 

        return [{ 

            'id':       video_id, 

            'url':      video_url, 

            'uploader': uploader, 

            'upload_date':  upload_date, 

            'title':    video_title, 

            'ext':      video_extension, 

        }] 

 

class NBAIE(InfoExtractor): 

    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*)(\?.*)?$' 
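    # Accepts e.g. (hypothetical): http://www.nba.com/video/games/lakers/2012/03/02/highlight/index.html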

    IE_NAME = u'nba' 

 

    def report_extraction(self, video_id): 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, video_id)) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        video_id = mobj.group(1) 

        if video_id.endswith('/index.html'): 

            video_id = video_id[:-len('/index.html')] 

 

        self.report_extraction(video_id) 

        try: 

            urlh = compat_urllib_request.urlopen(url) 

            webpage_bytes = urlh.read() 

            webpage = webpage_bytes.decode('utf-8', 'ignore') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) 

            return 

 

        video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4' 
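        # e.g. a video_id of '/games/20120302/LALMIA' (hypothetical) yields

        # http://ht-mobile.cdn.turner.com/nba/big/games/20120302/LALMIA_nba_1280x720.mp4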

        def _findProp(rexp, default=None): 

            m = re.search(rexp, webpage) 

            if m: 

                return unescapeHTML(m.group(1)) 

            else: 

                return default 

 

        shortened_video_id = video_id.rpartition('/')[2] 

        title = _findProp(r'<meta property="og:title" content="(.*?)"', shortened_video_id).replace('NBA.com: ', '') 

        info = { 

            'id': shortened_video_id, 

            'url': video_url, 

            'ext': 'mp4', 

            'title': title, 

            'upload_date': _findProp(r'<b>Date:</b> (.*?)</div>'), 

            'description': _findProp(r'<div class="description">(.*?)</h1>'), 

        } 

        return [info] 

 

class JustinTVIE(InfoExtractor): 

    """Information extractor for justin.tv and twitch.tv""" 

    # TODO: One broadcast may be split into multiple videos. The key 

    # 'broadcast_id' is the same for all parts, and 'broadcast_part' 

    # starts at 1 and increases. Can we treat all parts as one video? 

 

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/ 

        ([^/]+)(?:/b/([^/]+))?/?(?:\#.*)?$""" 
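    # Group 1 captures the channel name; group 2 (if present) a broadcast id.

    # Hypothetical examples: http://www.twitch.tv/somechannel (whole archive),

    #                        http://www.twitch.tv/somechannel/b/123456789 (one broadcast)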

    _JUSTIN_PAGE_LIMIT = 100 

    IE_NAME = u'justin.tv' 

 

    def report_extraction(self, file_id): 

        """Report information extraction.""" 

        self._downloader.to_screen(u'[%s] %s: Extracting information' % (self.IE_NAME, file_id)) 

 

    def report_download_page(self, channel, offset): 

        """Report attempt to download a single page of videos.""" 

        self._downloader.to_screen(u'[%s] %s: Downloading video information from %d to %d' % 

                (self.IE_NAME, channel, offset, offset + self._JUSTIN_PAGE_LIMIT)) 

 

    # Return count of items, list of *valid* items 

    def _parse_page(self, url): 

        try: 

            urlh = compat_urllib_request.urlopen(url) 

            webpage_bytes = urlh.read() 

            webpage = webpage_bytes.decode('utf-8', 'ignore') 

        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: 

            self._downloader.trouble(u'ERROR: unable to download video info JSON: %s' % compat_str(err)) 

            # Return an empty page rather than None, so the caller's tuple 

            # unpacking (page_count, page_info) does not raise a TypeError 

            return (0, []) 

 

        response = json.loads(webpage) 

        info = [] 

        for clip in response: 

            video_url = clip['video_file_url'] 

            if video_url: 

                video_extension = os.path.splitext(video_url)[1][1:] 

                video_date = re.sub('-', '', clip['created_on'][:10]) 

                info.append({ 

                    'id': clip['id'], 

                    'url': video_url, 

                    'title': clip['title'], 

                    'uploader': clip.get('user_id', clip.get('channel_id')), 

                    'upload_date': video_date, 

                    'ext': video_extension, 

                }) 

        return (len(response), info) 

 

    def _real_extract(self, url): 

        mobj = re.match(self._VALID_URL, url) 

        if mobj is None: 

            self._downloader.trouble(u'ERROR: invalid URL: %s' % url) 

            return 

 

        api = 'http://api.justin.tv' 

        video_id = mobj.group(mobj.lastindex) 

        paged = False 

        if mobj.lastindex == 1: 

            paged = True 

            api += '/channel/archives/%s.json' 

        else: 

            api += '/clip/show/%s.json' 

        api = api % (video_id,) 
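        # e.g. http://api.justin.tv/channel/archives/somechannel.json (paged archive)

        # or   http://api.justin.tv/clip/show/123456789.json (single clip); ids hypothetical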

 

        self.report_extraction(video_id) 

 

        info = [] 

        offset = 0 

        limit = self._JUSTIN_PAGE_LIMIT 

        while True: 

            if paged: 

                self.report_download_page(video_id, offset) 

            page_url = api + ('?offset=%d&limit=%d' % (offset, limit)) 

            page_count, page_info = self._parse_page(page_url) 

            info.extend(page_info) 

            if not paged or page_count != limit: 

                break 

            offset += limit 

        return info
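 

 

# A minimal, self-contained sketch of the offset/limit pagination pattern used by

# JustinTVIE above. It assumes a fetch_page(offset, limit) callable returning a

# (count, items) tuple; the names here are illustrative, not part of any extractor API.

def paginate_all(fetch_page, limit=100):

    """Collect items from every page until a short (final) page is seen."""

    items = []

    offset = 0

    while True:

        count, page_items = fetch_page(offset, limit)

        items.extend(page_items)

        if count != limit:  # a short page means the last page was reached

            break

        offset += limit

    return items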