You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

320 lines
11 KiB

  1. --- a/kernel/pf_ring.c
  2. +++ b/kernel/pf_ring.c
  3. @@ -6806,11 +6806,21 @@ int sk_detach_filter(struct sock *sk)
  4. #endif
  5. /* ************************************* */
  6. +#if(LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0))
  7. +#define copy_from_sockptr copy_from_user
  8. +#define copy_to_sockptr copy_to_user
  9. +#else
  10. +#define copy_to_sockptr(dst,src,size) copy_to_sockptr_offset(dst, 0, src, size)
  11. +#endif
  12. /* Code taken/inspired from core/sock.c */
  13. static int ring_setsockopt(struct socket *sock,
  14. int level, int optname,
  15. +#if(LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0))
  16. char __user * optval,
  17. +#else
  18. + sockptr_t optval,
  19. +#endif
  20. unsigned
  21. int optlen)
  22. {
  23. @@ -6842,7 +6852,7 @@ static int ring_setsockopt(struct socket
  24. ret = -EFAULT;
  25. - if(copy_from_user(&fprog, optval, sizeof(fprog)))
  26. + if(copy_from_sockptr(&fprog, optval, sizeof(fprog)))
  27. break;
  28. if(fprog.len <= 1) { /* empty filter */
  29. @@ -6888,7 +6898,7 @@ static int ring_setsockopt(struct socket
  30. if(optlen != sizeof(cluster))
  31. return(-EINVAL);
  32. - if(copy_from_user(&cluster, optval, sizeof(cluster)))
  33. + if(copy_from_sockptr(&cluster, optval, sizeof(cluster)))
  34. return(-EFAULT);
  35. write_lock_bh(&pfr->ring_rules_lock);
  36. @@ -6911,7 +6921,7 @@ static int ring_setsockopt(struct socket
  37. if(optlen != sizeof(channel_id_mask))
  38. return(-EINVAL);
  39. - if(copy_from_user(&channel_id_mask, optval, sizeof(channel_id_mask)))
  40. + if(copy_from_sockptr(&channel_id_mask, optval, sizeof(channel_id_mask)))
  41. return(-EFAULT);
  42. num_channels = 0;
  43. @@ -6967,7 +6977,7 @@ static int ring_setsockopt(struct socket
  44. if(optlen > sizeof(name) /* Names should not be too long */ )
  45. return(-EINVAL);
  46. - if(copy_from_user(&name, optval, optlen))
  47. + if(copy_from_sockptr(&name, optval, optlen))
  48. return(-EFAULT);
  49. if(pfr->appl_name != NULL)
  50. @@ -6985,7 +6995,7 @@ static int ring_setsockopt(struct socket
  51. if(optlen != sizeof(direction))
  52. return(-EINVAL);
  53. - if(copy_from_user(&direction, optval, sizeof(direction)))
  54. + if(copy_from_sockptr(&direction, optval, sizeof(direction)))
  55. return(-EFAULT);
  56. pfr->direction = direction;
  57. @@ -6999,7 +7009,7 @@ static int ring_setsockopt(struct socket
  58. if(optlen != sizeof(sockmode))
  59. return(-EINVAL);
  60. - if(copy_from_user(&sockmode, optval, sizeof(sockmode)))
  61. + if(copy_from_sockptr(&sockmode, optval, sizeof(sockmode)))
  62. return(-EFAULT);
  63. pfr->mode = sockmode;
  64. @@ -7013,7 +7023,7 @@ static int ring_setsockopt(struct socket
  65. if(optlen != sizeof(rule_inactivity))
  66. return(-EINVAL);
  67. - if(copy_from_user(&rule_inactivity, optval, sizeof(rule_inactivity)))
  68. + if(copy_from_sockptr(&rule_inactivity, optval, sizeof(rule_inactivity)))
  69. return(-EFAULT);
  70. else {
  71. write_lock_bh(&pfr->ring_rules_lock);
  72. @@ -7027,7 +7037,7 @@ static int ring_setsockopt(struct socket
  73. if(optlen != sizeof(rule_inactivity))
  74. return(-EINVAL);
  75. - if(copy_from_user(&rule_inactivity, optval, sizeof(rule_inactivity)))
  76. + if(copy_from_sockptr(&rule_inactivity, optval, sizeof(rule_inactivity)))
  77. return(-EFAULT);
  78. else {
  79. write_lock_bh(&pfr->ring_rules_lock);
  80. @@ -7043,7 +7053,7 @@ static int ring_setsockopt(struct socket
  81. else {
  82. u_int8_t new_policy;
  83. - if(copy_from_user(&new_policy, optval, optlen))
  84. + if(copy_from_sockptr(&new_policy, optval, optlen))
  85. return(-EFAULT);
  86. write_lock_bh(&pfr->ring_rules_lock);
  87. @@ -7075,7 +7085,7 @@ static int ring_setsockopt(struct socket
  88. if(rule == NULL)
  89. return(-EFAULT);
  90. - if(copy_from_user(&rule->rule, optval, optlen))
  91. + if(copy_from_sockptr(&rule->rule, optval, optlen))
  92. return(-EFAULT);
  93. INIT_LIST_HEAD(&rule->list);
  94. @@ -7099,7 +7109,7 @@ static int ring_setsockopt(struct socket
  95. if(rule == NULL)
  96. return(-EFAULT);
  97. - if(copy_from_user(&rule->rule, optval, optlen))
  98. + if(copy_from_sockptr(&rule->rule, optval, optlen))
  99. return(-EFAULT);
  100. write_lock_bh(&pfr->ring_rules_lock);
  101. @@ -7123,7 +7133,7 @@ static int ring_setsockopt(struct socket
  102. /* This is a list rule */
  103. int rc;
  104. - if(copy_from_user(&rule_id, optval, optlen))
  105. + if(copy_from_sockptr(&rule_id, optval, optlen))
  106. return(-EFAULT);
  107. write_lock_bh(&pfr->ring_rules_lock);
  108. @@ -7139,7 +7149,7 @@ static int ring_setsockopt(struct socket
  109. sw_filtering_hash_bucket rule;
  110. int rc;
  111. - if(copy_from_user(&rule.rule, optval, optlen))
  112. + if(copy_from_sockptr(&rule.rule, optval, optlen))
  113. return(-EFAULT);
  114. write_lock_bh(&pfr->ring_rules_lock);
  115. @@ -7156,7 +7166,7 @@ static int ring_setsockopt(struct socket
  116. if(optlen != sizeof(pfr->sample_rate))
  117. return(-EINVAL);
  118. - if(copy_from_user(&pfr->sample_rate, optval, sizeof(pfr->sample_rate)))
  119. + if(copy_from_sockptr(&pfr->sample_rate, optval, sizeof(pfr->sample_rate)))
  120. return(-EFAULT);
  121. break;
  122. @@ -7164,7 +7174,7 @@ static int ring_setsockopt(struct socket
  123. if(optlen != sizeof(pfr->filtering_sample_rate))
  124. return(-EINVAL);
  125. - if(copy_from_user(&pfr->filtering_sample_rate, optval, sizeof(pfr->filtering_sample_rate)))
  126. + if(copy_from_sockptr(&pfr->filtering_sample_rate, optval, sizeof(pfr->filtering_sample_rate)))
  127. return(-EFAULT);
  128. pfr->filtering_sampling_size = pfr->filtering_sample_rate;
  129. @@ -7231,7 +7241,7 @@ static int ring_setsockopt(struct socket
  130. else
  131. threshold = min_num_slots;
  132. - if(copy_from_user(&pfr->poll_num_pkts_watermark, optval, optlen))
  133. + if(copy_from_sockptr(&pfr->poll_num_pkts_watermark, optval, optlen))
  134. return(-EFAULT);
  135. if(pfr->poll_num_pkts_watermark > threshold)
  136. @@ -7248,7 +7258,7 @@ static int ring_setsockopt(struct socket
  137. if(optlen != sizeof(u_int16_t))
  138. return(-EINVAL);
  139. else {
  140. - if(copy_from_user(&pfr->poll_watermark_timeout, optval, optlen))
  141. + if(copy_from_sockptr(&pfr->poll_watermark_timeout, optval, optlen))
  142. return(-EFAULT);
  143. debug_printk(2, "--> SO_SET_POLL_WATERMARK_TIMEOUT=%u\n", pfr->poll_watermark_timeout);
  144. }
  145. @@ -7258,7 +7268,7 @@ static int ring_setsockopt(struct socket
  146. if(optlen != sizeof(u_int32_t))
  147. return(-EINVAL);
  148. - if(copy_from_user(&pfr->bucket_len, optval, optlen))
  149. + if(copy_from_sockptr(&pfr->bucket_len, optval, optlen))
  150. return(-EFAULT);
  151. debug_printk(2, "--> SO_RING_BUCKET_LEN=%d\n", pfr->bucket_len);
  152. @@ -7268,7 +7278,7 @@ static int ring_setsockopt(struct socket
  153. if(optlen != sizeof(zc_dev_mapping))
  154. return(-EINVAL);
  155. - if(copy_from_user(&mapping, optval, optlen))
  156. + if(copy_from_sockptr(&mapping, optval, optlen))
  157. return(-EFAULT);
  158. debug_printk(2, "SO_SELECT_ZC_DEVICE %s\n", mapping.device_name);
  159. @@ -7278,7 +7288,7 @@ static int ring_setsockopt(struct socket
  160. else
  161. ret = pfring_release_zc_dev(pfr);
  162. - if(copy_to_user(optval, &mapping, optlen)) /* returning device_model*/
  163. + if(copy_to_sockptr(optval, &mapping, optlen)) /* returning device_model*/
  164. return(-EFAULT);
  165. break;
  166. @@ -7291,7 +7301,7 @@ static int ring_setsockopt(struct socket
  167. if(optlen != sizeof(ring_id))
  168. return(-EINVAL);
  169. - if(copy_from_user(&ring_id, optval, sizeof(ring_id)))
  170. + if(copy_from_sockptr(&ring_id, optval, sizeof(ring_id)))
  171. return(-EFAULT);
  172. write_lock_bh(&pfr->ring_rules_lock);
  173. @@ -7303,7 +7313,7 @@ static int ring_setsockopt(struct socket
  174. if(optlen != sizeof(hw_filtering_rule))
  175. return(-EINVAL);
  176. - if(copy_from_user(&hw_rule, optval, sizeof(hw_rule)))
  177. + if(copy_from_sockptr(&hw_rule, optval, sizeof(hw_rule)))
  178. return(-EFAULT);
  179. /* Check if a rule with the same id exists */
  180. @@ -7343,7 +7353,7 @@ static int ring_setsockopt(struct socket
  181. if(optlen != sizeof(u_int16_t))
  182. return(-EINVAL);
  183. - if(copy_from_user(&rule_id, optval, sizeof(u_int16_t)))
  184. + if(copy_from_sockptr(&rule_id, optval, sizeof(u_int16_t)))
  185. return(-EFAULT);
  186. /* Check if the rule we want to remove exists */
  187. @@ -7381,7 +7391,7 @@ static int ring_setsockopt(struct socket
  188. if(optlen != sizeof(elem))
  189. return(-EINVAL);
  190. - if(copy_from_user(&elem, optval, sizeof(elem)))
  191. + if(copy_from_sockptr(&elem, optval, sizeof(elem)))
  192. return(-EFAULT);
  193. if((pfr->v_filtering_dev = add_virtual_filtering_device(pfr, &elem)) == NULL)
  194. @@ -7402,14 +7412,14 @@ static int ring_setsockopt(struct socket
  195. if(optlen < sizeof(ccri))
  196. return(-EINVAL);
  197. - if(copy_from_user(&ccri, optval, sizeof(ccri)))
  198. + if(copy_from_sockptr(&ccri, optval, sizeof(ccri)))
  199. return(-EFAULT);
  200. if(create_cluster_referee(pfr, ccri.cluster_id, &ccri.recovered) < 0)
  201. return(-EINVAL);
  202. /* copying back the structure (actually we need ccri.recovered only) */
  203. - if(copy_to_user(optval, &ccri, sizeof(ccri))) {
  204. + if(copy_to_sockptr(optval, &ccri, sizeof(ccri))) {
  205. remove_cluster_referee(pfr);
  206. return(-EFAULT);
  207. }
  208. @@ -7422,7 +7432,7 @@ static int ring_setsockopt(struct socket
  209. {
  210. struct public_cluster_object_info pcoi;
  211. - if(copy_from_user(&pcoi, optval, sizeof(pcoi)))
  212. + if(copy_from_sockptr(&pcoi, optval, sizeof(pcoi)))
  213. return(-EFAULT);
  214. if(publish_cluster_object(pfr, pcoi.cluster_id, pcoi.object_type, pcoi.object_id) < 0)
  215. @@ -7436,7 +7446,7 @@ static int ring_setsockopt(struct socket
  216. {
  217. struct lock_cluster_object_info lcoi;
  218. - if(copy_from_user(&lcoi, optval, sizeof(lcoi)))
  219. + if(copy_from_sockptr(&lcoi, optval, sizeof(lcoi)))
  220. return(-EFAULT);
  221. if(lock_cluster_object(pfr, lcoi.cluster_id, lcoi.object_type, lcoi.object_id, lcoi.lock_mask) < 0)
  222. @@ -7450,7 +7460,7 @@ static int ring_setsockopt(struct socket
  223. {
  224. struct lock_cluster_object_info lcoi;
  225. - if(copy_from_user(&lcoi, optval, sizeof(lcoi)))
  226. + if(copy_from_sockptr(&lcoi, optval, sizeof(lcoi)))
  227. return(-EFAULT);
  228. if(unlock_cluster_object(pfr, lcoi.cluster_id, lcoi.object_type, lcoi.object_id, lcoi.lock_mask) < 0)
  229. @@ -7465,7 +7475,7 @@ static int ring_setsockopt(struct socket
  230. if(optlen > (sizeof(pfr->custom_bound_device_name)-1))
  231. optlen = sizeof(pfr->custom_bound_device_name)-1;
  232. - if(copy_from_user(&pfr->custom_bound_device_name, optval, optlen)) {
  233. + if(copy_from_sockptr(&pfr->custom_bound_device_name, optval, optlen)) {
  234. pfr->custom_bound_device_name[0] = '\0';
  235. return(-EFAULT);
  236. } else
  237. @@ -7490,7 +7500,7 @@ static int ring_setsockopt(struct socket
  238. if(optlen > (sizeof(pfr->statsString)-1))
  239. optlen = sizeof(pfr->statsString)-1;
  240. - if(copy_from_user(&pfr->statsString, optval, optlen)) {
  241. + if(copy_from_sockptr(&pfr->statsString, optval, optlen)) {
  242. pfr->statsString[0] = '\0';
  243. return(-EFAULT);
  244. }
  245. @@ -7511,7 +7521,7 @@ static int ring_setsockopt(struct socket
  246. if(optlen != sizeof(u_int32_t))
  247. return (-EINVAL);
  248. - if(copy_from_user(&enable_promisc, optval, optlen))
  249. + if(copy_from_sockptr(&enable_promisc, optval, optlen))
  250. return (-EFAULT);
  251. if(!pfr->ring_dev || pfr->ring_dev == &none_device_element || pfr->ring_dev == &any_device_element) {
  252. @@ -7537,7 +7547,7 @@ static int ring_setsockopt(struct socket
  253. if(optlen != sizeof(vlan_id))
  254. return(-EINVAL);
  255. - if(copy_from_user(&vlan_id, optval, sizeof(vlan_id)))
  256. + if(copy_from_sockptr(&vlan_id, optval, sizeof(vlan_id)))
  257. return(-EFAULT);
  258. pfr->vlan_id = vlan_id;